Diffstat (limited to 'include')
-rw-r--r--include/authz/base.h17
-rw-r--r--include/authz/list.h19
-rw-r--r--include/authz/listfile.h21
-rw-r--r--include/authz/pamacct.h19
-rw-r--r--include/authz/simple.h19
-rw-r--r--include/block/accounting.h7
-rw-r--r--include/block/aio-wait.h27
-rw-r--r--include/block/aio.h203
-rw-r--r--include/block/aio_task.h2
-rw-r--r--include/block/block-common.h565
-rw-r--r--include/block/block-copy.h80
-rw-r--r--include/block/block-global-state.h305
-rw-r--r--include/block/block-hmp-cmds.h8
-rw-r--r--include/block/block-io.h455
-rw-r--r--include/block/block.h806
-rw-r--r--include/block/block_backup.h2
-rw-r--r--include/block/block_int-common.h1321
-rw-r--r--include/block/block_int-global-state.h326
-rw-r--r--include/block/block_int-io.h194
-rw-r--r--include/block/block_int.h1359
-rw-r--r--include/block/blockjob.h102
-rw-r--r--include/block/blockjob_int.h66
-rw-r--r--include/block/dirty-bitmap.h18
-rw-r--r--include/block/export.h91
-rw-r--r--include/block/fuse.h30
-rw-r--r--include/block/graph-lock.h278
-rw-r--r--include/block/nbd.h259
-rw-r--r--include/block/nvme.h1240
-rw-r--r--include/block/qapi.h33
-rw-r--r--include/block/qdict.h5
-rw-r--r--include/block/raw-aio.h27
-rw-r--r--include/block/replication.h175
-rw-r--r--include/block/reqlist.h75
-rw-r--r--include/block/snapshot.h61
-rw-r--r--include/block/thread-pool.h20
-rw-r--r--include/block/throttle-groups.h13
-rw-r--r--include/block/ufs.h1090
-rw-r--r--include/block/write-threshold.h27
-rw-r--r--include/chardev/char-fd.h9
-rw-r--r--include/chardev/char-fe.h47
-rw-r--r--include/chardev/char-socket.h87
-rw-r--r--include/chardev/char-win.h9
-rw-r--r--include/chardev/char.h56
-rw-r--r--include/chardev/spice.h13
-rw-r--r--include/crypto/aes-round.h164
-rw-r--r--include/crypto/aes.h37
-rw-r--r--include/crypto/akcipher.h179
-rw-r--r--include/crypto/block.h50
-rw-r--r--include/crypto/cipher.h4
-rw-r--r--include/crypto/clmul.h83
-rw-r--r--include/crypto/desrfb.h16
-rw-r--r--include/crypto/ivgen.h4
-rw-r--r--include/crypto/secret.h6
-rw-r--r--include/crypto/secret_common.h26
-rw-r--r--include/crypto/secret_keyring.h24
-rw-r--r--include/crypto/sm4.h15
-rw-r--r--include/crypto/tls-cipher-suites.h17
-rw-r--r--include/crypto/tlscreds.h38
-rw-r--r--include/crypto/tlscredsanon.h19
-rw-r--r--include/crypto/tlscredspsk.h19
-rw-r--r--include/crypto/tlscredsx509.h17
-rw-r--r--include/crypto/tlssession.h11
-rw-r--r--include/disas/dis-asm.h204
-rw-r--r--include/disas/disas.h23
-rw-r--r--include/elf.h1885
-rw-r--r--include/exec/address-spaces.h2
-rw-r--r--include/exec/confidential-guest-support.h94
-rw-r--r--include/exec/cpu-all.h336
-rw-r--r--include/exec/cpu-common.h182
-rw-r--r--include/exec/cpu-defs.h173
-rw-r--r--include/exec/cpu_ldst.h400
-rw-r--r--include/exec/cputlb.h3
-rw-r--r--include/exec/exec-all.h560
-rw-r--r--include/exec/gdbstub.h268
-rw-r--r--include/exec/gen-icount.h86
-rw-r--r--include/exec/helper-gen-common.h18
-rw-r--r--include/exec/helper-gen.h99
-rw-r--r--include/exec/helper-gen.h.inc102
-rw-r--r--include/exec/helper-head.h87
-rw-r--r--include/exec/helper-info.c.inc96
-rw-r--r--include/exec/helper-proto-common.h20
-rw-r--r--include/exec/helper-proto.h54
-rw-r--r--include/exec/helper-proto.h.inc68
-rw-r--r--include/exec/helper-tcg.h76
-rw-r--r--include/exec/hwaddr.h7
-rw-r--r--include/exec/ioport.h6
-rw-r--r--include/exec/log.h52
-rw-r--r--include/exec/memattrs.h30
-rw-r--r--include/exec/memop.h83
-rw-r--r--include/exec/memopidx.h55
-rw-r--r--include/exec/memory-internal.h4
-rw-r--r--include/exec/memory.h961
-rw-r--r--include/exec/memory_ldst.h.inc (renamed from include/exec/memory_ldst.inc.h)52
-rw-r--r--include/exec/memory_ldst_cached.h.inc (renamed from include/exec/memory_ldst_cached.inc.h)45
-rw-r--r--include/exec/memory_ldst_phys.h.inc (renamed from include/exec/memory_ldst_phys.inc.h)74
-rw-r--r--include/exec/page-vary.h52
-rw-r--r--include/exec/plugin-gen.h30
-rw-r--r--include/exec/poison.h28
-rw-r--r--include/exec/ram_addr.h107
-rw-r--r--include/exec/ramblock.h34
-rw-r--r--include/exec/ramlist.h17
-rw-r--r--include/exec/replay-core.h80
-rw-r--r--include/exec/softmmu-semi.h101
-rw-r--r--include/exec/target_long.h44
-rw-r--r--include/exec/target_page.h2
-rw-r--r--include/exec/tb-flush.h28
-rw-r--r--include/exec/tb-hash.h69
-rw-r--r--include/exec/tb-lookup.h51
-rw-r--r--include/exec/tlb-common.h56
-rw-r--r--include/exec/translate-all.h33
-rw-r--r--include/exec/translation-block.h154
-rw-r--r--include/exec/translator.h154
-rw-r--r--include/exec/tswap.h72
-rw-r--r--include/exec/user/abitypes.h12
-rw-r--r--include/exec/user/guest-base.h12
-rw-r--r--include/exec/user/thunk.h23
-rw-r--r--include/exec/vaddr.h18
-rw-r--r--include/fpu/softfloat-helpers.h21
-rw-r--r--include/fpu/softfloat-macros.h331
-rw-r--r--include/fpu/softfloat-types.h56
-rw-r--r--include/fpu/softfloat.h296
-rw-r--r--include/gdbstub/helpers.h103
-rw-r--r--include/gdbstub/syscalls.h122
-rw-r--r--include/gdbstub/user.h67
-rw-r--r--include/glib-compat.h97
-rw-r--r--include/hw/acpi/acpi-defs.h562
-rw-r--r--include/hw/acpi/acpi.h12
-rw-r--r--include/hw/acpi/acpi_aml_interface.h52
-rw-r--r--include/hw/acpi/acpi_dev_interface.h18
-rw-r--r--include/hw/acpi/acpi_generic_initiator.h47
-rw-r--r--include/hw/acpi/aml-build.h98
-rw-r--r--include/hw/acpi/cpu.h12
-rw-r--r--include/hw/acpi/cxl.h30
-rw-r--r--include/hw/acpi/erst.h27
-rw-r--r--include/hw/acpi/generic_event_device.h25
-rw-r--r--include/hw/acpi/ghes.h12
-rw-r--r--include/hw/acpi/ich9.h18
-rw-r--r--include/hw/acpi/ich9_tco.h (renamed from include/hw/acpi/tco.h)3
-rw-r--r--include/hw/acpi/ipmi.h9
-rw-r--r--include/hw/acpi/memory_hotplug.h1
-rw-r--r--include/hw/acpi/pc-hotplug.h2
-rw-r--r--include/hw/acpi/pci.h8
-rw-r--r--include/hw/acpi/pcihp.h17
-rw-r--r--include/hw/acpi/piix4.h73
-rw-r--r--include/hw/acpi/tpm.h45
-rw-r--r--include/hw/acpi/utils.h3
-rw-r--r--include/hw/acpi/vmgenid.h17
-rw-r--r--include/hw/adc/aspeed_adc.h56
-rw-r--r--include/hw/adc/max111x.h (renamed from include/hw/misc/max111x.h)10
-rw-r--r--include/hw/adc/npcm7xx_adc.h68
-rw-r--r--include/hw/adc/stm32f2xx_adc.h8
-rw-r--r--include/hw/adc/zynq-xadc.h (renamed from include/hw/misc/zynq-xadc.h)13
-rw-r--r--include/hw/arm/allwinner-a10.h41
-rw-r--r--include/hw/arm/allwinner-h3.h83
-rw-r--r--include/hw/arm/allwinner-r40.h157
-rw-r--r--include/hw/arm/armsse-version.h42
-rw-r--r--include/hw/arm/armsse.h87
-rw-r--r--include/hw/arm/armv7m.h50
-rw-r--r--include/hw/arm/aspeed.h15
-rw-r--r--include/hw/arm/aspeed_soc.h237
-rw-r--r--include/hw/arm/bcm2835_peripherals.h52
-rw-r--r--include/hw/arm/bcm2836.h39
-rw-r--r--include/hw/arm/bcm2838.h31
-rw-r--r--include/hw/arm/bcm2838_peripherals.h84
-rw-r--r--include/hw/arm/boot.h68
-rw-r--r--include/hw/arm/bsa.h35
-rw-r--r--include/hw/arm/digic.h7
-rw-r--r--include/hw/arm/exynos4210.h62
-rw-r--r--include/hw/arm/fsl-imx25.h12
-rw-r--r--include/hw/arm/fsl-imx31.h10
-rw-r--r--include/hw/arm/fsl-imx6.h54
-rw-r--r--include/hw/arm/fsl-imx6ul.h150
-rw-r--r--include/hw/arm/fsl-imx7.h365
-rw-r--r--include/hw/arm/linux-boot-if.h11
-rw-r--r--include/hw/arm/msf2-soc.h18
-rw-r--r--include/hw/arm/npcm7xx.h139
-rw-r--r--include/hw/arm/nrf51_soc.h10
-rw-r--r--include/hw/arm/omap.h36
-rw-r--r--include/hw/arm/pxa.h27
-rw-r--r--include/hw/arm/raspberrypi-fw-defs.h173
-rw-r--r--include/hw/arm/raspi_platform.h105
-rw-r--r--include/hw/arm/smmu-common.h80
-rw-r--r--include/hw/arm/smmuv3.h24
-rw-r--r--include/hw/arm/stm32f100_soc.h61
-rw-r--r--include/hw/arm/stm32f205_soc.h22
-rw-r--r--include/hw/arm/stm32f405_soc.h22
-rw-r--r--include/hw/arm/stm32l4x5_soc.h67
-rw-r--r--include/hw/arm/virt.h91
-rw-r--r--include/hw/arm/xen_arch_hvm.h9
-rw-r--r--include/hw/arm/xlnx-versal.h195
-rw-r--r--include/hw/arm/xlnx-zynqmp.h55
-rw-r--r--include/hw/audio/asc.h85
-rw-r--r--include/hw/audio/pcspk.h10
-rw-r--r--include/hw/audio/soundhw.h8
-rw-r--r--include/hw/audio/virtio-snd.h250
-rw-r--r--include/hw/block/block.h15
-rw-r--r--include/hw/block/fdc.h6
-rw-r--r--include/hw/block/flash.h37
-rw-r--r--include/hw/block/swim.h36
-rw-r--r--include/hw/boards.h206
-rw-r--r--include/hw/char/avr_usart.h11
-rw-r--r--include/hw/char/bcm2835_aux.h7
-rw-r--r--include/hw/char/cadence_uart.h25
-rw-r--r--include/hw/char/cmsdk-apb-uart.h41
-rw-r--r--include/hw/char/digic-uart.h8
-rw-r--r--include/hw/char/escc.h8
-rw-r--r--include/hw/char/goldfish_tty.h36
-rw-r--r--include/hw/char/grlib_uart.h32
-rw-r--r--include/hw/char/ibex_uart.h46
-rw-r--r--include/hw/char/imx_serial.h28
-rw-r--r--include/hw/char/lm32_juart.h13
-rw-r--r--include/hw/char/mchp_pfsoc_mmuart.h68
-rw-r--r--include/hw/char/nrf51_uart.h7
-rw-r--r--include/hw/char/parallel-isa.h35
-rw-r--r--include/hw/char/parallel.h19
-rw-r--r--include/hw/char/pl011.h50
-rw-r--r--include/hw/char/renesas_sci.h9
-rw-r--r--include/hw/char/riscv_htif.h (renamed from include/hw/riscv/riscv_htif.h)17
-rw-r--r--include/hw/char/serial.h27
-rw-r--r--include/hw/char/shakti_uart.h74
-rw-r--r--include/hw/char/sifive_uart.h (renamed from include/hw/riscv/sifive_uart.h)15
-rw-r--r--include/hw/char/stm32f2xx_usart.h18
-rw-r--r--include/hw/char/xilinx_uartlite.h21
-rw-r--r--include/hw/clock.h210
-rw-r--r--include/hw/core/accel-cpu.h38
-rw-r--r--include/hw/core/cpu.h675
-rw-r--r--include/hw/core/generic-loader.h8
-rw-r--r--include/hw/core/resetcontainer.h48
-rw-r--r--include/hw/core/split-irq.h3
-rw-r--r--include/hw/core/sysbus-fdt.h (renamed from include/hw/arm/sysbus-fdt.h)0
-rw-r--r--include/hw/core/sysemu-cpu-ops.h92
-rw-r--r--include/hw/core/tcg-cpu-ops.h224
-rw-r--r--include/hw/cpu/a15mpcore.h8
-rw-r--r--include/hw/cpu/a9mpcore.h8
-rw-r--r--include/hw/cpu/arm11mpcore.h8
-rw-r--r--include/hw/cpu/cluster.h8
-rw-r--r--include/hw/cpu/core.h8
-rw-r--r--include/hw/cris/etraxfs.h5
-rw-r--r--include/hw/cxl/cxl.h70
-rw-r--r--include/hw/cxl/cxl_cdat.h172
-rw-r--r--include/hw/cxl/cxl_component.h280
-rw-r--r--include/hw/cxl/cxl_device.h506
-rw-r--r--include/hw/cxl/cxl_events.h169
-rw-r--r--include/hw/cxl/cxl_host.h22
-rw-r--r--include/hw/cxl/cxl_pci.h194
-rw-r--r--include/hw/display/bcm2835_fb.h9
-rw-r--r--include/hw/display/dpcd.h4
-rw-r--r--include/hw/display/edid.h17
-rw-r--r--include/hw/display/i2c-ddc.h4
-rw-r--r--include/hw/display/macfb.h69
-rw-r--r--include/hw/display/milkymist_tmu2.h42
-rw-r--r--include/hw/display/ramfb.h4
-rw-r--r--include/hw/display/vga.h10
-rw-r--r--include/hw/display/xlnx_dp.h19
-rw-r--r--include/hw/dma/bcm2835_dma.h8
-rw-r--r--include/hw/dma/i8257.h8
-rw-r--r--include/hw/dma/pl080.h14
-rw-r--r--include/hw/dma/sifive_pdma.h59
-rw-r--r--include/hw/dma/xlnx-zdma.h10
-rw-r--r--include/hw/dma/xlnx-zynq-devcfg.h8
-rw-r--r--include/hw/dma/xlnx_csu_dma.h72
-rw-r--r--include/hw/dma/xlnx_dpdma.h4
-rw-r--r--include/hw/elf_ops.h122
-rw-r--r--include/hw/firmware/smbios.h92
-rw-r--r--include/hw/fsi/aspeed_apb2opb.h46
-rw-r--r--include/hw/fsi/cfam.h34
-rw-r--r--include/hw/fsi/fsi-master.h32
-rw-r--r--include/hw/fsi/fsi.h37
-rw-r--r--include/hw/fsi/lbus.h43
-rw-r--r--include/hw/fw-path-provider.h11
-rw-r--r--include/hw/gpio/aspeed_gpio.h38
-rw-r--r--include/hw/gpio/bcm2835_gpio.h8
-rw-r--r--include/hw/gpio/bcm2838_gpio.h45
-rw-r--r--include/hw/gpio/imx_gpio.h7
-rw-r--r--include/hw/gpio/npcm7xx_gpio.h55
-rw-r--r--include/hw/gpio/nrf51_gpio.h8
-rw-r--r--include/hw/gpio/pca9552.h (renamed from include/hw/misc/pca9552.h)12
-rw-r--r--include/hw/gpio/pca9552_regs.h (renamed from include/hw/misc/pca9552_regs.h)0
-rw-r--r--include/hw/gpio/pca9554.h36
-rw-r--r--include/hw/gpio/pca9554_regs.h19
-rw-r--r--include/hw/gpio/pcf8574.h15
-rw-r--r--include/hw/gpio/sifive_gpio.h (renamed from include/hw/riscv/sifive_gpio.h)9
-rw-r--r--include/hw/gpio/stm32l4x5_gpio.h71
-rw-r--r--include/hw/hotplug.h13
-rw-r--r--include/hw/hw.h2
-rw-r--r--include/hw/hyperv/dynmem-proto.h430
-rw-r--r--include/hw/hyperv/hv-balloon.h18
-rw-r--r--include/hw/hyperv/hyperv-proto.h52
-rw-r--r--include/hw/hyperv/hyperv.h62
-rw-r--r--include/hw/hyperv/vmbus-bridge.h10
-rw-r--r--include/hw/hyperv/vmbus.h22
-rw-r--r--include/hw/i2c/allwinner-i2c.h61
-rw-r--r--include/hw/i2c/arm_sbcon_i2c.h19
-rw-r--r--include/hw/i2c/aspeed_i2c.h335
-rw-r--r--include/hw/i2c/bcm2835_i2c.h80
-rw-r--r--include/hw/i2c/bitbang_i2c.h2
-rw-r--r--include/hw/i2c/i2c.h115
-rw-r--r--include/hw/i2c/i2c_mux_pca954x.h19
-rw-r--r--include/hw/i2c/imx_i2c.h7
-rw-r--r--include/hw/i2c/microbit_i2c.h8
-rw-r--r--include/hw/i2c/npcm7xx_smbus.h112
-rw-r--r--include/hw/i2c/pmbus_device.h564
-rw-r--r--include/hw/i2c/pnv_i2c_regs.h143
-rw-r--r--include/hw/i2c/ppc4xx_i2c.h7
-rw-r--r--include/hw/i2c/smbus_slave.h15
-rw-r--r--include/hw/i386/apic.h11
-rw-r--r--include/hw/i386/apic_internal.h31
-rw-r--r--include/hw/i386/hostmem-epc.h28
-rw-r--r--include/hw/i386/intel_iommu.h119
-rw-r--r--include/hw/i386/ioapic_internal.h123
-rw-r--r--include/hw/i386/microvm.h74
-rw-r--r--include/hw/i386/pc.h161
-rw-r--r--include/hw/i386/sgx-epc.h71
-rw-r--r--include/hw/i386/topology.h132
-rw-r--r--include/hw/i386/vmport.h2
-rw-r--r--include/hw/i386/x86-iommu.h77
-rw-r--r--include/hw/i386/x86.h96
-rw-r--r--include/hw/i386/xen_arch_hvm.h11
-rw-r--r--include/hw/ide.h24
-rw-r--r--include/hw/ide/ahci-pci.h22
-rw-r--r--include/hw/ide/ahci-sysbus.h35
-rw-r--r--include/hw/ide/ahci.h37
-rw-r--r--include/hw/ide/ide-bus.h42
-rw-r--r--include/hw/ide/ide-dev.h186
-rw-r--r--include/hw/ide/ide-dma.h37
-rw-r--r--include/hw/ide/internal.h649
-rw-r--r--include/hw/ide/isa.h20
-rw-r--r--include/hw/ide/mmio.h26
-rw-r--r--include/hw/ide/pci.h20
-rw-r--r--include/hw/ide/piix.h7
-rw-r--r--include/hw/input/adb.h14
-rw-r--r--include/hw/input/gamepad.h18
-rw-r--r--include/hw/input/i8042.h93
-rw-r--r--include/hw/input/lasips2.h68
-rw-r--r--include/hw/input/lm832x.h28
-rw-r--r--include/hw/input/pl050.h58
-rw-r--r--include/hw/input/ps2.h77
-rw-r--r--include/hw/input/stellaris_gamepad.h37
-rw-r--r--include/hw/input/tsc2xxx.h12
-rw-r--r--include/hw/intc/allwinner-a10-pic.h7
-rw-r--r--include/hw/intc/arm_gic.h17
-rw-r--r--include/hw/intc/arm_gic_common.h21
-rw-r--r--include/hw/intc/arm_gicv3.h14
-rw-r--r--include/hw/intc/arm_gicv3_common.h75
-rw-r--r--include/hw/intc/arm_gicv3_its_common.h65
-rw-r--r--include/hw/intc/armv7m_nvic.h140
-rw-r--r--include/hw/intc/aspeed_vic.h7
-rw-r--r--include/hw/intc/bcm2835_ic.h7
-rw-r--r--include/hw/intc/bcm2836_control.h8
-rw-r--r--include/hw/intc/exynos4210_combiner.h57
-rw-r--r--include/hw/intc/exynos4210_gic.h43
-rw-r--r--include/hw/intc/goldfish_pic.h35
-rw-r--r--include/hw/intc/grlib_irqmp.h (renamed from include/hw/sparc/grlib.h)24
-rw-r--r--include/hw/intc/heathrow_pic.h7
-rw-r--r--include/hw/intc/i8259.h16
-rw-r--r--include/hw/intc/ibex_plic.h63
-rw-r--r--include/hw/intc/imx_avic.h7
-rw-r--r--include/hw/intc/imx_gpcv2.h7
-rw-r--r--include/hw/intc/intc.h11
-rw-r--r--include/hw/intc/ioapic.h (renamed from include/hw/i386/ioapic.h)10
-rw-r--r--include/hw/intc/kvm_irqcount.h10
-rw-r--r--include/hw/intc/loongarch_extioi.h68
-rw-r--r--include/hw/intc/loongarch_ipi.h54
-rw-r--r--include/hw/intc/loongarch_pch_msi.h25
-rw-r--r--include/hw/intc/loongarch_pch_pic.h69
-rw-r--r--include/hw/intc/loongson_liointc.h22
-rw-r--r--include/hw/intc/m68k_irqc.h42
-rw-r--r--include/hw/intc/mips_gic.h8
-rw-r--r--include/hw/intc/ppc-uic.h78
-rw-r--r--include/hw/intc/realview_gic.h8
-rw-r--r--include/hw/intc/riscv_aclint.h83
-rw-r--r--include/hw/intc/riscv_aplic.h79
-rw-r--r--include/hw/intc/riscv_imsic.h68
-rw-r--r--include/hw/intc/rx_icu.h6
-rw-r--r--include/hw/intc/sifive_plic.h (renamed from include/hw/riscv/sifive_plic.h)28
-rw-r--r--include/hw/intc/xlnx-pmu-iomod-intc.h8
-rw-r--r--include/hw/intc/xlnx-zynqmp-ipi.h8
-rw-r--r--include/hw/ipack/ipack.h22
-rw-r--r--include/hw/ipmi/ipmi.h35
-rw-r--r--include/hw/irq.h5
-rw-r--r--include/hw/isa/i8259_internal.h20
-rw-r--r--include/hw/isa/isa.h88
-rw-r--r--include/hw/isa/pc87312.h9
-rw-r--r--include/hw/isa/superio.h21
-rw-r--r--include/hw/isa/vt82c686.h39
-rw-r--r--include/hw/kvm/clock.h28
-rw-r--r--include/hw/lm32/lm32_pic.h10
-rw-r--r--include/hw/loader-fit.h2
-rw-r--r--include/hw/loader.h186
-rw-r--r--include/hw/loongarch/virt.h64
-rw-r--r--include/hw/m68k/mcf.h8
-rw-r--r--include/hw/m68k/mcf_fec.h3
-rw-r--r--include/hw/m68k/next-cube.h15
-rw-r--r--include/hw/m68k/q800-glue.h51
-rw-r--r--include/hw/m68k/q800.h78
-rw-r--r--include/hw/mem/memory-device.h86
-rw-r--r--include/hw/mem/npcm7xx_mc.h36
-rw-r--r--include/hw/mem/nvdimm.h27
-rw-r--r--include/hw/mem/pc-dimm.h25
-rw-r--r--include/hw/mem/sparse-mem.h19
-rw-r--r--include/hw/mips/bios.h14
-rw-r--r--include/hw/mips/bootloader.h26
-rw-r--r--include/hw/mips/cps.h11
-rw-r--r--include/hw/mips/cpudevs.h21
-rw-r--r--include/hw/mips/mips.h7
-rw-r--r--include/hw/misc/a9scu.h7
-rw-r--r--include/hw/misc/allwinner-a10-ccm.h67
-rw-r--r--include/hw/misc/allwinner-a10-dramc.h68
-rw-r--r--include/hw/misc/allwinner-cpucfg.h7
-rw-r--r--include/hw/misc/allwinner-h3-ccu.h7
-rw-r--r--include/hw/misc/allwinner-h3-dramc.h7
-rw-r--r--include/hw/misc/allwinner-h3-sysctrl.h7
-rw-r--r--include/hw/misc/allwinner-r40-ccu.h65
-rw-r--r--include/hw/misc/allwinner-r40-dramc.h108
-rw-r--r--include/hw/misc/allwinner-sid.h7
-rw-r--r--include/hw/misc/allwinner-sramc.h69
-rw-r--r--include/hw/misc/arm11scu.h7
-rw-r--r--include/hw/misc/arm_integrator_debug.h2
-rw-r--r--include/hw/misc/armsse-cpu-pwrctrl.h40
-rw-r--r--include/hw/misc/armsse-cpuid.h9
-rw-r--r--include/hw/misc/armsse-mhu.h9
-rw-r--r--include/hw/misc/armv7m_ras.h37
-rw-r--r--include/hw/misc/aspeed_hace.h50
-rw-r--r--include/hw/misc/aspeed_i3c.h48
-rw-r--r--include/hw/misc/aspeed_lpc.h45
-rw-r--r--include/hw/misc/aspeed_peci.h29
-rw-r--r--include/hw/misc/aspeed_sbc.h45
-rw-r--r--include/hw/misc/aspeed_scu.h65
-rw-r--r--include/hw/misc/aspeed_sdmc.h28
-rw-r--r--include/hw/misc/aspeed_xdma.h24
-rw-r--r--include/hw/misc/auxbus.h13
-rw-r--r--include/hw/misc/avr_power.h8
-rw-r--r--include/hw/misc/bcm2835_cprman.h210
-rw-r--r--include/hw/misc/bcm2835_cprman_internals.h1019
-rw-r--r--include/hw/misc/bcm2835_mbox.h8
-rw-r--r--include/hw/misc/bcm2835_mphi.h4
-rw-r--r--include/hw/misc/bcm2835_powermgt.h29
-rw-r--r--include/hw/misc/bcm2835_property.h9
-rw-r--r--include/hw/misc/bcm2835_rng.h8
-rw-r--r--include/hw/misc/bcm2835_thermal.h8
-rw-r--r--include/hw/misc/djmemc.h30
-rw-r--r--include/hw/misc/grlib_ahb_apb_pnp.h13
-rw-r--r--include/hw/misc/imx25_ccm.h7
-rw-r--r--include/hw/misc/imx31_ccm.h7
-rw-r--r--include/hw/misc/imx6_ccm.h7
-rw-r--r--include/hw/misc/imx6_src.h7
-rw-r--r--include/hw/misc/imx6ul_ccm.h7
-rw-r--r--include/hw/misc/imx7_ccm.h13
-rw-r--r--include/hw/misc/imx7_gpr.h7
-rw-r--r--include/hw/misc/imx7_snvs.h14
-rw-r--r--include/hw/misc/imx7_src.h66
-rw-r--r--include/hw/misc/imx_ccm.h16
-rw-r--r--include/hw/misc/imx_rngc.h7
-rw-r--r--include/hw/misc/iosb.h25
-rw-r--r--include/hw/misc/iotkit-secctl.h8
-rw-r--r--include/hw/misc/iotkit-sysctl.h23
-rw-r--r--include/hw/misc/iotkit-sysinfo.h12
-rw-r--r--include/hw/misc/lasi.h79
-rw-r--r--include/hw/misc/led.h98
-rw-r--r--include/hw/misc/mac_via.h133
-rw-r--r--include/hw/misc/macio/cuda.h15
-rw-r--r--include/hw/misc/macio/gpio.h9
-rw-r--r--include/hw/misc/macio/macio.h64
-rw-r--r--include/hw/misc/macio/pmu.h19
-rw-r--r--include/hw/misc/mchp_pfsoc_dmc.h58
-rw-r--r--include/hw/misc/mchp_pfsoc_ioscb.h56
-rw-r--r--include/hw/misc/mchp_pfsoc_sysreg.h42
-rw-r--r--include/hw/misc/mips_cmgcr.h6
-rw-r--r--include/hw/misc/mips_cpc.h9
-rw-r--r--include/hw/misc/mips_itu.h18
-rw-r--r--include/hw/misc/mos6522.h66
-rw-r--r--include/hw/misc/mps2-fpgaio.h17
-rw-r--r--include/hw/misc/mps2-scc.h41
-rw-r--r--include/hw/misc/msf2-sysreg.h7
-rw-r--r--include/hw/misc/npcm7xx_clk.h180
-rw-r--r--include/hw/misc/npcm7xx_gcr.h73
-rw-r--r--include/hw/misc/npcm7xx_mft.h69
-rw-r--r--include/hw/misc/npcm7xx_pwm.h106
-rw-r--r--include/hw/misc/npcm7xx_rng.h34
-rw-r--r--include/hw/misc/nrf51_rng.h7
-rw-r--r--include/hw/misc/pvpanic.h22
-rw-r--r--include/hw/misc/sifive_e_aon.h60
-rw-r--r--include/hw/misc/sifive_e_prci.h (renamed from include/hw/riscv/sifive_e_prci.h)11
-rw-r--r--include/hw/misc/sifive_test.h (renamed from include/hw/riscv/sifive_test.h)10
-rw-r--r--include/hw/misc/sifive_u_otp.h (renamed from include/hw/riscv/sifive_u_otp.h)16
-rw-r--r--include/hw/misc/sifive_u_prci.h (renamed from include/hw/riscv/sifive_u_prci.h)11
-rw-r--r--include/hw/misc/stm32f2xx_syscfg.h10
-rw-r--r--include/hw/misc/stm32f4xx_exti.h13
-rw-r--r--include/hw/misc/stm32f4xx_syscfg.h13
-rw-r--r--include/hw/misc/stm32l4x5_exti.h51
-rw-r--r--include/hw/misc/stm32l4x5_rcc.h239
-rw-r--r--include/hw/misc/stm32l4x5_rcc_internals.h1042
-rw-r--r--include/hw/misc/stm32l4x5_syscfg.h53
-rw-r--r--include/hw/misc/tz-mpc.h4
-rw-r--r--include/hw/misc/tz-msc.h7
-rw-r--r--include/hw/misc/tz-ppc.h4
-rw-r--r--include/hw/misc/unimp.h9
-rw-r--r--include/hw/misc/virt_ctrl.h24
-rw-r--r--include/hw/misc/vmcoreinfo.h11
-rw-r--r--include/hw/misc/xlnx-cfi-if.h59
-rw-r--r--include/hw/misc/xlnx-versal-cframe-reg.h303
-rw-r--r--include/hw/misc/xlnx-versal-cfu.h258
-rw-r--r--include/hw/misc/xlnx-versal-crl.h235
-rw-r--r--include/hw/misc/xlnx-versal-pmc-iou-slcr.h79
-rw-r--r--include/hw/misc/xlnx-versal-trng.h58
-rw-r--r--include/hw/misc/xlnx-versal-xramc.h97
-rw-r--r--include/hw/misc/xlnx-zynqmp-apu-ctrl.h93
-rw-r--r--include/hw/misc/xlnx-zynqmp-crf.h211
-rw-r--r--include/hw/net/allwinner-sun8i-emac.h15
-rw-r--r--include/hw/net/allwinner_emac.h7
-rw-r--r--include/hw/net/cadence_gem.h9
-rw-r--r--include/hw/net/dp8393x.h60
-rw-r--r--include/hw/net/ftgmac100.h13
-rw-r--r--include/hw/net/imx_fec.h9
-rw-r--r--include/hw/net/lan9118.h2
-rw-r--r--include/hw/net/lance.h10
-rw-r--r--include/hw/net/lasi_82596.h14
-rw-r--r--include/hw/net/mii.h16
-rw-r--r--include/hw/net/msf2-emac.h8
-rw-r--r--include/hw/net/mv88w8618_eth.h13
-rw-r--r--include/hw/net/ne2000-isa.h2
-rw-r--r--include/hw/net/npcm7xx_emc.h283
-rw-r--r--include/hw/net/npcm_gmac.h343
-rw-r--r--include/hw/net/smc91c111.h2
-rw-r--r--include/hw/net/xlnx-versal-canfd.h87
-rw-r--r--include/hw/net/xlnx-zynqmp-can.h79
-rw-r--r--include/hw/nmi.h11
-rw-r--r--include/hw/nubus/mac-nubus-bridge.h21
-rw-r--r--include/hw/nubus/nubus-virtio-mmio.h36
-rw-r--r--include/hw/nubus/nubus.h64
-rw-r--r--include/hw/nvram/chrp_nvram.h3
-rw-r--r--include/hw/nvram/eeprom_at24c.h39
-rw-r--r--include/hw/nvram/fw_cfg.h48
-rw-r--r--include/hw/nvram/fw_cfg_acpi.h14
-rw-r--r--include/hw/nvram/mac_nvram.h52
-rw-r--r--include/hw/nvram/npcm7xx_otp.h79
-rw-r--r--include/hw/nvram/nrf51_nvm.h7
-rw-r--r--include/hw/nvram/xlnx-bbram.h54
-rw-r--r--include/hw/nvram/xlnx-efuse.h132
-rw-r--r--include/hw/nvram/xlnx-versal-efuse.h68
-rw-r--r--include/hw/nvram/xlnx-zynqmp-efuse.h44
-rw-r--r--include/hw/openrisc/boot.h34
-rw-r--r--include/hw/or-irq.h4
-rw-r--r--include/hw/pci-bridge/cxl_upstream_port.h19
-rw-r--r--include/hw/pci-bridge/pci_expander_bridge.h12
-rw-r--r--include/hw/pci-bridge/simba.h8
-rw-r--r--include/hw/pci-bridge/xio3130_downstream.h15
-rw-r--r--include/hw/pci-host/articia.h17
-rw-r--r--include/hw/pci-host/astro.h94
-rw-r--r--include/hw/pci-host/bonito.h18
-rw-r--r--include/hw/pci-host/designware.h17
-rw-r--r--include/hw/pci-host/dino.h146
-rw-r--r--include/hw/pci-host/gpex.h45
-rw-r--r--include/hw/pci-host/grackle.h44
-rw-r--r--include/hw/pci-host/i440fx.h30
-rw-r--r--include/hw/pci-host/ls7a.h50
-rw-r--r--include/hw/pci-host/mv64361.h8
-rw-r--r--include/hw/pci-host/pam.h7
-rw-r--r--include/hw/pci-host/pnv_phb3.h44
-rw-r--r--include/hw/pci-host/pnv_phb3_regs.h16
-rw-r--r--include/hw/pci-host/pnv_phb4.h144
-rw-r--r--include/hw/pci-host/pnv_phb4_regs.h7
-rw-r--r--include/hw/pci-host/ppc4xx.h17
-rw-r--r--include/hw/pci-host/q35.h26
-rw-r--r--include/hw/pci-host/remote.h30
-rw-r--r--include/hw/pci-host/sabre.h17
-rw-r--r--include/hw/pci-host/spapr.h56
-rw-r--r--include/hw/pci-host/uninorth.h31
-rw-r--r--include/hw/pci-host/xilinx-pcie.h18
-rw-r--r--include/hw/pci/msi.h4
-rw-r--r--include/hw/pci/msix.h3
-rw-r--r--include/hw/pci/pci.h448
-rw-r--r--include/hw/pci/pci_bridge.h61
-rw-r--r--include/hw/pci/pci_bus.h15
-rw-r--r--include/hw/pci/pci_device.h350
-rw-r--r--include/hw/pci/pci_host.h15
-rw-r--r--include/hw/pci/pci_ids.h22
-rw-r--r--include/hw/pci/pci_regs.h1
-rw-r--r--include/hw/pci/pcie.h20
-rw-r--r--include/hw/pci/pcie_aer.h3
-rw-r--r--include/hw/pci/pcie_doe.h122
-rw-r--r--include/hw/pci/pcie_host.h10
-rw-r--r--include/hw/pci/pcie_port.h29
-rw-r--r--include/hw/pci/pcie_regs.h23
-rw-r--r--include/hw/pci/pcie_sriov.h82
-rw-r--r--include/hw/pci/shpc.h4
-rw-r--r--include/hw/pcmcia.h48
-rw-r--r--include/hw/platform-bus.h11
-rw-r--r--include/hw/ppc/fdt.h8
-rw-r--r--include/hw/ppc/mac_dbdma.h8
-rw-r--r--include/hw/ppc/openpic.h36
-rw-r--r--include/hw/ppc/pnv.h232
-rw-r--r--include/hw/ppc/pnv_chip.h162
-rw-r--r--include/hw/ppc/pnv_chiptod.h53
-rw-r--r--include/hw/ppc/pnv_core.h48
-rw-r--r--include/hw/ppc/pnv_homer.h29
-rw-r--r--include/hw/ppc/pnv_i2c.h38
-rw-r--r--include/hw/ppc/pnv_lpc.h47
-rw-r--r--include/hw/ppc/pnv_n1_chiplet.h32
-rw-r--r--include/hw/ppc/pnv_nest_pervasive.h32
-rw-r--r--include/hw/ppc/pnv_occ.h34
-rw-r--r--include/hw/ppc/pnv_pnor.h15
-rw-r--r--include/hw/ppc/pnv_psi.h40
-rw-r--r--include/hw/ppc/pnv_sbe.h56
-rw-r--r--include/hw/ppc/pnv_xive.h92
-rw-r--r--include/hw/ppc/pnv_xscom.h85
-rw-r--r--include/hw/ppc/ppc.h20
-rw-r--r--include/hw/ppc/ppc4xx.h138
-rw-r--r--include/hw/ppc/spapr.h195
-rw-r--r--include/hw/ppc/spapr_cpu_core.h23
-rw-r--r--include/hw/ppc/spapr_drc.h63
-rw-r--r--include/hw/ppc/spapr_irq.h62
-rw-r--r--include/hw/ppc/spapr_nested.h524
-rw-r--r--include/hw/ppc/spapr_numa.h37
-rw-r--r--include/hw/ppc/spapr_nvdimm.h25
-rw-r--r--include/hw/ppc/spapr_ovec.h2
-rw-r--r--include/hw/ppc/spapr_rtas.h10
-rw-r--r--include/hw/ppc/spapr_tpm_proxy.h7
-rw-r--r--include/hw/ppc/spapr_vio.h49
-rw-r--r--include/hw/ppc/spapr_xive.h15
-rw-r--r--include/hw/ppc/vof.h65
-rw-r--r--include/hw/ppc/xics.h33
-rw-r--r--include/hw/ppc/xics_spapr.h7
-rw-r--r--include/hw/ppc/xive.h151
-rw-r--r--include/hw/ppc/xive2.h111
-rw-r--r--include/hw/ppc/xive2_regs.h212
-rw-r--r--include/hw/ppc/xive_regs.h18
-rw-r--r--include/hw/ptimer.h38
-rw-r--r--include/hw/qdev-clock.h25
-rw-r--r--include/hw/qdev-core.h701
-rw-r--r--include/hw/qdev-properties-system.h97
-rw-r--r--include/hw/qdev-properties.h275
-rw-r--r--include/hw/rdma/rdma.h40
-rw-r--r--include/hw/register.h15
-rw-r--r--include/hw/registerfields.h126
-rw-r--r--include/hw/remote/iohub.h42
-rw-r--r--include/hw/remote/iommu.h40
-rw-r--r--include/hw/remote/machine.h42
-rw-r--r--include/hw/remote/memory.h19
-rw-r--r--include/hw/remote/mpqemu-link.h99
-rw-r--r--include/hw/remote/proxy-memory-listener.h28
-rw-r--r--include/hw/remote/proxy.h48
-rw-r--r--include/hw/remote/vfio-user-obj.h6
-rw-r--r--include/hw/resettable.h11
-rw-r--r--include/hw/riscv/boot.h46
-rw-r--r--include/hw/riscv/boot_opensbi.h7
-rw-r--r--include/hw/riscv/microchip_pfsoc.h168
-rw-r--r--include/hw/riscv/numa.h114
-rw-r--r--include/hw/riscv/opentitan.h106
-rw-r--r--include/hw/riscv/riscv_hart.h11
-rw-r--r--include/hw/riscv/shakti_c.h75
-rw-r--r--include/hw/riscv/sifive_clint.h57
-rw-r--r--include/hw/riscv/sifive_e.h61
-rw-r--r--include/hw/riscv/sifive_u.h80
-rw-r--r--include/hw/riscv/spike.h26
-rw-r--r--include/hw/riscv/virt.h101
-rw-r--r--include/hw/rtc/allwinner-rtc.h15
-rw-r--r--include/hw/rtc/aspeed_rtc.h9
-rw-r--r--include/hw/rtc/goldfish_rtc.h10
-rw-r--r--include/hw/rtc/m48t59.h19
-rw-r--r--include/hw/rtc/mc146818rtc.h25
-rw-r--r--include/hw/rtc/pl031.h7
-rw-r--r--include/hw/rtc/sun4v-rtc.h6
-rw-r--r--include/hw/rtc/xlnx-zynqmp-rtc.h12
-rw-r--r--include/hw/rx/rx62n.h15
-rw-r--r--include/hw/s390x/3270-ccw.h16
-rw-r--r--include/hw/s390x/ap-device.h12
-rw-r--r--include/hw/s390x/cpu-topology.h83
-rw-r--r--include/hw/s390x/css-bridge.h14
-rw-r--r--include/hw/s390x/css.h19
-rw-r--r--include/hw/s390x/event-facility.h35
-rw-r--r--include/hw/s390x/ioinst.h14
-rw-r--r--include/hw/s390x/pv.h58
-rw-r--r--include/hw/s390x/s390-ccw.h19
-rw-r--r--include/hw/s390x/s390-pci-bus.h406
-rw-r--r--include/hw/s390x/s390-pci-clp.h216
-rw-r--r--include/hw/s390x/s390-pci-inst.h119
-rw-r--r--include/hw/s390x/s390-pci-kvm.h38
-rw-r--r--include/hw/s390x/s390-pci-vfio.h44
-rw-r--r--include/hw/s390x/s390-virtio-ccw.h21
-rw-r--r--include/hw/s390x/s390_flic.h32
-rw-r--r--include/hw/s390x/sclp.h44
-rw-r--r--include/hw/s390x/storage-attributes.h36
-rw-r--r--include/hw/s390x/storage-keys.h90
-rw-r--r--include/hw/s390x/tod.h17
-rw-r--r--include/hw/s390x/vfio-ccw.h7
-rw-r--r--include/hw/scsi/esp.h66
-rw-r--r--include/hw/scsi/scsi.h108
-rw-r--r--include/hw/sd/allwinner-sdhost.h31
-rw-r--r--include/hw/sd/aspeed_sdhci.h10
-rw-r--r--include/hw/sd/bcm2835_sdhost.h8
-rw-r--r--include/hw/sd/cadence_sdhci.h47
-rw-r--r--include/hw/sd/npcm7xx_sdhci.h65
-rw-r--r--include/hw/sd/sd.h154
-rw-r--r--include/hw/sd/sdcard_legacy.h50
-rw-r--r--include/hw/sd/sdhci.h16
-rw-r--r--include/hw/semihosting/console.h69
-rw-r--r--include/hw/sensor/emc141x_regs.h37
-rw-r--r--include/hw/sensor/isl_pmbus_vr.h57
-rw-r--r--include/hw/sensor/tmp105.h55
-rw-r--r--include/hw/sensor/tmp105_regs.h (renamed from include/hw/misc/tmp105_regs.h)0
-rw-r--r--include/hw/sh4/sh.h50
-rw-r--r--include/hw/sh4/sh_intc.h2
-rw-r--r--include/hw/southbridge/ich9.h (renamed from include/hw/i386/ich9.h)56
-rw-r--r--include/hw/southbridge/piix.h43
-rw-r--r--include/hw/sparc/sparc32_dma.h34
-rw-r--r--include/hw/sparc/sun4m_iommu.h9
-rw-r--r--include/hw/sparc/sun4u_iommu.h9
-rw-r--r--include/hw/ssi/aspeed_smc.h100
-rw-r--r--include/hw/ssi/bcm2835_spi.h81
-rw-r--r--include/hw/ssi/ibex_spi_host.h92
-rw-r--r--include/hw/ssi/imx_spi.h12
-rw-r--r--include/hw/ssi/mss-spi.h7
-rw-r--r--include/hw/ssi/npcm7xx_fiu.h73
-rw-r--r--include/hw/ssi/npcm_pspi.h53
-rw-r--r--include/hw/ssi/pl022.h12
-rw-r--r--include/hw/ssi/sifive_spi.h50
-rw-r--r--include/hw/ssi/ssi.h71
-rw-r--r--include/hw/ssi/stm32f2xx_spi.h8
-rw-r--r--include/hw/ssi/xilinx_spips.h36
-rw-r--r--include/hw/ssi/xlnx-versal-ospi.h111
-rw-r--r--include/hw/stream.h44
-rw-r--r--include/hw/sysbus.h20
-rw-r--r--include/hw/timer/a9gtimer.h4
-rw-r--r--include/hw/timer/allwinner-a10-pit.h4
-rw-r--r--include/hw/timer/arm_mptimer.h8
-rw-r--r--include/hw/timer/armv7m_systick.h46
-rw-r--r--include/hw/timer/aspeed_timer.h17
-rw-r--r--include/hw/timer/avr_timer16.h9
-rw-r--r--include/hw/timer/bcm2835_systmr.h27
-rw-r--r--include/hw/timer/cadence_ttc.h54
-rw-r--r--include/hw/timer/cmsdk-apb-dualtimer.h10
-rw-r--r--include/hw/timer/cmsdk-apb-timer.h39
-rw-r--r--include/hw/timer/digic-timer.h7
-rw-r--r--include/hw/timer/grlib_gptimer.h32
-rw-r--r--include/hw/timer/hpet.h2
-rw-r--r--include/hw/timer/i8254.h11
-rw-r--r--include/hw/timer/i8254_internal.h10
-rw-r--r--include/hw/timer/ibex_timer.h55
-rw-r--r--include/hw/timer/imx_epit.h15
-rw-r--r--include/hw/timer/imx_gpt.h10
-rw-r--r--include/hw/timer/mss-timer.h8
-rw-r--r--include/hw/timer/npcm7xx_timer.h113
-rw-r--r--include/hw/timer/nrf51_timer.h7
-rw-r--r--include/hw/timer/renesas_cmt.h9
-rw-r--r--include/hw/timer/renesas_tmr.h9
-rw-r--r--include/hw/timer/sifive_pwm.h62
-rw-r--r--include/hw/timer/sse-counter.h105
-rw-r--r--include/hw/timer/sse-timer.h54
-rw-r--r--include/hw/timer/stellaris-gptm.h51
-rw-r--r--include/hw/timer/stm32f2xx_timer.h10
-rw-r--r--include/hw/tricore/tc27x_soc.h129
-rw-r--r--include/hw/tricore/triboard.h48
-rw-r--r--include/hw/tricore/tricore_testdevice.h (renamed from include/exec/tb-context.h)31
-rw-r--r--include/hw/unicore32/puv3.h40
-rw-r--r--include/hw/usb.h161
-rw-r--r--include/hw/usb/chipidea.h7
-rw-r--r--include/hw/usb/dwc2-regs.h1618
-rw-r--r--include/hw/usb/hcd-dwc3.h56
-rw-r--r--include/hw/usb/hcd-musb.h6
-rw-r--r--include/hw/usb/hid.h17
-rw-r--r--include/hw/usb/imx-usb-phy.h7
-rw-r--r--include/hw/usb/msd.h55
-rw-r--r--include/hw/usb/xhci.h21
-rw-r--r--include/hw/usb/xlnx-usb-subsystem.h47
-rw-r--r--include/hw/usb/xlnx-versal-usb2-ctrl-regs.h48
-rw-r--r--include/hw/vfio/vfio-amd-xgbe.h11
-rw-r--r--include/hw/vfio/vfio-calxeda-xgmac.h21
-rw-r--r--include/hw/vfio/vfio-common.h126
-rw-r--r--include/hw/vfio/vfio-container-base.h141
-rw-r--r--include/hw/vfio/vfio-platform.h19
-rw-r--r--include/hw/vfio/vfio.h7
-rw-r--r--include/hw/virtio/vdpa-dev.h43
-rw-r--r--include/hw/virtio/vhost-backend.h75
-rw-r--r--include/hw/virtio/vhost-scsi-common.h12
-rw-r--r--include/hw/virtio/vhost-scsi.h8
-rw-r--r--include/hw/virtio/vhost-user-base.h49
-rw-r--r--include/hw/virtio/vhost-user-blk.h21
-rw-r--r--include/hw/virtio/vhost-user-fs.h15
-rw-r--r--include/hw/virtio/vhost-user-gpio.h24
-rw-r--r--include/hw/virtio/vhost-user-i2c.h25
-rw-r--r--include/hw/virtio/vhost-user-rng.h24
-rw-r--r--include/hw/virtio/vhost-user-scmi.h31
-rw-r--r--include/hw/virtio/vhost-user-scsi.h14
-rw-r--r--include/hw/virtio/vhost-user-snd.h24
-rw-r--r--include/hw/virtio/vhost-user-vsock.h14
-rw-r--r--include/hw/virtio/vhost-user.h89
-rw-r--r--include/hw/virtio/vhost-vdpa.h79
-rw-r--r--include/hw/virtio/vhost-vsock-common.h22
-rw-r--r--include/hw/virtio/vhost-vsock.h8
-rw-r--r--include/hw/virtio/vhost.h346
-rw-r--r--include/hw/virtio/virtio-access.h8
-rw-r--r--include/hw/virtio/virtio-acpi.h15
-rw-r--r--include/hw/virtio/virtio-balloon.h8
-rw-r--r--include/hw/virtio/virtio-blk-common.h20
-rw-r--r--include/hw/virtio/virtio-blk.h40
-rw-r--r--include/hw/virtio/virtio-bus.h19
-rw-r--r--include/hw/virtio/virtio-crypto.h13
-rw-r--r--include/hw/virtio/virtio-dmabuf.h100
-rw-r--r--include/hw/virtio/virtio-gpu-bswap.h37
-rw-r--r--include/hw/virtio/virtio-gpu-pci.h5
-rw-r--r--include/hw/virtio/virtio-gpu.h177
-rw-r--r--include/hw/virtio/virtio-input.h44
-rw-r--r--include/hw/virtio/virtio-iommu.h27
-rw-r--r--include/hw/virtio/virtio-md-pci.h44
-rw-r--r--include/hw/virtio/virtio-mem.h74
-rw-r--r--include/hw/virtio/virtio-mmio.h22
-rw-r--r--include/hw/virtio/virtio-net.h45
-rw-r--r--include/hw/virtio/virtio-pci.h272
-rw-r--r--include/hw/virtio/virtio-pmem.h19
-rw-r--r--include/hw/virtio/virtio-rng.h8
-rw-r--r--include/hw/virtio/virtio-scsi.h93
-rw-r--r--include/hw/virtio/virtio-serial.h22
-rw-r--r--include/hw/virtio/virtio.h158
-rw-r--r--include/hw/vmstate-if.h11
-rw-r--r--include/hw/watchdog/allwinner-wdt.h123
-rw-r--r--include/hw/watchdog/cmsdk-apb-watchdog.h13
-rw-r--r--include/hw/watchdog/sbsa_gwdt.h79
-rw-r--r--include/hw/watchdog/wdt_aspeed.h24
-rw-r--r--include/hw/watchdog/wdt_diag288.h19
-rw-r--r--include/hw/watchdog/wdt_imx2.h13
-rw-r--r--include/hw/xen/arch_hvm.h5
-rw-r--r--include/hw/xen/interface/arch-arm.h509
-rw-r--r--include/hw/xen/interface/arch-x86/cpuid.h113
-rw-r--r--include/hw/xen/interface/arch-x86/xen-x86_32.h177
-rw-r--r--include/hw/xen/interface/arch-x86/xen-x86_64.h224
-rw-r--r--include/hw/xen/interface/arch-x86/xen.h378
-rw-r--r--include/hw/xen/interface/event_channel.h371
-rw-r--r--include/hw/xen/interface/features.h126
-rw-r--r--include/hw/xen/interface/grant_table.h669
-rw-r--r--include/hw/xen/interface/hvm/hvm_op.h378
-rw-r--r--include/hw/xen/interface/hvm/params.h301
-rw-r--r--include/hw/xen/interface/io/blkif.h41
-rw-r--r--include/hw/xen/interface/io/console.h29
-rw-r--r--include/hw/xen/interface/io/fbif.h39
-rw-r--r--include/hw/xen/interface/io/kbdif.h29
-rw-r--r--include/hw/xen/interface/io/netif.h119
-rw-r--r--include/hw/xen/interface/io/protocols.h19
-rw-r--r--include/hw/xen/interface/io/ring.h139
-rw-r--r--include/hw/xen/interface/io/usbif.h396
-rw-r--r--include/hw/xen/interface/io/xenbus.h29
-rw-r--r--include/hw/xen/interface/io/xs_wire.h147
-rw-r--r--include/hw/xen/interface/memory.h746
-rw-r--r--include/hw/xen/interface/physdev.h366
-rw-r--r--include/hw/xen/interface/sched.h185
-rw-r--r--include/hw/xen/interface/trace.h324
-rw-r--r--include/hw/xen/interface/vcpu.h231
-rw-r--r--include/hw/xen/interface/version.h96
-rw-r--r--include/hw/xen/interface/xen-compat.h29
-rw-r--r--include/hw/xen/interface/xen.h1032
-rw-r--r--include/hw/xen/xen-backend.h2
-rw-r--r--include/hw/xen/xen-block.h33
-rw-r--r--include/hw/xen/xen-bus-helper.h36
-rw-r--r--include/hw/xen/xen-bus.h67
-rw-r--r--include/hw/xen/xen-hvm-common.h98
-rw-r--r--include/hw/xen/xen-legacy-backend.h35
-rw-r--r--include/hw/xen/xen-x86.h15
-rw-r--r--include/hw/xen/xen.h35
-rw-r--r--include/hw/xen/xen_backend_ops.h408
-rw-r--r--include/hw/xen/xen_igd.h33
-rw-r--r--include/hw/xen/xen_native.h (renamed from include/hw/xen/xen_common.h)220
-rw-r--r--include/hw/xen/xen_pvdev.h8
-rw-r--r--include/io/channel-buffer.h7
-rw-r--r--include/io/channel-command.h37
-rw-r--r--include/io/channel-file.h25
-rw-r--r--include/io/channel-null.h55
-rw-r--r--include/io/channel-socket.h13
-rw-r--r--include/io/channel-tls.h8
-rw-r--r--include/io/channel-util.h25
-rw-r--r--include/io/channel-watch.h2
-rw-r--r--include/io/channel-websock.h7
-rw-r--r--include/io/channel.h349
-rw-r--r--include/io/dns-resolver.h15
-rw-r--r--include/io/net-listener.h14
-rw-r--r--include/io/task.h6
-rw-r--r--include/libdecnumber/dconfig.h2
-rw-r--r--include/libdecnumber/decNumber.h4
-rw-r--r--include/libdecnumber/decNumberLocal.h2
-rw-r--r--include/migration/blocker.h74
-rw-r--r--include/migration/client-options.h25
-rw-r--r--include/migration/colo.h19
-rw-r--r--include/migration/global_state.h2
-rw-r--r--include/migration/misc.h68
-rw-r--r--include/migration/qemu-file-types.h18
-rw-r--r--include/migration/register.h279
-rw-r--r--include/migration/snapshot.h54
-rw-r--r--include/migration/vmstate.h110
-rw-r--r--include/monitor/hmp-target.h17
-rw-r--r--include/monitor/hmp.h68
-rw-r--r--include/monitor/monitor.h33
-rw-r--r--include/monitor/qdev.h27
-rw-r--r--include/monitor/qmp-helpers.h29
-rw-r--r--include/net/can_emu.h19
-rw-r--r--include/net/can_host.h20
-rw-r--r--include/net/checksum.h7
-rw-r--r--include/net/eth.h61
-rw-r--r--include/net/filter.h13
-rw-r--r--include/net/net.h143
-rw-r--r--include/net/queue.h8
-rw-r--r--include/net/vhost-user.h1
-rw-r--r--include/net/vhost-vdpa.h1
-rw-r--r--include/net/vhost_net.h18
-rw-r--r--include/qapi/compat-policy.h45
-rw-r--r--include/qapi/error.h30
-rw-r--r--include/qapi/forward-visitor.h27
-rw-r--r--include/qapi/qmp/dispatch.h17
-rw-r--r--include/qapi/qmp/json-writer.h35
-rw-r--r--include/qapi/qmp/qbool.h6
-rw-r--r--include/qapi/qmp/qdict.h6
-rw-r--r--include/qapi/qmp/qerror.h56
-rw-r--r--include/qapi/qmp/qjson.h12
-rw-r--r--include/qapi/qmp/qlist.h6
-rw-r--r--include/qapi/qmp/qnull.h4
-rw-r--r--include/qapi/qmp/qnum.h7
-rw-r--r--include/qapi/qmp/qobject.h19
-rw-r--r--include/qapi/qmp/qstring.h18
-rw-r--r--include/qapi/string-output-visitor.h6
-rw-r--r--include/qapi/type-helpers.h22
-rw-r--r--include/qapi/util.h49
-rw-r--r--include/qapi/visitor-impl.h11
-rw-r--r--include/qapi/visitor.h38
-rw-r--r--include/qemu-common.h155
-rw-r--r--include/qemu-main.h11
-rw-r--r--include/qemu/accel.h (renamed from include/sysemu/accel.h)52
-rw-r--r--include/qemu/async-teardown.h20
-rw-r--r--include/qemu/atomic.h439
-rw-r--r--include/qemu/atomic128.h135
-rw-r--r--include/qemu/base64.h2
-rw-r--r--include/qemu/bitmap.h53
-rw-r--r--include/qemu/bitops.h75
-rw-r--r--include/qemu/bswap.h230
-rw-r--r--include/qemu/buffer.h4
-rw-r--r--include/qemu/cacheflush.h35
-rw-r--r--include/qemu/cacheinfo.h21
-rw-r--r--include/qemu/chardev_open.h16
-rw-r--r--include/qemu/clang-tsa.h114
-rw-r--r--include/qemu/co-shared-resource.h4
-rw-r--r--include/qemu/compiler.h178
-rw-r--r--include/qemu/config-file.h17
-rw-r--r--include/qemu/coroutine-core.h154
-rw-r--r--include/qemu/coroutine-tls.h165
-rw-r--r--include/qemu/coroutine.h255
-rw-r--r--include/qemu/cpu-float.h64
-rw-r--r--include/qemu/cpuid.h48
-rw-r--r--include/qemu/crc-ccitt.h33
-rw-r--r--include/qemu/crc32c.h1
-rw-r--r--include/qemu/cutils.h95
-rw-r--r--include/qemu/datadir.h28
-rw-r--r--include/qemu/dbus.h23
-rw-r--r--include/qemu/defer-call.h16
-rw-r--r--include/qemu/envlist.h8
-rw-r--r--include/qemu/error-report.h26
-rw-r--r--include/qemu/event_notifier.h2
-rw-r--r--include/qemu/fifo8.h55
-rw-r--r--include/qemu/filemonitor.h2
-rw-r--r--include/qemu/guest-random.h8
-rw-r--r--include/qemu/hbitmap.h27
-rw-r--r--include/qemu/help-texts.h13
-rw-r--r--include/qemu/help_option.h11
-rw-r--r--include/qemu/host-utils.h469
-rw-r--r--include/qemu/hw-version.h27
-rw-r--r--include/qemu/id.h1
-rw-r--r--include/qemu/int128.h207
-rw-r--r--include/qemu/interval-tree.h99
-rw-r--r--include/qemu/iov.h31
-rw-r--r--include/qemu/iova-tree.h52
-rw-r--r--include/qemu/job.h338
-rw-r--r--include/qemu/keyval.h15
-rw-r--r--include/qemu/lockable.h90
-rw-r--r--include/qemu/log-for-trace.h2
-rw-r--r--include/qemu/log.h94
-rw-r--r--include/qemu/madvise.h95
-rw-r--r--include/qemu/main-loop.h160
-rw-r--r--include/qemu/memalign.h61
-rw-r--r--include/qemu/mmap-alloc.h48
-rw-r--r--include/qemu/module.h126
-rw-r--r--include/qemu/mprotect.h14
-rw-r--r--include/qemu/notify.h8
-rw-r--r--include/qemu/nvdimm-utils.h1
-rw-r--r--include/qemu/option.h7
-rw-r--r--include/qemu/osdep.h515
-rw-r--r--include/qemu/plugin-event.h26
-rw-r--r--include/qemu/plugin-memory.h22
-rw-r--r--include/qemu/plugin.h133
-rw-r--r--include/qemu/processor.h2
-rw-r--r--include/qemu/progress_meter.h34
-rw-r--r--include/qemu/qemu-plugin.h599
-rw-r--r--include/qemu/qemu-print.h8
-rw-r--r--include/qemu/qemu-progress.h8
-rw-r--r--include/qemu/qtree.h200
-rw-r--r--include/qemu/queue.h7
-rw-r--r--include/qemu/range.h18
-rw-r--r--include/qemu/ratelimit.h26
-rw-r--r--include/qemu/rcu.h56
-rw-r--r--include/qemu/rcu_queue.h108
-rw-r--r--include/qemu/readline.h4
-rw-r--r--include/qemu/reserved-region.h32
-rw-r--r--include/qemu/selfmap.h30
-rw-r--r--include/qemu/seqlock.h8
-rw-r--r--include/qemu/sockets.h48
-rw-r--r--include/qemu/stats64.h36
-rw-r--r--include/qemu/sys_membarrier.h4
-rw-r--r--include/qemu/thread-context.h57
-rw-r--r--include/qemu/thread-posix.h23
-rw-r--r--include/qemu/thread-win32.h6
-rw-r--r--include/qemu/thread.h74
-rw-r--r--include/qemu/throttle.h16
-rw-r--r--include/qemu/timer.h87
-rw-r--r--include/qemu/transactions.h66
-rw-r--r--include/qemu/typedefs.h28
-rw-r--r--include/qemu/uri.h52
-rw-r--r--include/qemu/userfaultfd.h46
-rw-r--r--include/qemu/uuid.h19
-rw-r--r--include/qemu/vfio-helpers.h4
-rw-r--r--include/qemu/vhost-user-server.h73
-rw-r--r--include/qemu/win_dump_defs.h115
-rw-r--r--include/qemu/xattr.h8
-rw-r--r--include/qemu/xxhash.h121
-rw-r--r--include/qemu/yank.h87
-rw-r--r--include/qom/object.h871
-rw-r--r--include/qom/object_interfaces.h106
-rw-r--r--include/scsi/constants.h4
-rw-r--r--include/scsi/pr-manager.h17
-rw-r--r--include/scsi/utils.h31
-rw-r--r--include/semihosting/common-semi.h39
-rw-r--r--include/semihosting/console.h59
-rw-r--r--include/semihosting/guestfd.h91
-rw-r--r--include/semihosting/semihost.h (renamed from include/hw/semihosting/semihost.h)28
-rw-r--r--include/semihosting/syscalls.h75
-rw-r--r--include/semihosting/uaccess.h63
-rw-r--r--include/standard-headers/asm-m68k/bootinfo-mac.h120
-rw-r--r--include/standard-headers/asm-m68k/bootinfo-virt.h21
-rw-r--r--include/standard-headers/asm-m68k/bootinfo.h172
-rw-r--r--include/standard-headers/asm-x86/bootparam.h15
-rw-r--r--include/standard-headers/asm-x86/kvm_para.h18
-rw-r--r--include/standard-headers/asm-x86/setup_data.h83
-rw-r--r--include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h685
-rw-r--r--include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h114
-rw-r--r--include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h383
-rw-r--r--include/standard-headers/drm/drm_fourcc.h666
-rw-r--r--include/standard-headers/linux/const.h36
-rw-r--r--include/standard-headers/linux/ethtool.h441
-rw-r--r--include/standard-headers/linux/fuse.h322
-rw-r--r--include/standard-headers/linux/input-event-codes.h38
-rw-r--r--include/standard-headers/linux/input.h14
-rw-r--r--include/standard-headers/linux/kernel.h9
-rw-r--r--include/standard-headers/linux/pci_regs.h228
-rw-r--r--include/standard-headers/linux/pvpanic.h9
-rw-r--r--include/standard-headers/linux/udmabuf.h32
-rw-r--r--include/standard-headers/linux/vhost_types.h66
-rw-r--r--include/standard-headers/linux/virtio_9p.h6
-rw-r--r--include/standard-headers/linux/virtio_blk.h150
-rw-r--r--include/standard-headers/linux/virtio_bt.h39
-rw-r--r--include/standard-headers/linux/virtio_config.h38
-rw-r--r--include/standard-headers/linux/virtio_console.h8
-rw-r--r--include/standard-headers/linux/virtio_crypto.h82
-rw-r--r--include/standard-headers/linux/virtio_fs.h3
-rw-r--r--include/standard-headers/linux/virtio_gpio.h72
-rw-r--r--include/standard-headers/linux/virtio_gpu.h121
-rw-r--r--include/standard-headers/linux/virtio_i2c.h47
-rw-r--r--include/standard-headers/linux/virtio_ids.h70
-rw-r--r--include/standard-headers/linux/virtio_iommu.h8
-rw-r--r--include/standard-headers/linux/virtio_mem.h9
-rw-r--r--include/standard-headers/linux/virtio_mmio.h11
-rw-r--r--include/standard-headers/linux/virtio_net.h59
-rw-r--r--include/standard-headers/linux/virtio_pci.h92
-rw-r--r--include/standard-headers/linux/virtio_pcidev.h65
-rw-r--r--include/standard-headers/linux/virtio_pmem.h7
-rw-r--r--include/standard-headers/linux/virtio_ring.h16
-rw-r--r--include/standard-headers/linux/virtio_scmi.h24
-rw-r--r--include/standard-headers/linux/virtio_scsi.h20
-rw-r--r--include/standard-headers/linux/virtio_snd.h488
-rw-r--r--include/standard-headers/linux/virtio_vsock.h10
-rw-r--r--include/standard-headers/rdma/vmw_pvrdma-abi.h303
-rw-r--r--include/sysemu/accel-blocker.h55
-rw-r--r--include/sysemu/accel-ops.h58
-rw-r--r--include/sysemu/arch_init.h11
-rw-r--r--include/sysemu/balloon.h2
-rw-r--r--include/sysemu/block-backend-common.h103
-rw-r--r--include/sysemu/block-backend-global-state.h133
-rw-r--r--include/sysemu/block-backend-io.h230
-rw-r--r--include/sysemu/block-backend.h256
-rw-r--r--include/sysemu/block-ram-registrar.h37
-rw-r--r--include/sysemu/blockdev.h18
-rw-r--r--include/sysemu/cpu-timers-internal.h71
-rw-r--r--include/sysemu/cpu-timers.h103
-rw-r--r--include/sysemu/cpus.h54
-rw-r--r--include/sysemu/cryptodev-vhost-user.h2
-rw-r--r--include/sysemu/cryptodev-vhost.h4
-rw-r--r--include/sysemu/cryptodev.h243
-rw-r--r--include/sysemu/device_tree.h34
-rw-r--r--include/sysemu/dirtylimit.h39
-rw-r--r--include/sysemu/dirtyrate.h30
-rw-r--r--include/sysemu/dma.h232
-rw-r--r--include/sysemu/dump-arch.h4
-rw-r--r--include/sysemu/dump.h42
-rw-r--r--include/sysemu/event-loop-base.h40
-rw-r--r--include/sysemu/hax.h54
-rw-r--r--include/sysemu/hostmem.h25
-rw-r--r--include/sysemu/hvf.h56
-rw-r--r--include/sysemu/hvf_int.h70
-rw-r--r--include/sysemu/hw_accel.h71
-rw-r--r--include/sysemu/iommufd.h36
-rw-r--r--include/sysemu/iothread.h18
-rw-r--r--include/sysemu/kvm.h194
-rw-r--r--include/sysemu/kvm_int.h102
-rw-r--r--include/sysemu/kvm_xen.h44
-rw-r--r--include/sysemu/memory_mapping.h7
-rw-r--r--include/sysemu/numa.h5
-rw-r--r--include/sysemu/nvmm.h29
-rw-r--r--include/sysemu/os-posix.h22
-rw-r--r--include/sysemu/os-win32.h130
-rw-r--r--include/sysemu/qtest.h8
-rw-r--r--include/sysemu/replay.h82
-rw-r--r--include/sysemu/reset.h118
-rw-r--r--include/sysemu/rng-random.h3
-rw-r--r--include/sysemu/rng.h10
-rw-r--r--include/sysemu/rtc.h58
-rw-r--r--include/sysemu/runstate-action.h19
-rw-r--r--include/sysemu/runstate.h53
-rw-r--r--include/sysemu/sev.h21
-rw-r--r--include/sysemu/stats.h45
-rw-r--r--include/sysemu/sysemu.h33
-rw-r--r--include/sysemu/tcg.h3
-rw-r--r--include/sysemu/tpm.h31
-rw-r--r--include/sysemu/tpm_backend.h18
-rw-r--r--include/sysemu/tpm_util.h2
-rw-r--r--include/sysemu/vhost-user-backend.h17
-rw-r--r--include/sysemu/watchdog.h13
-rw-r--r--include/sysemu/whpx.h23
-rw-r--r--include/sysemu/xen-mapcache.h3
-rw-r--r--include/sysemu/xen.h12
-rw-r--r--include/tcg/debug-assert.h17
-rw-r--r--include/tcg/debuginfo.h79
-rw-r--r--include/tcg/helper-info.h64
-rw-r--r--include/tcg/insn-start-words.h17
-rw-r--r--include/tcg/oversized-guest.h23
-rw-r--r--include/tcg/perf.h49
-rw-r--r--include/tcg/startup.h58
-rw-r--r--include/tcg/tcg-cond.h133
-rw-r--r--include/tcg/tcg-gvec-desc.h36
-rw-r--r--include/tcg/tcg-ldst.h63
-rw-r--r--include/tcg/tcg-op-common.h561
-rw-r--r--include/tcg/tcg-op-gvec-common.h432
-rw-r--r--include/tcg/tcg-op-gvec.h423
-rw-r--r--include/tcg/tcg-op.h1171
-rw-r--r--include/tcg/tcg-opc.h94
-rw-r--r--include/tcg/tcg-temp-internal.h45
-rw-r--r--include/tcg/tcg.h911
-rw-r--r--include/trace-tcg.h6
-rw-r--r--include/ui/clipboard.h277
-rw-r--r--include/ui/console.h293
-rw-r--r--include/ui/dbus-display.h17
-rw-r--r--include/ui/dbus-module.h11
-rw-r--r--include/ui/egl-context.h7
-rw-r--r--include/ui/egl-helpers.h28
-rw-r--r--include/ui/gtk.h98
-rw-r--r--include/ui/input.h12
-rw-r--r--include/ui/kbd-state.h13
-rw-r--r--include/ui/pixman-minimal.h239
-rw-r--r--include/ui/qemu-pixman.h31
-rw-r--r--include/ui/qemu-spice-module.h44
-rw-r--r--include/ui/qemu-spice.h51
-rw-r--r--include/ui/rect.h57
-rw-r--r--include/ui/sdl2.h22
-rw-r--r--include/ui/spice-display.h11
-rw-r--r--include/ui/surface.h95
-rw-r--r--include/user/safe-syscall.h140
-rw-r--r--include/user/syscall-trace.h21
1169 files changed, 69495 insertions, 23407 deletions
diff --git a/include/authz/base.h b/include/authz/base.h
index 0782981ad8..b53e4e4507 100644
--- a/include/authz/base.h
+++ b/include/authz/base.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -27,18 +27,9 @@
#define TYPE_QAUTHZ "authz"
-#define QAUTHZ_CLASS(klass) \
- OBJECT_CLASS_CHECK(QAuthZClass, (klass), \
- TYPE_QAUTHZ)
-#define QAUTHZ_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QAuthZClass, (obj), \
- TYPE_QAUTHZ)
-#define QAUTHZ(obj) \
- OBJECT_CHECK(QAuthZ, (obj), \
- TYPE_QAUTHZ)
-
-typedef struct QAuthZ QAuthZ;
-typedef struct QAuthZClass QAuthZClass;
+OBJECT_DECLARE_TYPE(QAuthZ, QAuthZClass,
+ QAUTHZ)
+
/**
* QAuthZ:
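
The hunk above replaces the hand-written QAUTHZ_CLASS()/QAUTHZ_GET_CLASS()/QAUTHZ() macros and typedefs with a single OBJECT_DECLARE_TYPE() invocation. As a rough sketch of what that declaration is expected to provide to callers under QEMU's usual QOM conventions (the helper function below is hypothetical and not part of the header or the patch):

#include "qemu/osdep.h"
#include "authz/base.h"

/* Hypothetical caller: OBJECT_DECLARE_TYPE(QAuthZ, QAuthZClass, QAUTHZ) is
 * expected to declare the QAuthZ/QAuthZClass typedefs plus the QAUTHZ(),
 * QAUTHZ_CLASS() and QAUTHZ_GET_CLASS() checked-cast helpers, so code like
 * the following should still compile after the conversion. */
static void example_inspect(Object *obj)
{
    QAuthZ *authz = QAUTHZ(obj);                  /* checked instance cast */
    QAuthZClass *klass = QAUTHZ_GET_CLASS(authz); /* class lookup */

    (void)authz;
    (void)klass;
}
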
diff --git a/include/authz/list.h b/include/authz/list.h
index a88cdbbcf8..7ef4ad4b4e 100644
--- a/include/authz/list.h
+++ b/include/authz/list.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,21 +23,13 @@
#include "authz/base.h"
#include "qapi/qapi-types-authz.h"
+#include "qom/object.h"
#define TYPE_QAUTHZ_LIST "authz-list"
-#define QAUTHZ_LIST_CLASS(klass) \
- OBJECT_CLASS_CHECK(QAuthZListClass, (klass), \
- TYPE_QAUTHZ_LIST)
-#define QAUTHZ_LIST_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QAuthZListClass, (obj), \
- TYPE_QAUTHZ_LIST)
-#define QAUTHZ_LIST(obj) \
- OBJECT_CHECK(QAuthZList, (obj), \
- TYPE_QAUTHZ_LIST)
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZList,
+ QAUTHZ_LIST)
-typedef struct QAuthZList QAuthZList;
-typedef struct QAuthZListClass QAuthZListClass;
/**
@@ -76,9 +68,6 @@ struct QAuthZList {
};
-struct QAuthZListClass {
- QAuthZClass parent_class;
-};
QAuthZList *qauthz_list_new(const char *id,
diff --git a/include/authz/listfile.h b/include/authz/listfile.h
index 24ae2e606c..0b7fe72198 100644
--- a/include/authz/listfile.h
+++ b/include/authz/listfile.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,21 +23,13 @@
#include "authz/list.h"
#include "qemu/filemonitor.h"
+#include "qom/object.h"
#define TYPE_QAUTHZ_LIST_FILE "authz-list-file"
-#define QAUTHZ_LIST_FILE_CLASS(klass) \
- OBJECT_CLASS_CHECK(QAuthZListFileClass, (klass), \
- TYPE_QAUTHZ_LIST_FILE)
-#define QAUTHZ_LIST_FILE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QAuthZListFileClass, (obj), \
- TYPE_QAUTHZ_LIST_FILE)
-#define QAUTHZ_LIST_FILE(obj) \
- OBJECT_CHECK(QAuthZListFile, (obj), \
- TYPE_QAUTHZ_LIST_FILE)
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZListFile,
+ QAUTHZ_LIST_FILE)
-typedef struct QAuthZListFile QAuthZListFile;
-typedef struct QAuthZListFileClass QAuthZListFileClass;
/**
@@ -81,7 +73,7 @@ typedef struct QAuthZListFileClass QAuthZListFileClass;
* The object can be created on the command line using
*
* -object authz-list-file,id=authz0,\
- * filename=/etc/qemu/myvm-vnc.acl,refresh=yes
+ * filename=/etc/qemu/myvm-vnc.acl,refresh=on
*
*/
struct QAuthZListFile {
@@ -95,9 +87,6 @@ struct QAuthZListFile {
};
-struct QAuthZListFileClass {
- QAuthZClass parent_class;
-};
QAuthZListFile *qauthz_list_file_new(const char *id,
diff --git a/include/authz/pamacct.h b/include/authz/pamacct.h
index f3a7ef1011..592edb2bf0 100644
--- a/include/authz/pamacct.h
+++ b/include/authz/pamacct.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,22 +22,14 @@
#define QAUTHZ_PAMACCT_H
#include "authz/base.h"
+#include "qom/object.h"
#define TYPE_QAUTHZ_PAM "authz-pam"
-#define QAUTHZ_PAM_CLASS(klass) \
- OBJECT_CLASS_CHECK(QAuthZPAMClass, (klass), \
- TYPE_QAUTHZ_PAM)
-#define QAUTHZ_PAM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QAuthZPAMClass, (obj), \
- TYPE_QAUTHZ_PAM)
-#define QAUTHZ_PAM(obj) \
- OBJECT_CHECK(QAuthZPAM, (obj), \
- TYPE_QAUTHZ_PAM)
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZPAM,
+ QAUTHZ_PAM)
-typedef struct QAuthZPAM QAuthZPAM;
-typedef struct QAuthZPAMClass QAuthZPAMClass;
/**
@@ -87,9 +79,6 @@ struct QAuthZPAM {
};
-struct QAuthZPAMClass {
- QAuthZClass parent_class;
-};
QAuthZPAM *qauthz_pam_new(const char *id,
diff --git a/include/authz/simple.h b/include/authz/simple.h
index 2b7ab0cdd9..c46a5ac5a1 100644
--- a/include/authz/simple.h
+++ b/include/authz/simple.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,21 +22,13 @@
#define QAUTHZ_SIMPLE_H
#include "authz/base.h"
+#include "qom/object.h"
#define TYPE_QAUTHZ_SIMPLE "authz-simple"
-#define QAUTHZ_SIMPLE_CLASS(klass) \
- OBJECT_CLASS_CHECK(QAuthZSimpleClass, (klass), \
- TYPE_QAUTHZ_SIMPLE)
-#define QAUTHZ_SIMPLE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QAuthZSimpleClass, (obj), \
- TYPE_QAUTHZ_SIMPLE)
-#define QAUTHZ_SIMPLE(obj) \
- OBJECT_CHECK(QAuthZSimple, (obj), \
- TYPE_QAUTHZ_SIMPLE)
+OBJECT_DECLARE_SIMPLE_TYPE(QAuthZSimple,
+ QAUTHZ_SIMPLE)
-typedef struct QAuthZSimple QAuthZSimple;
-typedef struct QAuthZSimpleClass QAuthZSimpleClass;
/**
@@ -70,9 +62,6 @@ struct QAuthZSimple {
};
-struct QAuthZSimpleClass {
- QAuthZClass parent_class;
-};
QAuthZSimple *qauthz_simple_new(const char *id,
diff --git a/include/block/accounting.h b/include/block/accounting.h
index 878b4c3581..a59e39f49d 100644
--- a/include/block/accounting.h
+++ b/include/block/accounting.h
@@ -27,7 +27,7 @@
#include "qemu/timed-average.h"
#include "qemu/thread.h"
-#include "qapi/qapi-builtin-types.h"
+#include "qapi/qapi-types-common.h"
typedef struct BlockAcctTimedStats BlockAcctTimedStats;
typedef struct BlockAcctStats BlockAcctStats;
@@ -37,6 +37,7 @@ enum BlockAcctType {
BLOCK_ACCT_READ,
BLOCK_ACCT_WRITE,
BLOCK_ACCT_FLUSH,
+ BLOCK_ACCT_ZONE_APPEND,
BLOCK_ACCT_UNMAP,
BLOCK_MAX_IOTYPE,
};
@@ -100,8 +101,8 @@ typedef struct BlockAcctCookie {
} BlockAcctCookie;
void block_acct_init(BlockAcctStats *stats);
-void block_acct_setup(BlockAcctStats *stats, bool account_invalid,
- bool account_failed);
+void block_acct_setup(BlockAcctStats *stats, enum OnOffAuto account_invalid,
+ enum OnOffAuto account_failed);
void block_acct_cleanup(BlockAcctStats *stats);
void block_acct_add_interval(BlockAcctStats *stats, unsigned interval_length);
BlockAcctTimedStats *block_acct_interval_next(BlockAcctStats *stats,
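
With the signature change above, block_acct_setup() takes OnOffAuto tristates instead of plain booleans. A hypothetical call site (illustrative only; everything beyond the two declarations shown in the hunk is an assumption) might look like:

#include "block/accounting.h"

/* Hypothetical: initialise the stats and leave both invalid- and
 * failed-operation accounting in "auto" mode via the new tristate values. */
static void example_acct_setup(BlockAcctStats *stats)
{
    block_acct_init(stats);
    block_acct_setup(stats, ON_OFF_AUTO_AUTO, ON_OFF_AUTO_AUTO);
}
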
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 716d2639df..cf5e8bde1c 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -59,7 +59,7 @@ typedef struct {
extern AioWait global_aio_wait;
/**
- * AIO_WAIT_WHILE:
+ * AIO_WAIT_WHILE_INTERNAL:
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
* caller does not hold a lock) are involved in the polling condition.
* @cond: wait while this conditional expression is true
@@ -75,12 +75,14 @@ extern AioWait global_aio_wait;
* wait on conditions between two IOThreads since that could lead to deadlock,
* go via the main loop instead.
*/
-#define AIO_WAIT_WHILE(ctx, cond) ({ \
+#define AIO_WAIT_WHILE_INTERNAL(ctx, cond) ({ \
bool waited_ = false; \
AioWait *wait_ = &global_aio_wait; \
AioContext *ctx_ = (ctx); \
/* Increment wait_->num_waiters before evaluating cond. */ \
- atomic_inc(&wait_->num_waiters); \
+ qatomic_inc(&wait_->num_waiters); \
+ /* Paired with smp_mb in aio_wait_kick(). */ \
+ smp_mb__after_rmw(); \
if (ctx_ && in_aio_context_home_thread(ctx_)) { \
while ((cond)) { \
aio_poll(ctx_, true); \
@@ -90,19 +92,20 @@ extern AioWait global_aio_wait;
assert(qemu_get_current_aio_context() == \
qemu_get_aio_context()); \
while ((cond)) { \
- if (ctx_) { \
- aio_context_release(ctx_); \
- } \
aio_poll(qemu_get_aio_context(), true); \
- if (ctx_) { \
- aio_context_acquire(ctx_); \
- } \
waited_ = true; \
} \
} \
- atomic_dec(&wait_->num_waiters); \
+ qatomic_dec(&wait_->num_waiters); \
waited_; })
+#define AIO_WAIT_WHILE(ctx, cond) \
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond)
+
+/* TODO replace this with AIO_WAIT_WHILE() in a future patch */
+#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond)
+
/**
* aio_wait_kick:
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
@@ -120,7 +123,7 @@ void aio_wait_kick(void);
*
* Run a BH in @ctx and wait for it to complete.
*
- * Must be called from the main loop thread with @ctx acquired exactly once.
+ * Must be called from the main loop thread without @ctx acquired.
* Note that main loop event processing may occur.
*/
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
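For illustration, the typical AIO_WAIT_WHILE() pattern looks like the sketch below; the MyRequest structure, its fields and the callback names are hypothetical, and whoever clears in_flight is expected to call aio_wait_kick():

    typedef struct MyRequest {
        AioContext *ctx;    /* context in which the request completes */
        int in_flight;      /* cleared by the completion callback */
    } MyRequest;

    static void my_request_cb(void *opaque, int ret)
    {
        MyRequest *req = opaque;

        req->in_flight = 0;
        aio_wait_kick();    /* wake a thread blocked in AIO_WAIT_WHILE() */
    }

    static void my_wait_for_request(MyRequest *req)
    {
        /* Polls the event loop until the condition becomes false. */
        AIO_WAIT_WHILE(req->ctx, req->in_flight > 0);
    }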
@@ -140,7 +143,7 @@ static inline bool in_aio_context_home_thread(AioContext *ctx)
}
if (ctx == qemu_get_aio_context()) {
- return qemu_mutex_iothread_locked();
+ return bql_locked();
} else {
return false;
}
diff --git a/include/block/aio.h b/include/block/aio.h
index b2f703fa3f..8378553eb9 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -17,17 +17,20 @@
#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
+#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
+#include "block/graph-lock.h"
+#include "hw/qdev-core.h"
+
typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);
typedef struct AIOCBInfo {
void (*cancel_async)(BlockAIOCB *acb);
- AioContext *(*get_aio_context)(BlockAIOCB *acb);
size_t aiocb_size;
} AIOCBInfo;
@@ -50,10 +53,9 @@ typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
-struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
-struct LuringState;
+typedef struct LuringState LuringState;
/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);
@@ -126,6 +128,14 @@ struct AioContext {
/* Used by AioContext users to protect from multi-threaded access. */
QemuRecMutex lock;
+ /*
+ * Keep track of readers and writers of the block layer graph.
+ * This is essential to avoid performing additions and removals
+ * of nodes and edges from the block graph while some
+ * other thread is traversing it.
+ */
+ BdrvGraphRWlock *bdrv_graph;
+
/* The list of registered AIO handlers. Protected by ctx->list_lock. */
AioHandlerList aio_handlers;
@@ -191,24 +201,18 @@ struct AioContext {
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
QEMUBH *co_schedule_bh;
+ int thread_pool_min;
+ int thread_pool_max;
/* Thread pool for performing work and receiving completion callbacks.
* Has its own locking.
*/
struct ThreadPool *thread_pool;
#ifdef CONFIG_LINUX_AIO
- /*
- * State for native Linux AIO. Uses aio_context_acquire/release for
- * locking.
- */
struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
- /*
- * State for Linux io_uring. Uses aio_context_acquire/release for
- * locking.
- */
- struct LuringState *linux_io_uring;
+ LuringState *linux_io_uring;
/* State for file descriptor monitoring using Linux io_uring */
struct io_uring fdmon_io_uring;
@@ -220,8 +224,6 @@ struct AioContext {
*/
QEMUTimerListGroup tlg;
- int external_disable_cnt;
-
/* Number of AioHandlers without .io_poll() */
int poll_disable_cnt;
@@ -231,6 +233,9 @@ struct AioContext {
int64_t poll_grow; /* polling time growth factor */
int64_t poll_shrink; /* polling time shrink factor */
+ /* AIO engine parameters */
+ int64_t aio_max_batch; /* maximum number of requests in a batch */
+
/*
* List of handlers participating in userspace polling. Protected by
* ctx->list_lock. Iterated and modified mostly by the event loop thread
@@ -273,37 +278,57 @@ void aio_context_ref(AioContext *ctx);
*/
void aio_context_unref(AioContext *ctx);
-/* Take ownership of the AioContext. If the AioContext will be shared between
- * threads, and a thread does not want to be interrupted, it will have to
- * take ownership around calls to aio_poll(). Otherwise, aio_poll()
- * automatically takes care of calling aio_context_acquire and
- * aio_context_release.
- *
- * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
- * thread still has to call those to avoid being interrupted by the guest.
+/**
+ * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
+ * run only once and as soon as possible.
*
- * Bottom halves, timers and callbacks can be created or removed without
- * acquiring the AioContext.
+ * @name: A human-readable identifier for debugging purposes.
*/
-void aio_context_acquire(AioContext *ctx);
-
-/* Relinquish ownership of the AioContext. */
-void aio_context_release(AioContext *ctx);
+void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
+ const char *name);
/**
* aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
* only once and as soon as possible.
+ *
+ * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
+ * name string.
*/
-void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
+ aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
/**
- * aio_bh_new: Allocate a new bottom half structure.
+ * aio_bh_new_full: Allocate a new bottom half structure.
*
* Bottom halves are lightweight callbacks whose invocation is guaranteed
* to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
* is opaque and must be allocated prior to its use.
+ *
+ * @name: A human-readable identifier for debugging purposes.
+ * @reentrancy_guard: A guard set when entering a cb to prevent
+ * device-reentrancy issues
*/
-QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
+ const char *name, MemReentrancyGuard *reentrancy_guard);
+
+/**
+ * aio_bh_new: Allocate a new bottom half structure
+ *
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
+ * string.
+ */
+#define aio_bh_new(ctx, cb, opaque) \
+ aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)
+
+/**
+ * aio_bh_new_guarded: Allocate a new bottom half structure with a
+ * reentrancy_guard
+ *
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
+ * string.
+ */
+#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
+ aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
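As a usage sketch for these convenience macros (my_bh_cb and my_schedule_work are hypothetical; qemu_bh_schedule() and qemu_bh_delete() are assumed to be the usual QEMUBH helpers):

    static void my_bh_cb(void *opaque)
    {
        /* Runs once in the thread of the AioContext it was created for. */
    }

    static void my_schedule_work(AioContext *ctx, void *opaque)
    {
        /* Fire-and-forget: the oneshot BH is freed after it has run. */
        aio_bh_schedule_oneshot(ctx, my_bh_cb, opaque);

        /* Reusable BH: keep the handle, schedule as needed, delete later. */
        QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, opaque);
        qemu_bh_schedule(bh);
        /* ... */
        qemu_bh_delete(bh);
    }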
/**
* aio_notify: Force processing of pending events.
@@ -425,7 +450,7 @@ void aio_dispatch(AioContext *ctx);
* or more AIO events have completed, to ensure something has moved
* before returning.
*/
-bool aio_poll(AioContext *ctx, bool blocking);
+bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);
/* Register a file descriptor and associated callbacks. Behaves very similarly
* to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
@@ -436,20 +461,12 @@ bool aio_poll(AioContext *ctx, bool blocking);
*/
void aio_set_fd_handler(AioContext *ctx,
int fd,
- bool is_external,
IOHandler *io_read,
IOHandler *io_write,
AioPollFn *io_poll,
+ IOHandler *io_poll_ready,
void *opaque);
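A sketch of registering and removing a file descriptor with the updated signature (the is_external argument is gone and io_poll_ready has been added); my_fd_read and the wrapper functions are hypothetical:

    static void my_fd_read(void *opaque)
    {
        /* Read from the fd and process the data. */
    }

    static void my_register_fd(AioContext *ctx, int fd, void *opaque)
    {
        aio_set_fd_handler(ctx, fd,
                           my_fd_read,  /* io_read */
                           NULL,        /* io_write */
                           NULL,        /* io_poll */
                           NULL,        /* io_poll_ready */
                           opaque);
    }

    static void my_unregister_fd(AioContext *ctx, int fd)
    {
        /* Passing NULL for all handlers removes the registration. */
        aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
    }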
-/* Set polling begin/end callbacks for a file descriptor that has already been
- * registered with aio_set_fd_handler. Do nothing if the file descriptor is
- * not registered.
- */
-void aio_set_fd_poll(AioContext *ctx, int fd,
- IOHandler *io_poll_begin,
- IOHandler *io_poll_end);
-
/* Register an event notifier and associated callbacks. Behaves very similarly
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
* will be invoked when using aio_poll().
@@ -459,13 +476,18 @@ void aio_set_fd_poll(AioContext *ctx, int fd,
*/
void aio_set_event_notifier(AioContext *ctx,
EventNotifier *notifier,
- bool is_external,
EventNotifierHandler *io_read,
- AioPollFn *io_poll);
+ AioPollFn *io_poll,
+ EventNotifierHandler *io_poll_ready);
-/* Set polling begin/end callbacks for an event notifier that has already been
+/*
+ * Set polling begin/end callbacks for an event notifier that has already been
* registered with aio_set_event_notifier. Do nothing if the event notifier is
* not registered.
+ *
+ * Note that if the io_poll_end() callback (or the entire notifier) is removed
+ * during polling, it will not be called, so an io_poll_begin() is not
+ * necessarily always followed by an io_poll_end().
*/
void aio_set_event_notifier_poll(AioContext *ctx,
EventNotifier *notifier,
@@ -487,10 +509,10 @@ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);
/* Setup the LuringState bound to this AioContext */
-struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);
+LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);
/* Return the LuringState bound to this AioContext */
-struct LuringState *aio_get_linux_io_uring(AioContext *ctx);
+LuringState *aio_get_linux_io_uring(AioContext *ctx);
/**
* aio_timer_new_with_attrs:
* @ctx: the aio context
@@ -588,59 +610,6 @@ static inline void aio_timer_init(AioContext *ctx,
int64_t aio_compute_timeout(AioContext *ctx);
/**
- * aio_disable_external:
- * @ctx: the aio context
- *
- * Disable the further processing of external clients.
- */
-static inline void aio_disable_external(AioContext *ctx)
-{
- atomic_inc(&ctx->external_disable_cnt);
-}
-
-/**
- * aio_enable_external:
- * @ctx: the aio context
- *
- * Enable the processing of external clients.
- */
-static inline void aio_enable_external(AioContext *ctx)
-{
- int old;
-
- old = atomic_fetch_dec(&ctx->external_disable_cnt);
- assert(old > 0);
- if (old == 1) {
- /* Kick event loop so it re-arms file descriptors */
- aio_notify(ctx);
- }
-}
-
-/**
- * aio_external_disabled:
- * @ctx: the aio context
- *
- * Return true if the external clients are disabled.
- */
-static inline bool aio_external_disabled(AioContext *ctx)
-{
- return atomic_read(&ctx->external_disable_cnt);
-}
-
-/**
- * aio_node_check:
- * @ctx: the aio context
- * @is_external: Whether or not the checked node is an external event source.
- *
- * Check if the node's is_external flag is okay to be polled by the ctx at this
- * moment. True means green light.
- */
-static inline bool aio_node_check(AioContext *ctx, bool is_external)
-{
- return !is_external || !atomic_read(&ctx->external_disable_cnt);
-}
-
-/**
* aio_co_schedule:
* @ctx: the aio context
* @co: the coroutine
@@ -652,7 +621,16 @@ static inline bool aio_node_check(AioContext *ctx, bool is_external)
* is the context in which the coroutine is running (i.e. the value of
* qemu_get_current_aio_context() from the coroutine itself).
*/
-void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
+void aio_co_schedule(AioContext *ctx, Coroutine *co);
+
+/**
+ * aio_co_reschedule_self:
+ * @new_ctx: the new context
+ *
+ * Move the currently running coroutine to new_ctx. If the coroutine is already
+ * running in new_ctx, do nothing.
+ */
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);
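For illustration, a coroutine can hop to the main loop context and back like this (my_co_do_main_loop_work is hypothetical):

    static void coroutine_fn my_co_do_main_loop_work(void *opaque)
    {
        AioContext *old_ctx = qemu_get_current_aio_context();

        /* Continue execution in the main loop AioContext. */
        aio_co_reschedule_self(qemu_get_aio_context());

        /* ... work that must run in the main loop goes here ... */

        /* Move back to the original context before returning. */
        aio_co_reschedule_self(old_ctx);
    }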
/**
* aio_co_wake:
@@ -666,7 +644,7 @@ void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
* context. The coroutine must not be entered by anyone else while
* aio_co_wake() is active.
*/
-void aio_co_wake(struct Coroutine *co);
+void aio_co_wake(Coroutine *co);
/**
* aio_co_enter:
@@ -675,16 +653,19 @@ void aio_co_wake(struct Coroutine *co);
*
* Enter a coroutine in the specified AioContext.
*/
-void aio_co_enter(AioContext *ctx, struct Coroutine *co);
+void aio_co_enter(AioContext *ctx, Coroutine *co);
/**
* Return the AioContext whose event loop runs in the current thread.
*
* If called from an IOThread this will be the IOThread's AioContext. If
- * called from another thread it will be the main loop AioContext.
+ * called from the main thread or with the "big QEMU lock" taken it
+ * will be the main loop AioContext.
*/
AioContext *qemu_get_current_aio_context(void);
+void qemu_set_current_aio_context(AioContext *ctx);
+
/**
* aio_context_setup:
* @ctx: the aio context
@@ -717,4 +698,20 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
int64_t grow, int64_t shrink,
Error **errp);
+/**
+ * aio_context_set_aio_params:
+ * @ctx: the aio context
+ * @max_batch: maximum number of requests in a batch, 0 means that the
+ * engine will use its default
+ */
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);
+
+/**
+ * aio_context_set_thread_pool_params:
+ * @ctx: the aio context
+ * @min: min number of threads to have readily available in the thread pool
+ * @min: max number of threads the thread pool can contain
+ */
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
+ int64_t max, Error **errp);
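A small sketch of how these tuning knobs might be applied to a freshly created AioContext; the values are arbitrary examples, not recommended defaults:

    static void my_tune_aio_context(AioContext *ctx, Error **errp)
    {
        /* Batch up to 32 requests before submitting them to the AIO engine. */
        aio_context_set_aio_params(ctx, 32);

        /* Keep at least 2 worker threads ready; never grow beyond 64. */
        aio_context_set_thread_pool_params(ctx, 2, 64, errp);
    }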
#endif
diff --git a/include/block/aio_task.h b/include/block/aio_task.h
index 50bc1e1817..18a9c41f4e 100644
--- a/include/block/aio_task.h
+++ b/include/block/aio_task.h
@@ -25,8 +25,6 @@
#ifndef BLOCK_AIO_TASK_H
#define BLOCK_AIO_TASK_H
-#include "qemu/coroutine.h"
-
typedef struct AioTaskPool AioTaskPool;
typedef struct AioTask AioTask;
typedef int coroutine_fn (*AioTaskFunc)(AioTask *task);
diff --git a/include/block/block-common.h b/include/block/block-common.h
new file mode 100644
index 0000000000..a846023a09
--- /dev/null
+++ b/include/block/block-common.h
@@ -0,0 +1,565 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_COMMON_H
+#define BLOCK_COMMON_H
+
+#include "qapi/qapi-types-block-core.h"
+#include "qemu/queue.h"
+
+/*
+ * co_wrapper{*}: Function specifiers used by block-coroutine-wrapper.py
+ *
+ * Function specifiers, which do nothing but mark functions to be
+ * generated by scripts/block-coroutine-wrapper.py
+ *
+ * Usage: read docs/devel/block-coroutine-wrapper.rst
+ *
+ * There are 4 kinds of specifiers:
+ * - co_wrapper functions can be called only from non-coroutine context,
+ *   because they always generate a new coroutine.
+ * - co_wrapper_mixed functions can be called from both coroutine and
+ *   non-coroutine context.
+ * - co_wrapper_bdrv_rdlock functions are co_wrapper functions that also take
+ *   and release the graph rdlock when creating a new coroutine.
+ * - co_wrapper_mixed_bdrv_rdlock functions are co_wrapper_mixed functions that
+ *   also take and release the graph rdlock when creating a new coroutine.
+ *
+ * These functions should not be called from a coroutine_fn; instead,
+ * call the wrapped function directly.
+ */
+#define co_wrapper no_coroutine_fn
+#define co_wrapper_mixed no_coroutine_fn coroutine_mixed_fn
+#define co_wrapper_bdrv_rdlock no_coroutine_fn
+#define co_wrapper_mixed_bdrv_rdlock no_coroutine_fn coroutine_mixed_fn
+
+/*
+ * no_co_wrapper: Function specifier used by block-coroutine-wrapper.py
+ *
+ * Function specifier which does nothing but mark functions to be generated by
+ * scripts/block-coroutine-wrapper.py.
+ *
+ * A no_co_wrapper function declaration creates a coroutine_fn wrapper around
+ * functions that must not be called in coroutine context. It achieves this by
+ * scheduling a bottom half (BH) that runs the respective non-coroutine
+ * function. The coroutine yields after scheduling the BH and is reentered when
+ * the wrapped function returns.
+ *
+ * A no_co_wrapper_bdrv_rdlock function is a no_co_wrapper function that
+ * automatically takes the graph rdlock when calling the wrapped function. In
+ * the same way, no_co_wrapper_bdrv_wrlock functions automatically take the
+ * graph wrlock.
+ */
+#define no_co_wrapper
+#define no_co_wrapper_bdrv_rdlock
+#define no_co_wrapper_bdrv_wrlock
+
+#include "block/blockjob.h"
+
+/* block.c */
+typedef struct BlockDriver BlockDriver;
+typedef struct BdrvChild BdrvChild;
+typedef struct BdrvChildClass BdrvChildClass;
+
+typedef enum BlockZoneOp {
+ BLK_ZO_OPEN,
+ BLK_ZO_CLOSE,
+ BLK_ZO_FINISH,
+ BLK_ZO_RESET,
+} BlockZoneOp;
+
+typedef enum BlockZoneModel {
+ BLK_Z_NONE = 0x0, /* Regular block device */
+ BLK_Z_HM = 0x1, /* Host-managed zoned block device */
+ BLK_Z_HA = 0x2, /* Host-aware zoned block device */
+} BlockZoneModel;
+
+typedef enum BlockZoneState {
+ BLK_ZS_NOT_WP = 0x0,
+ BLK_ZS_EMPTY = 0x1,
+ BLK_ZS_IOPEN = 0x2,
+ BLK_ZS_EOPEN = 0x3,
+ BLK_ZS_CLOSED = 0x4,
+ BLK_ZS_RDONLY = 0xD,
+ BLK_ZS_FULL = 0xE,
+ BLK_ZS_OFFLINE = 0xF,
+} BlockZoneState;
+
+typedef enum BlockZoneType {
+ BLK_ZT_CONV = 0x1, /* Conventional random writes supported */
+ BLK_ZT_SWR = 0x2, /* Sequential writes required */
+ BLK_ZT_SWP = 0x3, /* Sequential writes preferred */
+} BlockZoneType;
+
+/*
+ * Zone descriptor data structure.
+ * Provides information on a zone with all position and size values in bytes.
+ */
+typedef struct BlockZoneDescriptor {
+ uint64_t start;
+ uint64_t length;
+ uint64_t cap;
+ uint64_t wp;
+ BlockZoneType type;
+ BlockZoneState state;
+} BlockZoneDescriptor;
+
+/*
+ * Track write pointers of a zone in bytes.
+ */
+typedef struct BlockZoneWps {
+ CoMutex colock;
+ uint64_t wp[];
+} BlockZoneWps;
+
+typedef struct BlockDriverInfo {
+ /* in bytes, 0 if irrelevant */
+ int cluster_size;
+ /*
+ * A fraction of cluster_size, if supported (currently QCOW2 only); if
+ * disabled or unsupported, set equal to cluster_size.
+ */
+ int subcluster_size;
+ /* offset at which the VM state can be saved (0 if not possible) */
+ int64_t vm_state_offset;
+ bool is_dirty;
+ /*
+ * True if this block driver only supports compressed writes
+ */
+ bool needs_compressed_writes;
+} BlockDriverInfo;
+
+typedef struct BlockFragInfo {
+ uint64_t allocated_clusters;
+ uint64_t total_clusters;
+ uint64_t fragmented_clusters;
+ uint64_t compressed_clusters;
+} BlockFragInfo;
+
+typedef enum {
+ BDRV_REQ_COPY_ON_READ = 0x1,
+ BDRV_REQ_ZERO_WRITE = 0x2,
+
+ /*
+ * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
+ * that the block driver should unmap (discard) blocks if it is guaranteed
+ * that the result will read back as zeroes. The flag is only passed to the
+ * driver if the block device is opened with BDRV_O_UNMAP.
+ */
+ BDRV_REQ_MAY_UNMAP = 0x4,
+
+ /*
+ * An optimization hint when all QEMUIOVector elements are within
+ * previously registered bdrv_register_buf() memory ranges.
+ *
+ * Code that replaces the user's QEMUIOVector elements with bounce buffers
+ * must take care to clear this flag.
+ */
+ BDRV_REQ_REGISTERED_BUF = 0x8,
+
+ BDRV_REQ_FUA = 0x10,
+ BDRV_REQ_WRITE_COMPRESSED = 0x20,
+
+ /*
+ * Signifies that this write request will not change the visible disk
+ * content.
+ */
+ BDRV_REQ_WRITE_UNCHANGED = 0x40,
+
+ /*
+ * Forces request serialisation. Use only with write requests.
+ */
+ BDRV_REQ_SERIALISING = 0x80,
+
+ /*
+ * Execute the request only if the operation can be offloaded or otherwise
+ * be executed efficiently, but return an error instead of using a slow
+ * fallback.
+ */
+ BDRV_REQ_NO_FALLBACK = 0x100,
+
+ /*
+ * BDRV_REQ_PREFETCH makes sense only in the context of copy-on-read
+ * (i.e., together with the BDRV_REQ_COPY_ON_READ flag or when a COR
+ * filter is involved), in which case it signals that the COR operation
+ * need not read the data into memory (qiov) but only ensure it is
+ * copied to the top layer (i.e., that the COR operation is done).
+ */
+ BDRV_REQ_PREFETCH = 0x200,
+
+ /*
+ * If we need to wait for other requests, just fail immediately. Used
+ * only together with BDRV_REQ_SERIALISING. Used only with requests aligned
+ * to request_alignment (corresponding assertions are in block/io.c).
+ */
+ BDRV_REQ_NO_WAIT = 0x400,
+
+ /* Mask of valid flags */
+ BDRV_REQ_MASK = 0x7ff,
+} BdrvRequestFlags;
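As an illustration of how these flags combine, the hypothetical helper below asks for an efficient zeroing write: unmap the blocks when the result still reads back as zeroes, and fail instead of taking a slow fallback path (bdrv_co_pwrite_zeroes() is the write-zeroes entry point declared in block-io.h further down):

    static int coroutine_fn my_co_zero_efficiently(BdrvChild *child,
                                                   int64_t offset,
                                                   int64_t bytes)
    {
        BdrvRequestFlags flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;

        return bdrv_co_pwrite_zeroes(child, offset, bytes, flags);
    }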
+
+#define BDRV_O_NO_SHARE 0x0001 /* don't share permissions */
+#define BDRV_O_RDWR 0x0002
+#define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */
+#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save
+ writes in a snapshot */
+#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */
+#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */
+#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the
+ thread pool */
+#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */
+#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */
+#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
+#define BDRV_O_INACTIVE 0x0800 /* consistency hint for migration handoff */
+#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */
+#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */
+#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */
+#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given:
+ select an appropriate protocol driver,
+ ignoring the format layer */
+#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */
+#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening
+ read-write fails */
+#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */
+
+#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
+
+
+/* Option names of options parsed by the block layer */
+
+#define BDRV_OPT_CACHE_WB "cache.writeback"
+#define BDRV_OPT_CACHE_DIRECT "cache.direct"
+#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
+#define BDRV_OPT_READ_ONLY "read-only"
+#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
+#define BDRV_OPT_DISCARD "discard"
+#define BDRV_OPT_FORCE_SHARE "force-share"
+
+
+#define BDRV_SECTOR_BITS 9
+#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
+
+/*
+ * Test the most significant bit of wp: if it is zero, the zone type is SWR;
+ * if it is set, the zone is conventional.
+ */
+#define BDRV_ZT_IS_CONV(wp) (wp & (1ULL << 63))
+
+#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
+ INT_MAX >> BDRV_SECTOR_BITS)
+#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
+
+/*
+ * We want to allow aligning requests and the disk length up to any 32-bit
+ * alignment without risking overflow.
+ * To achieve this, and at the same time use a reasonably round number as the
+ * maximum disk size, define the maximum "length" (a limit for any offset/bytes
+ * request and for the disk size) to be INT64_MAX aligned down to the maximum
+ * request alignment.
+ */
+#define BDRV_MAX_ALIGNMENT (1L << 30)
+#define BDRV_MAX_LENGTH (QEMU_ALIGN_DOWN(INT64_MAX, BDRV_MAX_ALIGNMENT))
+
+/*
+ * Allocation status flags for bdrv_block_status() and friends.
+ *
+ * Public flags:
+ * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
+ * BDRV_BLOCK_ZERO: offset reads as zero
+ * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
+ * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
+ * layer rather than any backing, set by block layer
+ * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
+ * layer, set by block layer
+ * BDRV_BLOCK_COMPRESSED: the underlying data is compressed; only valid for
+ * the formats supporting compression: qcow, qcow2
+ *
+ * Internal flags:
+ * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
+ * that the block layer recompute the answer from the returned
+ * BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
+ * BDRV_BLOCK_RECURSE: request that the block layer recursively search for
+ * zeroes in the file child of the current block node inside
+ * the returned region. Only valid together with both
+ * BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID. Should not
+ * appear with BDRV_BLOCK_ZERO.
+ *
+ * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
+ * host offset within the returned BDS that is allocated for the
+ * corresponding raw guest data. However, whether that offset
+ * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
+ *
+ * DATA ZERO OFFSET_VALID
+ * t t t sectors read as zero, returned file is zero at offset
+ * t f t sectors read as valid from file at offset
+ * f t t sectors preallocated, read as zero, returned file not
+ * necessarily zero at offset
+ * f f t sectors preallocated but read from backing_hd,
+ * returned file contains garbage at offset
+ * t t f sectors preallocated, read as zero, unknown offset
+ * t f f sectors read from unknown file or offset
+ * f t f not allocated or unknown offset, read as zero
+ * f f f not allocated or unknown offset, read from backing_hd
+ */
+#define BDRV_BLOCK_DATA 0x01
+#define BDRV_BLOCK_ZERO 0x02
+#define BDRV_BLOCK_OFFSET_VALID 0x04
+#define BDRV_BLOCK_RAW 0x08
+#define BDRV_BLOCK_ALLOCATED 0x10
+#define BDRV_BLOCK_EOF 0x20
+#define BDRV_BLOCK_RECURSE 0x40
+#define BDRV_BLOCK_COMPRESSED 0x80
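A sketch of interpreting these status bits with bdrv_block_status() (declared in block-io.h further down); my_describe_range is hypothetical:

    static void my_describe_range(BlockDriverState *bs, int64_t offset,
                                  int64_t bytes)
    {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);

        if (ret < 0) {
            return; /* error */
        }
        if (ret & BDRV_BLOCK_ZERO) {
            /* The first pnum bytes read as zeroes. */
        } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID)) ==
                          (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID)) {
            /* Raw data for this range lives in 'file' at host offset 'map'. */
        }
        /* The status in ret applies to pnum bytes starting at offset. */
    }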
+
+typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
+
+typedef struct BDRVReopenState {
+ BlockDriverState *bs;
+ int flags;
+ BlockdevDetectZeroesOptions detect_zeroes;
+ bool backing_missing;
+ BlockDriverState *old_backing_bs; /* keep pointer for permissions update */
+ BlockDriverState *old_file_bs; /* keep pointer for permissions update */
+ QDict *options;
+ QDict *explicit_options;
+ void *opaque;
+} BDRVReopenState;
+
+/*
+ * Block operation types
+ */
+typedef enum BlockOpType {
+ BLOCK_OP_TYPE_BACKUP_SOURCE,
+ BLOCK_OP_TYPE_BACKUP_TARGET,
+ BLOCK_OP_TYPE_CHANGE,
+ BLOCK_OP_TYPE_COMMIT_SOURCE,
+ BLOCK_OP_TYPE_COMMIT_TARGET,
+ BLOCK_OP_TYPE_DATAPLANE,
+ BLOCK_OP_TYPE_DRIVE_DEL,
+ BLOCK_OP_TYPE_EJECT,
+ BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
+ BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
+ BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
+ BLOCK_OP_TYPE_MIRROR_SOURCE,
+ BLOCK_OP_TYPE_MIRROR_TARGET,
+ BLOCK_OP_TYPE_RESIZE,
+ BLOCK_OP_TYPE_STREAM,
+ BLOCK_OP_TYPE_REPLACE,
+ BLOCK_OP_TYPE_MAX,
+} BlockOpType;
+
+/* Block node permission constants */
+enum {
+ /**
+ * A user that has the "permission" of consistent reads is guaranteed that
+ * their view of the contents of the block device is complete and
+ * self-consistent, representing the contents of a disk at a specific
+ * point.
+ *
+ * For most block devices (including their backing files) this is true, but
+ * the property cannot be maintained in a few situations like for
+ * intermediate nodes of a commit block job.
+ */
+ BLK_PERM_CONSISTENT_READ = 0x01,
+
+ /** This permission is required to change the visible disk contents. */
+ BLK_PERM_WRITE = 0x02,
+
+ /**
+ * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
+ * required for writes to the block node when the caller promises that
+ * the visible disk content doesn't change.
+ *
+ * As the BLK_PERM_WRITE permission is strictly stronger, either is
+ * sufficient to perform an unchanging write.
+ */
+ BLK_PERM_WRITE_UNCHANGED = 0x04,
+
+ /** This permission is required to change the size of a block node. */
+ BLK_PERM_RESIZE = 0x08,
+
+ /**
+ * There was a now-removed bit BLK_PERM_GRAPH_MOD, with value of 0x10. QEMU
+ * 6.1 and earlier may still lock the corresponding byte in block/file-posix
+ * locking. So, anyone implementing a new permission should be careful not
+ * to interfere with this old, unused bit.
+ */
+
+ BLK_PERM_ALL = 0x0f,
+
+ DEFAULT_PERM_PASSTHROUGH = BLK_PERM_CONSISTENT_READ
+ | BLK_PERM_WRITE
+ | BLK_PERM_WRITE_UNCHANGED
+ | BLK_PERM_RESIZE,
+
+ DEFAULT_PERM_UNCHANGED = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
+};
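For illustration, a permission mask is built by OR-ing these bits; bdrv_perm_names() (declared at the end of this header) turns it back into a readable string. The helper below is hypothetical and assumes the returned string is heap-allocated, as is usual in QEMU:

    static void my_print_requested_perms(void)
    {
        uint64_t perm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                        BLK_PERM_RESIZE;
        char *str = bdrv_perm_names(perm);

        printf("requesting: %s\n", str);
        g_free(str);    /* assumption: string allocated with g_malloc() */
    }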
+
+/*
+ * Flags that parent nodes assign to child nodes to specify what kind of
+ * role(s) they take.
+ *
+ * At least one of DATA, METADATA, FILTERED, or COW must be set for
+ * every child.
+ *
+ *
+ * = Connection with bs->children, bs->file and bs->backing fields =
+ *
+ * 1. Filters
+ *
+ * Filter drivers have drv->is_filter = true.
+ *
+ * Filter node has exactly one FILTERED|PRIMARY child, and may have other
+ * children which must not have these bits (one example is the
+ * copy-before-write filter, which also has its target DATA child).
+ *
+ * Filter nodes never have COW children.
+ *
+ * For most filters, the filtered child is linked in bs->file, bs->backing is
+ * NULL. For some filters (as an exception), it is the other way around; those
+ * drivers will have drv->filtered_child_is_backing set to true (see that
+ * field’s documentation for the drivers this concerns).
+ *
+ * 2. "raw" driver (block/raw-format.c)
+ *
+ * Formally it's not a filter (drv->is_filter = false)
+ *
+ * bs->backing is always NULL
+ *
+ * Only has one child, linked in bs->file. Its role is either FILTERED|PRIMARY
+ * (like filter) or DATA|PRIMARY depending on options.
+ *
+ * 3. Other drivers
+ *
+ * Don't have any FILTERED children.
+ *
+ * May have at most one COW child. In this case it's linked in bs->backing.
+ * Otherwise bs->backing is NULL. COW child is never PRIMARY.
+ *
+ * May have at most one PRIMARY child. In this case it's linked in bs->file.
+ * Otherwise bs->file is NULL.
+ *
+ * May also have some other children that don't have the PRIMARY or COW bit set.
+ */
+enum BdrvChildRoleBits {
+ /*
+ * This child stores data.
+ * Any node may have an arbitrary number of such children.
+ */
+ BDRV_CHILD_DATA = (1 << 0),
+
+ /*
+ * This child stores metadata.
+ * Any node may have an arbitrary number of metadata-storing
+ * children.
+ */
+ BDRV_CHILD_METADATA = (1 << 1),
+
+ /*
+ * A child that always presents exactly the same visible data as
+ * the parent, e.g. by virtue of the parent forwarding all reads
+ * and writes.
+ * This flag is mutually exclusive with DATA, METADATA, and COW.
+ * Any node may have at most one filtered child at a time.
+ */
+ BDRV_CHILD_FILTERED = (1 << 2),
+
+ /*
+ * Child from which to read all data that isn't allocated in the
+ * parent (i.e., the backing child); such data is copied to the
+ * parent through COW (and optionally COR).
+ * This field is mutually exclusive with DATA, METADATA, and
+ * FILTERED.
+ * Any node may have at most one such backing child at a time.
+ */
+ BDRV_CHILD_COW = (1 << 3),
+
+ /*
+ * The primary child. For most drivers, this is the child whose
+ * filename applies best to the parent node.
+ * Any node may have at most one primary child at a time.
+ */
+ BDRV_CHILD_PRIMARY = (1 << 4),
+
+ /* Useful combination of flags */
+ BDRV_CHILD_IMAGE = BDRV_CHILD_DATA
+ | BDRV_CHILD_METADATA
+ | BDRV_CHILD_PRIMARY,
+};
+
+/* Mask of BdrvChildRoleBits values */
+typedef unsigned int BdrvChildRole;
+
+typedef struct BdrvCheckResult {
+ int corruptions;
+ int leaks;
+ int check_errors;
+ int corruptions_fixed;
+ int leaks_fixed;
+ int64_t image_end_offset;
+ BlockFragInfo bfi;
+} BdrvCheckResult;
+
+typedef enum {
+ BDRV_FIX_LEAKS = 1,
+ BDRV_FIX_ERRORS = 2,
+} BdrvCheckMode;
+
+typedef struct BlockSizes {
+ uint32_t phys;
+ uint32_t log;
+} BlockSizes;
+
+typedef struct HDGeometry {
+ uint32_t heads;
+ uint32_t sectors;
+ uint32_t cylinders;
+} HDGeometry;
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * These functions must never call any function from other categories
+ * (I/O, "I/O or GS", Global State) except this one, but can be invoked by
+ * all of them.
+ */
+
+char *bdrv_perm_names(uint64_t perm);
+uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm);
+
+void bdrv_init_with_whitelist(void);
+bool bdrv_uses_whitelist(void);
+int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
+
+int bdrv_parse_aio(const char *mode, int *flags);
+int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
+int bdrv_parse_discard_flags(const char *mode, int *flags);
+
+int path_has_protocol(const char *path);
+int path_is_absolute(const char *path);
+char *path_combine(const char *base_path, const char *filename);
+
+char *bdrv_get_full_backing_filename_from_filename(const char *backed,
+ const char *backing,
+ Error **errp);
+
+#endif /* BLOCK_COMMON_H */
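A sketch of the path helpers declared above, mirroring what bdrv_get_full_backing_filename_from_filename() does in the simple cases; the file names and my_resolve_backing are examples only:

    static char *my_resolve_backing(void)
    {
        const char *image = "/var/lib/images/top.qcow2";
        const char *backing = "base.qcow2";

        if (path_has_protocol(backing) || path_is_absolute(backing)) {
            return g_strdup(backing);
        }
        /* Relative backing names are resolved against the image's directory. */
        return path_combine(image, backing);
    }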
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index aac85e1488..0700953ab8 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -15,33 +15,87 @@
#ifndef BLOCK_COPY_H
#define BLOCK_COPY_H
-#include "block/block.h"
-#include "qemu/co-shared-resource.h"
+#include "block/block-common.h"
+#include "qemu/progress_meter.h"
-typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
+/* All APIs are thread-safe */
+
+typedef void (*BlockCopyAsyncCallbackFunc)(void *opaque);
typedef struct BlockCopyState BlockCopyState;
+typedef struct BlockCopyCallState BlockCopyCallState;
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
- int64_t cluster_size,
- BdrvRequestFlags write_flags,
+ const BdrvDirtyBitmap *bitmap,
Error **errp);
-void block_copy_set_progress_callback(
- BlockCopyState *s,
- ProgressBytesCallbackFunc progress_bytes_callback,
- void *progress_opaque);
-
+/* This function should be called prior to any actual copy request. */
+void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
+ bool compress);
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
void block_copy_state_free(BlockCopyState *s);
-int64_t block_copy_reset_unallocated(BlockCopyState *s,
- int64_t offset, int64_t *count);
+void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes);
+
+int64_t coroutine_fn GRAPH_RDLOCK
+block_copy_reset_unallocated(BlockCopyState *s, int64_t offset, int64_t *count);
int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
- bool *error_is_read);
+ bool ignore_ratelimit, uint64_t timeout_ns,
+ BlockCopyAsyncCallbackFunc cb,
+ void *cb_opaque);
+
+/*
+ * Run block-copy in a coroutine, create the corresponding BlockCopyCallState
+ * object and return a pointer to it. Never returns NULL.
+ *
+ * The caller is responsible for calling block_copy_call_free() to free the
+ * BlockCopyCallState object.
+ *
+ * @max_workers is the maximum number of parallel coroutines executing
+ * sub-requests; it must be > 0.
+ *
+ * @max_chunk is the maximum length of one I/O operation. Zero means unlimited.
+ */
+BlockCopyCallState *block_copy_async(BlockCopyState *s,
+ int64_t offset, int64_t bytes,
+ int max_workers, int64_t max_chunk,
+ BlockCopyAsyncCallbackFunc cb,
+ void *cb_opaque);
+
+/*
+ * Free a finished BlockCopyCallState. Trying to free a running
+ * block-copy call will crash.
+ */
+void block_copy_call_free(BlockCopyCallState *call_state);
+
+/*
+ * Note that the block-copy call is marked finished prior to calling
+ * the callback.
+ */
+bool block_copy_call_finished(BlockCopyCallState *call_state);
+bool block_copy_call_succeeded(BlockCopyCallState *call_state);
+bool block_copy_call_failed(BlockCopyCallState *call_state);
+bool block_copy_call_cancelled(BlockCopyCallState *call_state);
+int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read);
+
+void block_copy_set_speed(BlockCopyState *s, uint64_t speed);
+void block_copy_kick(BlockCopyCallState *call_state);
+
+/*
+ * Cancel a running block-copy call.
+ *
+ * Cancelling leaves the block-copy state valid: the dirty bits are correct and
+ * you may use cancel + <run block_copy with the same parameters> to emulate
+ * pause/resume.
+ *
+ * Note also that cancellation is asynchronous: it only marks the block-copy
+ * call as cancelled. So the call may be cancelled (block_copy_call_cancelled()
+ * reports true) but not yet finished (block_copy_call_finished() reports
+ * false).
+ */
+void block_copy_call_cancel(BlockCopyCallState *call_state);
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
+int64_t block_copy_cluster_size(BlockCopyState *s);
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
#endif /* BLOCK_COPY_H */
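To illustrate the asynchronous API above, the hypothetical helper below starts an async copy, waits for it to finish and collects the status. The polling loop assumes the caller runs in the main loop; a real caller would typically integrate with its own event handling instead:

    static void my_copy_done(void *opaque)
    {
        bool *done = opaque;
        *done = true;   /* the call state is already marked finished here */
    }

    static int my_copy_range(BlockCopyState *s, int64_t offset, int64_t bytes)
    {
        bool done = false;
        bool error_is_read;
        BlockCopyCallState *call;
        int ret;

        call = block_copy_async(s, offset, bytes,
                                4 /* max_workers */, 0 /* unlimited chunk */,
                                my_copy_done, &done);

        while (!block_copy_call_finished(call)) {
            aio_poll(qemu_get_aio_context(), true);
        }

        ret = block_copy_call_status(call, &error_is_read);
        block_copy_call_free(call);
        return ret;
    }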
diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h
new file mode 100644
index 0000000000..bd7cecd1cf
--- /dev/null
+++ b/include/block/block-global-state.h
@@ -0,0 +1,305 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_GLOBAL_STATE_H
+#define BLOCK_GLOBAL_STATE_H
+
+#include "block/block-common.h"
+#include "qemu/coroutine.h"
+#include "qemu/transactions.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * If a function modifies the graph, it also uses the graph lock to be sure it
+ * has unique access. The graph lock is needed together with BQL because of the
+ * thread-safe I/O API that concurrently runs and accesses the graph without
+ * the BQL.
+ *
+ * It is important to note that not all of these functions are
+ * necessarily limited to running under the BQL, but they would
+ * require additional auditing and many small thread-safety changes
+ * to move them into the I/O API. Often it's not worth doing that
+ * work since the APIs are only used with the BQL held at the
+ * moment, so they have been placed in the GS API (for now).
+ *
+ * These functions can call any function from this and other categories
+ * (I/O, "I/O or GS", Common), but must be invoked only by other GS APIs.
+ *
+ * All functions in this header must use the macro
+ * GLOBAL_STATE_CODE();
+ * to catch when they are accidentally called without the BQL.
+ */
+
+void bdrv_init(void);
+BlockDriver *bdrv_find_protocol(const char *filename,
+ bool allow_protocol_prefix,
+ Error **errp);
+BlockDriver *bdrv_find_format(const char *format_name);
+
+int coroutine_fn GRAPH_UNLOCKED
+bdrv_co_create(BlockDriver *drv, const char *filename, QemuOpts *opts,
+ Error **errp);
+
+int co_wrapper bdrv_create(BlockDriver *drv, const char *filename,
+ QemuOpts *opts, Error **errp);
+
+int coroutine_fn GRAPH_UNLOCKED
+bdrv_co_create_file(const char *filename, QemuOpts *opts, Error **errp);
+
+BlockDriverState *bdrv_new(void);
+int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
+ Error **errp);
+
+int GRAPH_WRLOCK
+bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, Error **errp);
+
+int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
+ Error **errp);
+BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options,
+ int flags, Error **errp);
+int bdrv_drop_filter(BlockDriverState *bs, Error **errp);
+
+BdrvChild * no_coroutine_fn
+bdrv_open_child(const char *filename, QDict *options, const char *bdref_key,
+ BlockDriverState *parent, const BdrvChildClass *child_class,
+ BdrvChildRole child_role, bool allow_none, Error **errp);
+
+BdrvChild * coroutine_fn no_co_wrapper
+bdrv_co_open_child(const char *filename, QDict *options, const char *bdref_key,
+ BlockDriverState *parent, const BdrvChildClass *child_class,
+ BdrvChildRole child_role, bool allow_none, Error **errp);
+
+int bdrv_open_file_child(const char *filename,
+ QDict *options, const char *bdref_key,
+ BlockDriverState *parent, Error **errp);
+
+BlockDriverState * no_coroutine_fn
+bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
+
+BlockDriverState * coroutine_fn no_co_wrapper
+bdrv_co_open_blockdev_ref(BlockdevRef *ref, Error **errp);
+
+int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
+ Error **errp);
+int GRAPH_WRLOCK
+bdrv_set_backing_hd_drained(BlockDriverState *bs, BlockDriverState *backing_hd,
+ Error **errp);
+
+int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
+ const char *bdref_key, Error **errp);
+
+BlockDriverState * no_coroutine_fn
+bdrv_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+BlockDriverState * coroutine_fn no_co_wrapper
+bdrv_co_open(const char *filename, const char *reference,
+ QDict *options, int flags, Error **errp);
+
+BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv,
+ const char *node_name,
+ QDict *options, int flags,
+ Error **errp);
+BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
+ int flags, Error **errp);
+BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
+ BlockDriverState *bs, QDict *options,
+ bool keep_old_opts);
+void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue);
+int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
+int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
+ Error **errp);
+int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
+ Error **errp);
+BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
+ const char *backing_file);
+void GRAPH_RDLOCK bdrv_refresh_filename(BlockDriverState *bs);
+
+void GRAPH_RDLOCK
+bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp);
+
+int bdrv_commit(BlockDriverState *bs);
+int GRAPH_RDLOCK bdrv_make_empty(BdrvChild *c, Error **errp);
+
+void bdrv_register(BlockDriver *bdrv);
+int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
+ const char *backing_file_str,
+ bool backing_mask_protocol);
+
+BlockDriverState * GRAPH_RDLOCK
+bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs);
+
+BlockDriverState * GRAPH_RDLOCK bdrv_find_base(BlockDriverState *bs);
+
+int GRAPH_RDLOCK
+bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
+ Error **errp);
+void GRAPH_RDLOCK
+bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);
+
+/*
+ * The units of offset and total_work_size may be chosen arbitrarily by the
+ * block driver; total_work_size may change during the course of the amendment
+ * operation.
+ */
+typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
+ int64_t total_work_size, void *opaque);
+int GRAPH_RDLOCK
+bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
+ BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
+ bool force, Error **errp);
+
+/* check if a named node can be replaced when doing drive-mirror */
+BlockDriverState * GRAPH_RDLOCK
+check_to_replace_node(BlockDriverState *parent_bs, const char *node_name,
+ Error **errp);
+
+int no_coroutine_fn GRAPH_RDLOCK
+bdrv_activate(BlockDriverState *bs, Error **errp);
+
+int coroutine_fn no_co_wrapper_bdrv_rdlock
+bdrv_co_activate(BlockDriverState *bs, Error **errp);
+
+void bdrv_activate_all(Error **errp);
+int bdrv_inactivate_all(void);
+
+int bdrv_flush_all(void);
+void bdrv_close_all(void);
+void bdrv_drain_all_begin(void);
+void bdrv_drain_all_begin_nopoll(void);
+void bdrv_drain_all_end(void);
+void bdrv_drain_all(void);
+
+void bdrv_aio_cancel(BlockAIOCB *acb);
+
+int bdrv_has_zero_init_1(BlockDriverState *bs);
+int coroutine_mixed_fn GRAPH_RDLOCK bdrv_has_zero_init(BlockDriverState *bs);
+BlockDriverState *bdrv_find_node(const char *node_name);
+BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp);
+XDbgBlockGraph * GRAPH_RDLOCK bdrv_get_xdbg_block_graph(Error **errp);
+BlockDriverState *bdrv_lookup_bs(const char *device,
+ const char *node_name,
+ Error **errp);
+bool GRAPH_RDLOCK
+bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
+
+BlockDriverState *bdrv_next_node(BlockDriverState *bs);
+BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);
+
+typedef struct BdrvNextIterator {
+ enum {
+ BDRV_NEXT_BACKEND_ROOTS,
+ BDRV_NEXT_MONITOR_OWNED,
+ } phase;
+ BlockBackend *blk;
+ BlockDriverState *bs;
+} BdrvNextIterator;
+
+BlockDriverState * GRAPH_RDLOCK bdrv_first(BdrvNextIterator *it);
+BlockDriverState * GRAPH_RDLOCK bdrv_next(BdrvNextIterator *it);
+void bdrv_next_cleanup(BdrvNextIterator *it);
+
+BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
+void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
+ void *opaque, bool read_only);
+
+char * GRAPH_RDLOCK
+bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
+
+char * GRAPH_RDLOCK bdrv_dirname(BlockDriverState *bs, Error **errp);
+
+void bdrv_img_create(const char *filename, const char *fmt,
+ const char *base_filename, const char *base_fmt,
+ char *options, uint64_t img_size, int flags,
+ bool quiet, Error **errp);
+
+void bdrv_ref(BlockDriverState *bs);
+void no_coroutine_fn bdrv_unref(BlockDriverState *bs);
+void coroutine_fn no_co_wrapper bdrv_co_unref(BlockDriverState *bs);
+void GRAPH_WRLOCK bdrv_schedule_unref(BlockDriverState *bs);
+
+void GRAPH_WRLOCK
+bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
+
+void coroutine_fn no_co_wrapper_bdrv_wrlock
+bdrv_co_unref_child(BlockDriverState *parent, BdrvChild *child);
+
+BdrvChild * GRAPH_WRLOCK
+bdrv_attach_child(BlockDriverState *parent_bs,
+ BlockDriverState *child_bs,
+ const char *child_name,
+ const BdrvChildClass *child_class,
+ BdrvChildRole child_role,
+ Error **errp);
+
+bool GRAPH_RDLOCK
+bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
+
+void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
+void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
+void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
+void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
+bool bdrv_op_blocker_is_empty(BlockDriverState *bs);
+
+int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
+ const char *tag);
+int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
+int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
+bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
+
+bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp);
+int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
+ BdrvChild *ignore_child, Error **errp);
+
+int GRAPH_RDLOCK bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
+int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
+
+void GRAPH_WRLOCK
+bdrv_add_child(BlockDriverState *parent, BlockDriverState *child, Error **errp);
+
+void GRAPH_WRLOCK
+bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
+
+/**
+ *
+ * bdrv_register_buf/bdrv_unregister_buf:
+ *
+ * Register/unregister a buffer for I/O. For example, VFIO drivers are
+ * interested in knowing the memory areas that will later be used for I/O, so
+ * that they can prepare IOMMU mappings etc. to get better performance.
+ *
+ * Buffers must not overlap and they must be unregistered with the same <host,
+ * size> values that they were registered with.
+ *
+ * Returns: true on success, false on failure
+ */
+bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
+ Error **errp);
+void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size);
+
+void bdrv_cancel_in_flight(BlockDriverState *bs);
+
+#endif /* BLOCK_GLOBAL_STATE_H */
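A sketch of the registration contract described above; the surrounding I/O is a placeholder and qemu_vfree() is assumed to be the usual counterpart of qemu_blockalign() (declared in block-io.h):

    static void my_use_registered_buf(BlockDriverState *bs, size_t size,
                                      Error **errp)
    {
        void *host = qemu_blockalign(bs, size);

        if (bdrv_register_buf(bs, host, size, errp)) {
            /* Issue I/O with BDRV_REQ_REGISTERED_BUF using this buffer... */

            /* Must be unregistered with the same <host, size> pair. */
            bdrv_unregister_buf(bs, host, size);
        }
        qemu_vfree(host);
    }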
diff --git a/include/block/block-hmp-cmds.h b/include/block/block-hmp-cmds.h
index 3412e108ca..71113cd7ef 100644
--- a/include/block/block-hmp-cmds.h
+++ b/include/block/block-hmp-cmds.h
@@ -12,8 +12,10 @@
* the COPYING file in the top-level directory.
*/
-#ifndef BLOCK_HMP_COMMANDS_H
-#define BLOCK_HMP_COMMANDS_H
+#ifndef BLOCK_BLOCK_HMP_CMDS_H
+#define BLOCK_BLOCK_HMP_CMDS_H
+
+#include "qemu/coroutine.h"
void hmp_drive_add(Monitor *mon, const QDict *qdict);
@@ -38,7 +40,7 @@ void hmp_nbd_server_add(Monitor *mon, const QDict *qdict);
void hmp_nbd_server_remove(Monitor *mon, const QDict *qdict);
void hmp_nbd_server_stop(Monitor *mon, const QDict *qdict);
-void hmp_block_resize(Monitor *mon, const QDict *qdict);
+void coroutine_fn hmp_block_resize(Monitor *mon, const QDict *qdict);
void hmp_block_stream(Monitor *mon, const QDict *qdict);
void hmp_block_passwd(Monitor *mon, const QDict *qdict);
void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict);
diff --git a/include/block/block-io.h b/include/block/block-io.h
new file mode 100644
index 0000000000..b49e0537dd
--- /dev/null
+++ b/include/block/block-io.h
@@ -0,0 +1,455 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_IO_H
+#define BLOCK_IO_H
+
+#include "block/aio-wait.h"
+#include "block/block-common.h"
+#include "qemu/coroutine.h"
+#include "qemu/iov.h"
+
+/*
+ * I/O API functions. These functions are thread-safe, and therefore
+ * can run in any thread.
+ *
+ * These functions can only call functions from I/O and Common categories,
+ * but can be invoked by GS, "I/O or GS" and I/O APIs.
+ *
+ * All functions in this category must use the macro
+ * IO_CODE();
+ * to catch when they are accidentally called by the wrong API.
+ */
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, int64_t bytes,
+ BdrvRequestFlags flags);
+
+int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pread(BdrvChild *child, int64_t offset, int64_t bytes, void *buf,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pwrite(BdrvChild *child, int64_t offset,int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+/*
+ * Efficiently zero a region of the disk image. Note that this is a regular
+ * I/O request like read or write and should have a reasonable size. This
+ * function is not suitable for zeroing the entire image in a single request
+ * because it may allocate memory for the entire region.
+ */
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset, int64_t bytes,
+ BdrvRequestFlags flags);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
+
+int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_nb_sectors(BlockDriverState *bs);
+int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs);
+
+int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_getlength(BlockDriverState *bs);
+int64_t co_wrapper_mixed_bdrv_rdlock bdrv_getlength(BlockDriverState *bs);
+
+int64_t coroutine_fn GRAPH_RDLOCK
+bdrv_co_get_allocated_file_size(BlockDriverState *bs);
+
+int64_t co_wrapper_bdrv_rdlock
+bdrv_get_allocated_file_size(BlockDriverState *bs);
+
+BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
+ BlockDriverState *in_bs, Error **errp);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_delete_file_noerr(BlockDriverState *bs);
+
+
+/* async block I/O */
+void bdrv_aio_cancel_async(BlockAIOCB *acb);
+
+/* sg packet commands */
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
+
+/* Ensure contents are flushed to disk. */
+int coroutine_fn GRAPH_RDLOCK bdrv_co_flush(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
+ int64_t bytes);
+
+/* Report zone information of zone block device. */
+int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_report(BlockDriverState *bs,
+ int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_mgmt(BlockDriverState *bs,
+ BlockZoneOp op,
+ int64_t offset, int64_t len);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_append(BlockDriverState *bs,
+ int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_block_status_above(BlockDriverState *bs, BlockDriverState *base,
+ int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
+ int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ int64_t *pnum);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
+ bool include_base, int64_t offset, int64_t bytes,
+ int64_t *pnum);
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_is_allocated_above(BlockDriverState *bs, BlockDriverState *base,
+ bool include_base, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+int GRAPH_RDLOCK
+bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
+ Error **errp);
+
+bool bdrv_is_read_only(BlockDriverState *bs);
+bool bdrv_is_writable(BlockDriverState *bs);
+bool bdrv_is_sg(BlockDriverState *bs);
+int bdrv_get_flags(BlockDriverState *bs);
+
+bool coroutine_fn GRAPH_RDLOCK bdrv_co_is_inserted(BlockDriverState *bs);
+bool co_wrapper_bdrv_rdlock bdrv_is_inserted(BlockDriverState *bs);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_lock_medium(BlockDriverState *bs, bool locked);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_eject(BlockDriverState *bs, bool eject_flag);
+
+const char *bdrv_get_format_name(BlockDriverState *bs);
+
+bool GRAPH_RDLOCK bdrv_supports_compressed_writes(BlockDriverState *bs);
+const char *bdrv_get_node_name(const BlockDriverState *bs);
+
+const char * GRAPH_RDLOCK
+bdrv_get_device_name(const BlockDriverState *bs);
+
+const char * GRAPH_RDLOCK
+bdrv_get_device_or_node_name(const BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
+
+ImageInfoSpecific * GRAPH_RDLOCK
+bdrv_get_specific_info(BlockDriverState *bs, Error **errp);
+
+BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
+void bdrv_round_to_subclusters(BlockDriverState *bs,
+ int64_t offset, int64_t bytes,
+ int64_t *cluster_offset,
+ int64_t *cluster_bytes);
+
+void bdrv_get_backing_filename(BlockDriverState *bs,
+ char *filename, int filename_size);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
+ const char *backing_fmt, bool warn);
+
+int co_wrapper_bdrv_rdlock
+bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
+ const char *backing_fmt, bool warn);
+
+int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
+ int64_t pos, int size);
+
+int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
+ int64_t pos, int size);
+
+/*
+ * Returns the alignment in bytes that is required so that no bounce buffer
+ * is needed throughout the stack
+ */
+size_t bdrv_min_mem_align(BlockDriverState *bs);
+/* Returns optimal alignment in bytes for bounce buffer */
+size_t bdrv_opt_mem_align(BlockDriverState *bs);
+void *qemu_blockalign(BlockDriverState *bs, size_t size);
+void *qemu_blockalign0(BlockDriverState *bs, size_t size);
+void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
+void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
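+
+/*
+ * A minimal caller-side usage sketch (purely illustrative): allocate a
+ * suitably aligned bounce buffer for @bs, use it for I/O and release it
+ * with qemu_vfree():
+ *
+ *     void *buf = qemu_try_blockalign(bs, len);
+ *     if (!buf) {
+ *         return -ENOMEM;
+ *     }
+ *     ... perform reads/writes through buf ...
+ *     qemu_vfree(buf);
+ */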
+
+void bdrv_enable_copy_on_read(BlockDriverState *bs);
+void bdrv_disable_copy_on_read(BlockDriverState *bs);
+
+void coroutine_fn GRAPH_RDLOCK
+bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event);
+
+void co_wrapper_mixed_bdrv_rdlock
+bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
+
+#define BLKDBG_CO_EVENT(child, evt) \
+ do { \
+ if (child) { \
+ bdrv_co_debug_event(child->bs, evt); \
+ } \
+ } while (0)
+
+#define BLKDBG_EVENT(child, evt) \
+ do { \
+ if (child) { \
+ bdrv_debug_event(child->bs, evt); \
+ } \
+ } while (0)
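+
+/*
+ * Sketch of typical use from a driver's coroutine I/O path (the event name
+ * is just one example from the BlkdebugEvent set):
+ *
+ *     BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
+ *     ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, 0);
+ */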
+
+/**
+ * bdrv_get_aio_context:
+ *
+ * Returns: the currently bound #AioContext
+ */
+AioContext *bdrv_get_aio_context(BlockDriverState *bs);
+
+AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c);
+
+/**
+ * Move the current coroutine to the AioContext of @bs and return the old
+ * AioContext of the coroutine. Increase bs->in_flight so that draining @bs
+ * will wait for the operation until the corresponding bdrv_co_leave().
+ *
+ * Consequently, you can't call drain inside a bdrv_co_enter/leave() section as
+ * this will deadlock.
+ */
+AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs);
+
+/**
+ * Ends a section started by bdrv_co_enter(). Move the current coroutine back
+ * to old_ctx and decrease bs->in_flight again.
+ */
+void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx);
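+
+/*
+ * Sketch of the intended pairing, for a coroutine that needs to run a
+ * section in the AioContext of @bs:
+ *
+ *     AioContext *old_ctx = bdrv_co_enter(bs);
+ *     ... work that must run in bs's AioContext; do not drain here ...
+ *     bdrv_co_leave(bs, old_ctx);
+ */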
+
+AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c);
+
+bool coroutine_fn GRAPH_RDLOCK
+bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
+bool co_wrapper_bdrv_rdlock
+bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
+ uint32_t granularity, Error **errp);
+
+/**
+ *
+ * bdrv_co_copy_range:
+ *
+ * Do offloaded copy between two children. If the operation is not implemented
+ * by the driver, or if the backend storage doesn't support it, a negative
+ * error code will be returned.
+ *
+ * Note: the block layer doesn't emulate or fall back to a bounce buffer
+ * approach because usually the caller shouldn't attempt offloaded copy again
+ * (e.g. by calling copy_file_range(2)) after the first error; instead, it
+ * should fall back to a read+write path at the caller level.
+ *
+ * @src: Source child to copy data from
+ * @src_offset: offset in @src image to read data
+ * @dst: Destination child to copy data to
+ * @dst_offset: offset in @dst image to write data
+ * @bytes: number of bytes to copy
+ * @flags: request flags. Supported flags:
+ * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
+ * write on @dst as if bdrv_co_pwrite_zeroes is
+ * called. Used to simplify caller code, or
+ * during BlockDriver.bdrv_co_copy_range_from()
+ * recursion.
+ * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
+ * requests currently in flight.
+ *
+ * Returns: 0 if succeeded; negative error code if failed.
+ **/
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
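+
+/*
+ * Sketch of the caller-level fallback described above, assuming a bounce
+ * buffer @buf and the bdrv_co_pread()/bdrv_co_pwrite() helpers from this
+ * header:
+ *
+ *     ret = bdrv_co_copy_range(src, src_off, dst, dst_off, bytes, 0, 0);
+ *     if (ret < 0) {
+ *         ret = bdrv_co_pread(src, src_off, bytes, buf, 0);
+ *         if (ret >= 0) {
+ *             ret = bdrv_co_pwrite(dst, dst_off, bytes, buf, 0);
+ *         }
+ *     }
+ */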
+
+/*
+ * "I/O or GS" API functions. These functions can run without
+ * the BQL, but only in one specific iothread/main loop.
+ *
+ * More specifically, these functions use BDRV_POLL_WHILE(bs), which requires
+ * the caller to be either in the main thread or directly in the home thread
+ * that runs the bs AioContext. Calling them from another thread in another
+ * AioContext would cause deadlocks.
+ *
+ * Therefore, these functions are not proper I/O, because they
+ * can't run in *any* iothread, but only in a specific one.
+ *
+ * These functions can call any function from the I/O, Common and this
+ * category, but must be invoked only by other "I/O or GS" and GS APIs.
+ *
+ * All functions in this category must use the macro
+ * IO_OR_GS_CODE();
+ * to catch when they are accidentally called by the wrong API.
+ */
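+
+/*
+ * For example, a function in this category would typically start like this
+ * (bdrv_foo is a hypothetical name):
+ *
+ *     void bdrv_foo(BlockDriverState *bs)
+ *     {
+ *         IO_OR_GS_CODE();
+ *         ...
+ *     }
+ */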
+
+#define BDRV_POLL_WHILE(bs, cond) ({ \
+ BlockDriverState *bs_ = (bs); \
+ IO_OR_GS_CODE(); \
+ AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
+ cond); })
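+
+/*
+ * Typical use is to poll until a completion-driven condition becomes false,
+ * e.g. (assuming in_flight is the node's in-flight request counter):
+ *
+ *     BDRV_POLL_WHILE(bs, qatomic_read(&bs->in_flight) > 0);
+ */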
+
+void bdrv_drain(BlockDriverState *bs);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
+
+/* Invalidate any cached metadata used by image formats */
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
+
+int co_wrapper_mixed_bdrv_rdlock bdrv_flush(BlockDriverState *bs);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+/**
+ * bdrv_parent_drained_begin_single:
+ *
+ * Begin a quiesced section for the parent of @c.
+ */
+void GRAPH_RDLOCK bdrv_parent_drained_begin_single(BdrvChild *c);
+
+/**
+ * bdrv_parent_drained_poll_single:
+ *
+ * Returns true if there is any pending activity to cease before @c can be
+ * considered quiesced, false otherwise.
+ */
+bool GRAPH_RDLOCK bdrv_parent_drained_poll_single(BdrvChild *c);
+
+/**
+ * bdrv_parent_drained_end_single:
+ *
+ * End a quiesced section for the parent of @c.
+ */
+void GRAPH_RDLOCK bdrv_parent_drained_end_single(BdrvChild *c);
+
+/**
+ * bdrv_drain_poll:
+ *
+ * Poll for pending requests in @bs and its parents (except for @ignore_parent).
+ *
+ * If @ignore_bds_parents is true, parents that are BlockDriverStates must
+ * ignore the drain request because they will be drained separately (used for
+ * drain_all).
+ *
+ * This is part of bdrv_drained_begin.
+ */
+bool GRAPH_RDLOCK
+bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
+ bool ignore_bds_parents);
+
+/**
+ * bdrv_drained_begin:
+ *
+ * Begin a quiesced section for exclusive access to the BDS, by disabling
+ * external request sources including NBD server, block jobs, and device model.
+ *
+ * This function can only be invoked by the main loop or a coroutine
+ * (regardless of the AioContext where it is running).
+ * If the coroutine is running in an Iothread AioContext, this function will
+ * just schedule a BH to run in the main loop.
+ * However, it cannot be directly called by an Iothread.
+ *
+ * This function can be recursive.
+ */
+void bdrv_drained_begin(BlockDriverState *bs);
+
+/**
+ * bdrv_do_drained_begin_quiesce:
+ *
+ * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
+ * running requests to complete.
+ */
+void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent);
+
+/**
+ * bdrv_drained_end:
+ *
+ * End a quiescent section started by bdrv_drained_begin().
+ *
+ * This function can only be invoked by the main loop or a coroutine
+ * (regardless of the AioContext where it is running).
+ * If the coroutine is running in an Iothread AioContext, this function will
+ * just schedule a BH to run in the main loop.
+ * However, it cannot be directly called by an Iothread.
+ */
+void bdrv_drained_end(BlockDriverState *bs);
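+
+/*
+ * Sketch of a typical drained section around an operation that must not
+ * race with new requests (illustrative caller code):
+ *
+ *     bdrv_drained_begin(bs);
+ *     ... modify the graph or otherwise rely on no new requests arriving ...
+ *     bdrv_drained_end(bs);
+ */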
+
+#endif /* BLOCK_IO_H */
diff --git a/include/block/block.h b/include/block/block.h
index 6e36154061..e2c647de27 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -1,792 +1,32 @@
-#ifndef BLOCK_H
-#define BLOCK_H
-
-#include "block/aio.h"
-#include "block/aio-wait.h"
-#include "qemu/iov.h"
-#include "qemu/coroutine.h"
-#include "block/accounting.h"
-#include "block/dirty-bitmap.h"
-#include "block/blockjob.h"
-#include "qemu/hbitmap.h"
-
-/* block.c */
-typedef struct BlockDriver BlockDriver;
-typedef struct BdrvChild BdrvChild;
-typedef struct BdrvChildClass BdrvChildClass;
-
-typedef struct BlockDriverInfo {
- /* in bytes, 0 if irrelevant */
- int cluster_size;
- /* offset at which the VM state can be saved (0 if not possible) */
- int64_t vm_state_offset;
- bool is_dirty;
- /*
- * True if this block driver only supports compressed writes
- */
- bool needs_compressed_writes;
-} BlockDriverInfo;
-
-typedef struct BlockFragInfo {
- uint64_t allocated_clusters;
- uint64_t total_clusters;
- uint64_t fragmented_clusters;
- uint64_t compressed_clusters;
-} BlockFragInfo;
-
-typedef enum {
- BDRV_REQ_COPY_ON_READ = 0x1,
- BDRV_REQ_ZERO_WRITE = 0x2,
-
- /*
- * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
- * that the block driver should unmap (discard) blocks if it is guaranteed
- * that the result will read back as zeroes. The flag is only passed to the
- * driver if the block device is opened with BDRV_O_UNMAP.
- */
- BDRV_REQ_MAY_UNMAP = 0x4,
-
- BDRV_REQ_FUA = 0x10,
- BDRV_REQ_WRITE_COMPRESSED = 0x20,
-
- /* Signifies that this write request will not change the visible disk
- * content. */
- BDRV_REQ_WRITE_UNCHANGED = 0x40,
-
- /*
- * BDRV_REQ_SERIALISING forces request serialisation for writes.
- * It is used to ensure that writes to the backing file of a backup process
- * target cannot race with a read of the backup target that defers to the
- * backing file.
- *
- * Note, that BDRV_REQ_SERIALISING is _not_ opposite in meaning to
- * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
- * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
- */
- BDRV_REQ_SERIALISING = 0x80,
-
- /* Execute the request only if the operation can be offloaded or otherwise
- * be executed efficiently, but return an error instead of using a slow
- * fallback. */
- BDRV_REQ_NO_FALLBACK = 0x100,
-
- /*
- * BDRV_REQ_PREFETCH may be used only together with BDRV_REQ_COPY_ON_READ
- * on read request and means that caller doesn't really need data to be
- * written to qiov parameter which may be NULL.
- */
- BDRV_REQ_PREFETCH = 0x200,
- /* Mask of valid flags */
- BDRV_REQ_MASK = 0x3ff,
-} BdrvRequestFlags;
-
-typedef struct BlockSizes {
- uint32_t phys;
- uint32_t log;
-} BlockSizes;
-
-typedef struct HDGeometry {
- uint32_t heads;
- uint32_t sectors;
- uint32_t cylinders;
-} HDGeometry;
-
-#define BDRV_O_RDWR 0x0002
-#define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */
-#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */
-#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */
-#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */
-#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the thread pool */
-#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */
-#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */
-#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
-#define BDRV_O_INACTIVE 0x0800 /* consistency hint for migration handoff */
-#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */
-#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */
-#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */
-#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given:
- select an appropriate protocol driver,
- ignoring the format layer */
-#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */
-#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */
-#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */
-
-#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
-
-
-/* Option names of options parsed by the block layer */
-
-#define BDRV_OPT_CACHE_WB "cache.writeback"
-#define BDRV_OPT_CACHE_DIRECT "cache.direct"
-#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
-#define BDRV_OPT_READ_ONLY "read-only"
-#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
-#define BDRV_OPT_DISCARD "discard"
-#define BDRV_OPT_FORCE_SHARE "force-share"
-
-
-#define BDRV_SECTOR_BITS 9
-#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
-
-#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
- INT_MAX >> BDRV_SECTOR_BITS)
-#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
-
/*
- * Allocation status flags for bdrv_block_status() and friends.
- *
- * Public flags:
- * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
- * BDRV_BLOCK_ZERO: offset reads as zero
- * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
- * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
- * layer rather than any backing, set by block layer
- * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
- * layer, set by block layer
+ * QEMU System Emulator block driver
*
- * Internal flags:
- * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
- * that the block layer recompute the answer from the returned
- * BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
- * BDRV_BLOCK_RECURSE: request that the block layer will recursively search for
- * zeroes in file child of current block node inside
- * returned region. Only valid together with both
- * BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID. Should not
- * appear with BDRV_BLOCK_ZERO.
+ * Copyright (c) 2003 Fabrice Bellard
*
- * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
- * host offset within the returned BDS that is allocated for the
- * corresponding raw guest data. However, whether that offset
- * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
- * DATA ZERO OFFSET_VALID
- * t t t sectors read as zero, returned file is zero at offset
- * t f t sectors read as valid from file at offset
- * f t t sectors preallocated, read as zero, returned file not
- * necessarily zero at offset
- * f f t sectors preallocated but read from backing_hd,
- * returned file contains garbage at offset
- * t t f sectors preallocated, read as zero, unknown offset
- * t f f sectors read from unknown file or offset
- * f t f not allocated or unknown offset, read as zero
- * f f f not allocated or unknown offset, read from backing_hd
- */
-#define BDRV_BLOCK_DATA 0x01
-#define BDRV_BLOCK_ZERO 0x02
-#define BDRV_BLOCK_OFFSET_VALID 0x04
-#define BDRV_BLOCK_RAW 0x08
-#define BDRV_BLOCK_ALLOCATED 0x10
-#define BDRV_BLOCK_EOF 0x20
-#define BDRV_BLOCK_RECURSE 0x40
-
-typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
-
-typedef struct BDRVReopenState {
- BlockDriverState *bs;
- int flags;
- BlockdevDetectZeroesOptions detect_zeroes;
- bool backing_missing;
- bool replace_backing_bs; /* new_backing_bs is ignored if this is false */
- BlockDriverState *new_backing_bs; /* If NULL then detach the current bs */
- uint64_t perm, shared_perm;
- QDict *options;
- QDict *explicit_options;
- void *opaque;
-} BDRVReopenState;
-
-/*
- * Block operation types
- */
-typedef enum BlockOpType {
- BLOCK_OP_TYPE_BACKUP_SOURCE,
- BLOCK_OP_TYPE_BACKUP_TARGET,
- BLOCK_OP_TYPE_CHANGE,
- BLOCK_OP_TYPE_COMMIT_SOURCE,
- BLOCK_OP_TYPE_COMMIT_TARGET,
- BLOCK_OP_TYPE_DATAPLANE,
- BLOCK_OP_TYPE_DRIVE_DEL,
- BLOCK_OP_TYPE_EJECT,
- BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
- BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
- BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
- BLOCK_OP_TYPE_MIRROR_SOURCE,
- BLOCK_OP_TYPE_MIRROR_TARGET,
- BLOCK_OP_TYPE_RESIZE,
- BLOCK_OP_TYPE_STREAM,
- BLOCK_OP_TYPE_REPLACE,
- BLOCK_OP_TYPE_MAX,
-} BlockOpType;
-
-/* Block node permission constants */
-enum {
- /**
- * A user that has the "permission" of consistent reads is guaranteed that
- * their view of the contents of the block device is complete and
- * self-consistent, representing the contents of a disk at a specific
- * point.
- *
- * For most block devices (including their backing files) this is true, but
- * the property cannot be maintained in a few situations like for
- * intermediate nodes of a commit block job.
- */
- BLK_PERM_CONSISTENT_READ = 0x01,
-
- /** This permission is required to change the visible disk contents. */
- BLK_PERM_WRITE = 0x02,
-
- /**
- * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
- * required for writes to the block node when the caller promises that
- * the visible disk content doesn't change.
- *
- * As the BLK_PERM_WRITE permission is strictly stronger, either is
- * sufficient to perform an unchanging write.
- */
- BLK_PERM_WRITE_UNCHANGED = 0x04,
-
- /** This permission is required to change the size of a block node. */
- BLK_PERM_RESIZE = 0x08,
-
- /**
- * This permission is required to change the node that this BdrvChild
- * points to.
- */
- BLK_PERM_GRAPH_MOD = 0x10,
-
- BLK_PERM_ALL = 0x1f,
-
- DEFAULT_PERM_PASSTHROUGH = BLK_PERM_CONSISTENT_READ
- | BLK_PERM_WRITE
- | BLK_PERM_WRITE_UNCHANGED
- | BLK_PERM_RESIZE,
-
- DEFAULT_PERM_UNCHANGED = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
-};
-
-/*
- * Flags that parent nodes assign to child nodes to specify what kind of
- * role(s) they take.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * At least one of DATA, METADATA, FILTERED, or COW must be set for
- * every child.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
*/
-enum BdrvChildRoleBits {
- /*
- * This child stores data.
- * Any node may have an arbitrary number of such children.
- */
- BDRV_CHILD_DATA = (1 << 0),
-
- /*
- * This child stores metadata.
- * Any node may have an arbitrary number of metadata-storing
- * children.
- */
- BDRV_CHILD_METADATA = (1 << 1),
-
- /*
- * A child that always presents exactly the same visible data as
- * the parent, e.g. by virtue of the parent forwarding all reads
- * and writes.
- * This flag is mutually exclusive with DATA, METADATA, and COW.
- * Any node may have at most one filtered child at a time.
- */
- BDRV_CHILD_FILTERED = (1 << 2),
-
- /*
- * Child from which to read all data that isn’t allocated in the
- * parent (i.e., the backing child); such data is copied to the
- * parent through COW (and optionally COR).
- * This field is mutually exclusive with DATA, METADATA, and
- * FILTERED.
- * Any node may have at most one such backing child at a time.
- */
- BDRV_CHILD_COW = (1 << 3),
-
- /*
- * The primary child. For most drivers, this is the child whose
- * filename applies best to the parent node.
- * Any node may have at most one primary child at a time.
- */
- BDRV_CHILD_PRIMARY = (1 << 4),
-
- /* Useful combination of flags */
- BDRV_CHILD_IMAGE = BDRV_CHILD_DATA
- | BDRV_CHILD_METADATA
- | BDRV_CHILD_PRIMARY,
-};
-
-/* Mask of BdrvChildRoleBits values */
-typedef unsigned int BdrvChildRole;
-
-char *bdrv_perm_names(uint64_t perm);
-uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm);
-
-/* disk I/O throttling */
-void bdrv_init(void);
-void bdrv_init_with_whitelist(void);
-bool bdrv_uses_whitelist(void);
-int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
-BlockDriver *bdrv_find_protocol(const char *filename,
- bool allow_protocol_prefix,
- Error **errp);
-BlockDriver *bdrv_find_format(const char *format_name);
-int bdrv_create(BlockDriver *drv, const char* filename,
- QemuOpts *opts, Error **errp);
-int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
-
-BlockDriverState *bdrv_new(void);
-void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
- Error **errp);
-void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
- Error **errp);
-
-int bdrv_parse_aio(const char *mode, int *flags);
-int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
-int bdrv_parse_discard_flags(const char *mode, int *flags);
-BdrvChild *bdrv_open_child(const char *filename,
- QDict *options, const char *bdref_key,
- BlockDriverState* parent,
- const BdrvChildClass *child_class,
- BdrvChildRole child_role,
- bool allow_none, Error **errp);
-BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
-void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
- Error **errp);
-int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
- const char *bdref_key, Error **errp);
-BlockDriverState *bdrv_open(const char *filename, const char *reference,
- QDict *options, int flags, Error **errp);
-BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
- int flags, Error **errp);
-BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
- BlockDriverState *bs, QDict *options,
- bool keep_old_opts);
-int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
-int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
- Error **errp);
-int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
- BlockReopenQueue *queue, Error **errp);
-void bdrv_reopen_commit(BDRVReopenState *reopen_state);
-void bdrv_reopen_abort(BDRVReopenState *reopen_state);
-int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
-int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
-int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
-int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
-int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
-int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
- const void *buf, int count);
-/*
- * Efficiently zero a region of the disk image. Note that this is a regular
- * I/O request like read or write and should have a reasonable size. This
- * function is not suitable for zeroing the entire image in a single request
- * because it may allocate memory for the entire region.
- */
-int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
- const char *backing_file);
-void bdrv_refresh_filename(BlockDriverState *bs);
-
-int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
- PreallocMode prealloc, BdrvRequestFlags flags,
- Error **errp);
-int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
- PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
-
-int64_t bdrv_nb_sectors(BlockDriverState *bs);
-int64_t bdrv_getlength(BlockDriverState *bs);
-int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
-BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
- BlockDriverState *in_bs, Error **errp);
-void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
-void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
-int bdrv_commit(BlockDriverState *bs);
-int bdrv_make_empty(BdrvChild *c, Error **errp);
-int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
- const char *backing_fmt, bool warn);
-void bdrv_register(BlockDriver *bdrv);
-int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
- const char *backing_file_str);
-BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
- BlockDriverState *bs);
-BlockDriverState *bdrv_find_base(BlockDriverState *bs);
-bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
- Error **errp);
-int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
- Error **errp);
-void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);
-int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
-
-
-typedef struct BdrvCheckResult {
- int corruptions;
- int leaks;
- int check_errors;
- int corruptions_fixed;
- int leaks_fixed;
- int64_t image_end_offset;
- BlockFragInfo bfi;
-} BdrvCheckResult;
-
-typedef enum {
- BDRV_FIX_LEAKS = 1,
- BDRV_FIX_ERRORS = 2,
-} BdrvCheckMode;
-
-int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
-
-/* The units of offset and total_work_size may be chosen arbitrarily by the
- * block driver; total_work_size may change during the course of the amendment
- * operation */
-typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
- int64_t total_work_size, void *opaque);
-int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
- BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
- bool force,
- Error **errp);
-
-/* check if a named node can be replaced when doing drive-mirror */
-BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
- const char *node_name, Error **errp);
-
-/* async block I/O */
-void bdrv_aio_cancel(BlockAIOCB *acb);
-void bdrv_aio_cancel_async(BlockAIOCB *acb);
-
-/* sg packet commands */
-int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
-
-/* Invalidate any cached metadata used by image formats */
-void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
-void bdrv_invalidate_cache_all(Error **errp);
-int bdrv_inactivate_all(void);
-
-/* Ensure contents are flushed to disk. */
-int bdrv_flush(BlockDriverState *bs);
-int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
-int bdrv_flush_all(void);
-void bdrv_close_all(void);
-void bdrv_drain(BlockDriverState *bs);
-void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
-void bdrv_drain_all_begin(void);
-void bdrv_drain_all_end(void);
-void bdrv_drain_all(void);
-
-#define BDRV_POLL_WHILE(bs, cond) ({ \
- BlockDriverState *bs_ = (bs); \
- AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
- cond); })
-
-int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
-int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
-int bdrv_has_zero_init_1(BlockDriverState *bs);
-int bdrv_has_zero_init(BlockDriverState *bs);
-bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
-int bdrv_block_status(BlockDriverState *bs, int64_t offset,
- int64_t bytes, int64_t *pnum, int64_t *map,
- BlockDriverState **file);
-int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
- int64_t offset, int64_t bytes, int64_t *pnum,
- int64_t *map, BlockDriverState **file);
-int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
- int64_t *pnum);
-int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
- bool include_base, int64_t offset, int64_t bytes,
- int64_t *pnum);
-
-bool bdrv_is_read_only(BlockDriverState *bs);
-int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
- bool ignore_allow_rdw, Error **errp);
-int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
- Error **errp);
-bool bdrv_is_writable(BlockDriverState *bs);
-bool bdrv_is_sg(BlockDriverState *bs);
-bool bdrv_is_inserted(BlockDriverState *bs);
-void bdrv_lock_medium(BlockDriverState *bs, bool locked);
-void bdrv_eject(BlockDriverState *bs, bool eject_flag);
-const char *bdrv_get_format_name(BlockDriverState *bs);
-BlockDriverState *bdrv_find_node(const char *node_name);
-BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp);
-XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
-BlockDriverState *bdrv_lookup_bs(const char *device,
- const char *node_name,
- Error **errp);
-bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
-BlockDriverState *bdrv_next_node(BlockDriverState *bs);
-BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);
-
-typedef struct BdrvNextIterator {
- enum {
- BDRV_NEXT_BACKEND_ROOTS,
- BDRV_NEXT_MONITOR_OWNED,
- } phase;
- BlockBackend *blk;
- BlockDriverState *bs;
-} BdrvNextIterator;
-
-BlockDriverState *bdrv_first(BdrvNextIterator *it);
-BlockDriverState *bdrv_next(BdrvNextIterator *it);
-void bdrv_next_cleanup(BdrvNextIterator *it);
-
-BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
-bool bdrv_is_encrypted(BlockDriverState *bs);
-void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
- void *opaque, bool read_only);
-const char *bdrv_get_node_name(const BlockDriverState *bs);
-const char *bdrv_get_device_name(const BlockDriverState *bs);
-const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
-int bdrv_get_flags(BlockDriverState *bs);
-int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
-ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
- Error **errp);
-BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
-void bdrv_round_to_clusters(BlockDriverState *bs,
- int64_t offset, int64_t bytes,
- int64_t *cluster_offset,
- int64_t *cluster_bytes);
-
-void bdrv_get_backing_filename(BlockDriverState *bs,
- char *filename, int filename_size);
-char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
-char *bdrv_get_full_backing_filename_from_filename(const char *backed,
- const char *backing,
- Error **errp);
-char *bdrv_dirname(BlockDriverState *bs, Error **errp);
-
-int path_has_protocol(const char *path);
-int path_is_absolute(const char *path);
-char *path_combine(const char *base_path, const char *filename);
-
-int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
-int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
-int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
- int64_t pos, int size);
-
-int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
- int64_t pos, int size);
-
-void bdrv_img_create(const char *filename, const char *fmt,
- const char *base_filename, const char *base_fmt,
- char *options, uint64_t img_size, int flags,
- bool quiet, Error **errp);
-
-/* Returns the alignment in bytes that is required so that no bounce buffer
- * is required throughout the stack */
-size_t bdrv_min_mem_align(BlockDriverState *bs);
-/* Returns optimal alignment in bytes for bounce buffer */
-size_t bdrv_opt_mem_align(BlockDriverState *bs);
-void *qemu_blockalign(BlockDriverState *bs, size_t size);
-void *qemu_blockalign0(BlockDriverState *bs, size_t size);
-void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
-void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
-bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
-
-void bdrv_enable_copy_on_read(BlockDriverState *bs);
-void bdrv_disable_copy_on_read(BlockDriverState *bs);
-
-void bdrv_ref(BlockDriverState *bs);
-void bdrv_unref(BlockDriverState *bs);
-void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
-BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
- BlockDriverState *child_bs,
- const char *child_name,
- const BdrvChildClass *child_class,
- BdrvChildRole child_role,
- Error **errp);
-
-bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
-void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
-void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
-void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
-void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
-bool bdrv_op_blocker_is_empty(BlockDriverState *bs);
-
-#define BLKDBG_EVENT(child, evt) \
- do { \
- if (child) { \
- bdrv_debug_event(child->bs, evt); \
- } \
- } while (0)
-
-void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
-
-int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
- const char *tag);
-int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
-int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
-bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
-
-/**
- * bdrv_get_aio_context:
- *
- * Returns: the currently bound #AioContext
- */
-AioContext *bdrv_get_aio_context(BlockDriverState *bs);
-
-/**
- * Transfer control to @co in the aio context of @bs
- */
-void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);
-
-void bdrv_set_aio_context_ignore(BlockDriverState *bs,
- AioContext *new_context, GSList **ignore);
-int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
- Error **errp);
-int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
- BdrvChild *ignore_child, Error **errp);
-bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
- GSList **ignore, Error **errp);
-bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
- GSList **ignore, Error **errp);
-int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
-int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
-
-void bdrv_io_plug(BlockDriverState *bs);
-void bdrv_io_unplug(BlockDriverState *bs);
-
-/**
- * bdrv_parent_drained_begin_single:
- *
- * Begin a quiesced section for the parent of @c. If @poll is true, wait for
- * any pending activity to cease.
- */
-void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);
-
-/**
- * bdrv_parent_drained_end_single:
- *
- * End a quiesced section for the parent of @c.
- *
- * This polls @bs's AioContext until all scheduled sub-drained_ends
- * have settled, which may result in graph changes.
- */
-void bdrv_parent_drained_end_single(BdrvChild *c);
-
-/**
- * bdrv_drain_poll:
- *
- * Poll for pending requests in @bs, its parents (except for @ignore_parent),
- * and if @recursive is true its children as well (used for subtree drain).
- *
- * If @ignore_bds_parents is true, parents that are BlockDriverStates must
- * ignore the drain request because they will be drained separately (used for
- * drain_all).
- *
- * This is part of bdrv_drained_begin.
- */
-bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
- BdrvChild *ignore_parent, bool ignore_bds_parents);
-
-/**
- * bdrv_drained_begin:
- *
- * Begin a quiesced section for exclusive access to the BDS, by disabling
- * external request sources including NBD server and device model. Note that
- * this doesn't block timers or coroutines from submitting more requests, which
- * means block_job_pause is still necessary.
- *
- * This function can be recursive.
- */
-void bdrv_drained_begin(BlockDriverState *bs);
-
-/**
- * bdrv_do_drained_begin_quiesce:
- *
- * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
- * running requests to complete.
- */
-void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
- BdrvChild *parent, bool ignore_bds_parents);
-
-/**
- * Like bdrv_drained_begin, but recursively begins a quiesced section for
- * exclusive access to all child nodes as well.
- */
-void bdrv_subtree_drained_begin(BlockDriverState *bs);
-
-/**
- * bdrv_drained_end:
- *
- * End a quiescent section started by bdrv_drained_begin().
- *
- * This polls @bs's AioContext until all scheduled sub-drained_ends
- * have settled. On one hand, that may result in graph changes. On
- * the other, this requires that the caller either runs in the main
- * loop; or that all involved nodes (@bs and all of its parents) are
- * in the caller's AioContext.
- */
-void bdrv_drained_end(BlockDriverState *bs);
-
-/**
- * bdrv_drained_end_no_poll:
- *
- * Same as bdrv_drained_end(), but do not poll for the subgraph to
- * actually become unquiesced. Therefore, no graph changes will occur
- * with this function.
- *
- * *drained_end_counter is incremented for every background operation
- * that is scheduled, and will be decremented for every operation once
- * it settles. The caller must poll until it reaches 0. The counter
- * should be accessed using atomic operations only.
- */
-void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
-
-/**
- * End a quiescent section started by bdrv_subtree_drained_begin().
- */
-void bdrv_subtree_drained_end(BlockDriverState *bs);
+#ifndef BLOCK_H
+#define BLOCK_H
-void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
- Error **errp);
-void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
+#include "block/block-global-state.h"
+#include "block/block-io.h"
-bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
- uint32_t granularity, Error **errp);
-/**
- *
- * bdrv_register_buf/bdrv_unregister_buf:
- *
- * Register/unregister a buffer for I/O. For example, VFIO drivers are
- * interested to know the memory areas that would later be used for I/O, so
- * that they can prepare IOMMU mapping etc., to get better performance.
- */
-void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
-void bdrv_unregister_buf(BlockDriverState *bs, void *host);
+/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */
-/**
- *
- * bdrv_co_copy_range:
- *
- * Do offloaded copy between two children. If the operation is not implemented
- * by the driver, or if the backend storage doesn't support it, a negative
- * error code will be returned.
- *
- * Note: block layer doesn't emulate or fallback to a bounce buffer approach
- * because usually the caller shouldn't attempt offloaded copy any more (e.g.
- * calling copy_file_range(2)) after the first error, thus it should fall back
- * to a read+write path in the caller level.
- *
- * @src: Source child to copy data from
- * @src_offset: offset in @src image to read data
- * @dst: Destination child to copy data to
- * @dst_offset: offset in @dst image to write data
- * @bytes: number of bytes to copy
- * @flags: request flags. Supported flags:
- * BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
- * write on @dst as if bdrv_co_pwrite_zeroes is
- * called. Used to simplify caller code, or
- * during BlockDriver.bdrv_co_copy_range_from()
- * recursion.
- * BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
- * requests currently in flight.
- *
- * Returns: 0 if succeeded; negative error code if failed.
- **/
-int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
- BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes, BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-#endif
+#endif /* BLOCK_H */
diff --git a/include/block/block_backup.h b/include/block/block_backup.h
index 157596c296..4d4d5ba153 100644
--- a/include/block/block_backup.h
+++ b/include/block/block_backup.h
@@ -18,7 +18,7 @@
#ifndef BLOCK_BACKUP_H
#define BLOCK_BACKUP_H
-#include "block/block_int.h"
+#include "block/blockjob.h"
void backup_do_checkpoint(BlockJob *job, Error **errp);
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
new file mode 100644
index 0000000000..761276127e
--- /dev/null
+++ b/include/block/block_int-common.h
@@ -0,0 +1,1321 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_INT_COMMON_H
+#define BLOCK_INT_COMMON_H
+
+#include "block/aio.h"
+#include "block/block-common.h"
+#include "block/block-global-state.h"
+#include "block/snapshot.h"
+#include "qemu/iov.h"
+#include "qemu/rcu.h"
+#include "qemu/stats64.h"
+
+#define BLOCK_FLAG_LAZY_REFCOUNTS 8
+
+#define BLOCK_OPT_SIZE "size"
+#define BLOCK_OPT_ENCRYPT "encryption"
+#define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format"
+#define BLOCK_OPT_COMPAT6 "compat6"
+#define BLOCK_OPT_HWVERSION "hwversion"
+#define BLOCK_OPT_BACKING_FILE "backing_file"
+#define BLOCK_OPT_BACKING_FMT "backing_fmt"
+#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
+#define BLOCK_OPT_TABLE_SIZE "table_size"
+#define BLOCK_OPT_PREALLOC "preallocation"
+#define BLOCK_OPT_SUBFMT "subformat"
+#define BLOCK_OPT_COMPAT_LEVEL "compat"
+#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
+#define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
+#define BLOCK_OPT_REDUNDANCY "redundancy"
+#define BLOCK_OPT_NOCOW "nocow"
+#define BLOCK_OPT_EXTENT_SIZE_HINT "extent_size_hint"
+#define BLOCK_OPT_OBJECT_SIZE "object_size"
+#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"
+#define BLOCK_OPT_DATA_FILE "data_file"
+#define BLOCK_OPT_DATA_FILE_RAW "data_file_raw"
+#define BLOCK_OPT_COMPRESSION_TYPE "compression_type"
+#define BLOCK_OPT_EXTL2 "extended_l2"
+
+#define BLOCK_PROBE_BUF_SIZE 512
+
+enum BdrvTrackedRequestType {
+ BDRV_TRACKED_READ,
+ BDRV_TRACKED_WRITE,
+ BDRV_TRACKED_DISCARD,
+ BDRV_TRACKED_TRUNCATE,
+};
+
+/*
+ * It is not ideal that the BdrvTrackedRequest structure is public, as
+ * block/io.c is very careful about incoming offset/bytes being correct.
+ * Be sure to assert that bdrv_check_request() succeeded after any
+ * modification of a BdrvTrackedRequest object outside of block/io.c.
+ */
+typedef struct BdrvTrackedRequest {
+ BlockDriverState *bs;
+ int64_t offset;
+ int64_t bytes;
+ enum BdrvTrackedRequestType type;
+
+ bool serialising;
+ int64_t overlap_offset;
+ int64_t overlap_bytes;
+
+ QLIST_ENTRY(BdrvTrackedRequest) list;
+ Coroutine *co; /* owner, used for deadlock detection */
+ CoQueue wait_queue; /* coroutines blocked on this request */
+
+ struct BdrvTrackedRequest *waiting_for;
+} BdrvTrackedRequest;
+
+
+struct BlockDriver {
+ /*
+ * These fields are initialized when this object is created,
+ * and are never changed afterwards.
+ */
+
+ const char *format_name;
+ int instance_size;
+
+ /*
+ * Set to true if the BlockDriver is a block filter. Block filters pass
+ * certain callbacks that refer to data (see block.c) to their bs->file
+ * or bs->backing (whichever one exists) if the driver doesn't implement
+ * them. Drivers that do not wish to forward must implement them and return
+ * -ENOTSUP.
+ * Note that filters are not allowed to modify data.
+ *
+ * Filters generally cannot have more than a single filtered child,
+ * because the data they present must at all times be the same as
+ * that on their filtered child. That would be impossible to
+ * achieve for multiple filtered children.
+ * (And this filtered child must then be bs->file or bs->backing.)
+ */
+ bool is_filter;
+ /*
+ * Only makes sense for filter drivers; for others it must be false.
+ * If true, the filtered child is bs->backing. Otherwise it's bs->file.
+ * Two internal filters use bs->backing as the filtered child and have this
+ * field set to true: mirror_top and commit_top. There are also two such
+ * test filters in tests/unit/test-bdrv-graph-mod.c.
+ *
+ * Never create any more such filters!
+ *
+ * TODO: work out how to deprecate this behavior and make all filters work
+ * similarly, using bs->file as the filtered child.
+ */
+ bool filtered_child_is_backing;
+
+ /*
+ * Set to true if the BlockDriver is a format driver. Format nodes
+ * generally do not expect their children to be other format nodes
+ * (except for backing files), and so format probing is disabled
+ * on those children.
+ */
+ bool is_format;
+
+ /*
+ * Set to true if the BlockDriver supports zoned children.
+ */
+ bool supports_zoned_children;
+
+ /*
+ * Drivers implementing neither bdrv_parse_filename nor bdrv_open should
+ * have this field set to true, except for those that are defined only by
+ * their child's bs.
+ * An example of the latter type is the quorum block driver.
+ */
+ bool bdrv_needs_filename;
+
+ /*
+ * Set if a driver can support backing files. This also implies the
+ * following semantics:
+ *
+ * - Return status 0 of .bdrv_co_block_status means that corresponding
+ * blocks are not allocated in this layer of backing-chain
+ * - For such (unallocated) blocks, read will:
+ * - fill buffer with zeros if there is no backing file
+ * - read from the backing file otherwise, where the block layer
+ * takes care of reading zeros beyond EOF if backing file is short
+ */
+ bool supports_backing;
+
+ /*
+ * Drivers setting this field must be able to work with just a plain
+ * filename with '<protocol_name>:' as a prefix, and no other options.
+ * Options may be extracted from the filename by implementing
+ * bdrv_parse_filename.
+ */
+ const char *protocol_name;
+
+ /* List of options for creating images, terminated by name == NULL */
+ QemuOptsList *create_opts;
+
+ /* List of options for image amend */
+ QemuOptsList *amend_opts;
+
+ /*
+ * If this driver supports reopening images this contains a
+ * NULL-terminated list of the runtime options that can be
+ * modified. If an option in this list is unspecified during
+ * reopen then it _must_ be reset to its default value, or an error
+ * must be returned.
+ */
+ const char *const *mutable_opts;
+
+ /*
+ * Pointer to a NULL-terminated array of names of strong options
+ * that can be specified for bdrv_open(). A strong option is one
+ * that changes the data of a BDS.
+ * If this pointer is NULL, the array is considered empty.
+ * "filename" and "driver" are always considered strong.
+ */
+ const char *const *strong_runtime_opts;
+
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+ /*
+ * This function is invoked under BQL before .bdrv_co_amend()
+ * (which in contrast does not necessarily run under the BQL)
+ * to allow driver-specific initialization code that requires
+ * the BQL, like setting up specific permission flags.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_amend_pre_run)(
+ BlockDriverState *bs, Error **errp);
+ /*
+ * This function is invoked under BQL after .bdrv_co_amend()
+ * to allow cleaning up what was done in .bdrv_amend_pre_run().
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_amend_clean)(BlockDriverState *bs);
+
+ /*
+ * Return true if @to_replace can be replaced by a BDS with the
+ * same data as @bs without it affecting @bs's behavior (that is,
+ * without it being visible to @bs's parents).
+ */
+ bool GRAPH_RDLOCK_PTR (*bdrv_recurse_can_replace)(
+ BlockDriverState *bs, BlockDriverState *to_replace);
+
+ int (*bdrv_probe_device)(const char *filename);
+
+ /*
+ * Any driver implementing this callback is expected to be able to handle
+ * NULL file names in its .bdrv_open() implementation.
+ */
+ void (*bdrv_parse_filename)(const char *filename, QDict *options,
+ Error **errp);
+
+ /* For handling image reopen for split or non-split files. */
+ int GRAPH_UNLOCKED_PTR (*bdrv_reopen_prepare)(
+ BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp);
+ void GRAPH_UNLOCKED_PTR (*bdrv_reopen_commit)(
+ BDRVReopenState *reopen_state);
+ void GRAPH_UNLOCKED_PTR (*bdrv_reopen_commit_post)(
+ BDRVReopenState *reopen_state);
+ void GRAPH_UNLOCKED_PTR (*bdrv_reopen_abort)(
+ BDRVReopenState *reopen_state);
+ void (*bdrv_join_options)(QDict *options, QDict *old_options);
+
+ int GRAPH_UNLOCKED_PTR (*bdrv_open)(
+ BlockDriverState *bs, QDict *options, int flags, Error **errp);
+
+ /* Protocol drivers should implement this instead of bdrv_open */
+ int GRAPH_UNLOCKED_PTR (*bdrv_file_open)(
+ BlockDriverState *bs, QDict *options, int flags, Error **errp);
+ void (*bdrv_close)(BlockDriverState *bs);
+
+ int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create)(
+ BlockdevCreateOptions *opts, Error **errp);
+
+ int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create_opts)(
+ BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_amend_options)(
+ BlockDriverState *bs, QemuOpts *opts,
+ BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
+ bool force, Error **errp);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_make_empty)(BlockDriverState *bs);
+
+ /*
+ * Refreshes the bs->exact_filename field. If that is impossible,
+ * bs->exact_filename has to be left empty.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_refresh_filename)(BlockDriverState *bs);
+
+ /*
+ * Gathers the open options for all children into @target.
+ * A simple format driver (without backing file support) might
+ * implement this function like this:
+ *
+ * QINCREF(bs->file->bs->full_open_options);
+ * qdict_put(target, "file", bs->file->bs->full_open_options);
+ *
+ * If not specified, the generic implementation will simply put
+ * all children's options under their respective name.
+ *
+ * @backing_overridden is true when bs->backing seems not to be
+ * the child that would result from opening bs->backing_file.
+ * Therefore, if it is true, the backing child's options should be
+ * gathered; otherwise, there is no need since the backing child
+ * is the one implied by the image header.
+ *
+ * Note that ideally this function would not be needed. Every
+ * block driver which implements it is probably doing something
+ * shady regarding its runtime option structure.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_gather_child_options)(
+ BlockDriverState *bs, QDict *target, bool backing_overridden);
+
+ /*
+ * Returns an allocated string which is the directory name of this BDS: It
+ * will be used to make relative filenames absolute by prepending this
+ * function's return value to them.
+ */
+ char * GRAPH_RDLOCK_PTR (*bdrv_dirname)(BlockDriverState *bs, Error **errp);
+
+ /*
+ * This informs the driver that we are no longer interested in the result
+ * of in-flight requests, so it should not waste time on them if possible.
+ *
+ * One example usage is to avoid waiting for an nbd target node reconnect
+ * timeout during job-cancel with force=true.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_cancel_in_flight)(BlockDriverState *bs);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_inactivate)(BlockDriverState *bs);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_create)(
+ BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
+
+ int GRAPH_UNLOCKED_PTR (*bdrv_snapshot_goto)(
+ BlockDriverState *bs, const char *snapshot_id);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_delete)(
+ BlockDriverState *bs, const char *snapshot_id, const char *name,
+ Error **errp);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_list)(
+ BlockDriverState *bs, QEMUSnapshotInfo **psn_info);
+
+ int GRAPH_RDLOCK_PTR (*bdrv_snapshot_load_tmp)(
+ BlockDriverState *bs, const char *snapshot_id, const char *name,
+ Error **errp);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_change_backing_file)(
+ BlockDriverState *bs, const char *backing_file,
+ const char *backing_fmt);
+
+ /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
+ int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
+ const char *tag);
+ int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
+ const char *tag);
+ int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
+ bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
+
+ void GRAPH_RDLOCK_PTR (*bdrv_refresh_limits)(
+ BlockDriverState *bs, Error **errp);
+
+ /*
+ * Returns 1 if newly created images are guaranteed to contain only
+ * zeros, 0 otherwise.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_has_zero_init)(BlockDriverState *bs);
+
+ /*
+ * Remove fd handlers, timers, and other event loop callbacks so the event
+ * loop is no longer in use. Called with no in-flight requests and in
+ * depth-first traversal order with parents before child nodes.
+ */
+ void (*bdrv_detach_aio_context)(BlockDriverState *bs);
+
+ /*
+ * Add fd handlers, timers, and other event loop callbacks so I/O requests
+ * can be processed again. Called with no in-flight requests and in
+ * depth-first traversal order with child nodes before parent nodes.
+ */
+ void (*bdrv_attach_aio_context)(BlockDriverState *bs,
+ AioContext *new_context);
+
+ /**
+ * bdrv_drain_begin is called, if implemented, at the beginning of a
+ * drain operation to drain and stop any internal sources of requests in
+ * the driver.
+ * bdrv_drain_end is called, if implemented, at the end of the drain.
+ *
+ * They should be used by the driver to e.g. manage scheduled I/O
+ * requests, or toggle an internal state. After the end of the drain new
+ * requests will continue normally.
+ *
+ * Implementations of both functions must not call aio_poll().
+ */
+ void (*bdrv_drain_begin)(BlockDriverState *bs);
+ void (*bdrv_drain_end)(BlockDriverState *bs);
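+
+ /*
+ * As a sketch, a driver that generates internal requests from a retry
+ * timer might implement these callbacks roughly as follows (BDRVFooState,
+ * the blkfoo_* names and the timer field are hypothetical driver state):
+ *
+ *     static void blkfoo_drain_begin(BlockDriverState *bs)
+ *     {
+ *         BDRVFooState *s = bs->opaque;
+ *         s->drained = true;
+ *         timer_del(&s->retry_timer);
+ *     }
+ *
+ *     static void blkfoo_drain_end(BlockDriverState *bs)
+ *     {
+ *         BDRVFooState *s = bs->opaque;
+ *         s->drained = false;
+ *     }
+ */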
+
+ /**
+ * Try to get @bs's logical and physical block size.
+ * On success, store them in @bsz and return zero.
+ * On failure, return negative errno.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_probe_blocksizes)(
+ BlockDriverState *bs, BlockSizes *bsz);
+ /**
+ * Try to get @bs's geometry (cyls, heads, sectors)
+ * On success, store them in @geo and return 0.
+ * On failure return -errno.
+ * Only drivers that want to override guest geometry implement this
+ * callback; see hd_geometry_guess().
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_probe_geometry)(
+ BlockDriverState *bs, HDGeometry *geo);
+
+ void GRAPH_WRLOCK_PTR (*bdrv_add_child)(
+ BlockDriverState *parent, BlockDriverState *child, Error **errp);
+
+ void GRAPH_WRLOCK_PTR (*bdrv_del_child)(
+ BlockDriverState *parent, BdrvChild *child, Error **errp);
+
+ /**
+ * Informs the block driver that a permission change is intended. The
+ * driver checks whether the change is permissible and may make other
+ * preparations for the change (e.g. take file system locks). This operation
+ * is always followed by a call to either .bdrv_set_perm or
+ * .bdrv_abort_perm_update.
+ *
+ * Checks whether the requested set of cumulative permissions in @perm
+ * can be granted for accessing @bs and whether no other users are using
+ * permissions other than those given in @shared (both arguments take
+ * BLK_PERM_* bitmasks).
+ *
+ * If both conditions are met, 0 is returned. Otherwise, -errno is returned
+ * and errp is set to an error describing the conflict.
+ */
+ int GRAPH_RDLOCK_PTR (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
+ uint64_t shared, Error **errp);
+
+ /**
+ * Called to inform the driver that the cumulative set of used
+ * permissions for @bs has changed to @perm, and the set of shareable
+ * permissions to @shared. The driver can use this to propagate changes to
+ * its children (i.e. request permissions only if a parent actually needs
+ * them).
+ *
+ * This function is only invoked after bdrv_check_perm(), so block drivers
+ * may rely on preparations made in their .bdrv_check_perm implementation.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_set_perm)(
+ BlockDriverState *bs, uint64_t perm, uint64_t shared);
+
+ /*
+ * Called to inform the driver that after a previous bdrv_check_perm()
+ * call, the permission update is not performed and any preparations made
+ * for it (e.g. taken file locks) need to be undone.
+ *
+ * This function can be called even for nodes that never saw a
+ * bdrv_check_perm() call. It is a no-op then.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_abort_perm_update)(BlockDriverState *bs);
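+
+ /*
+ * Simplified sketch of the order in which the block layer drives the three
+ * permission callbacks above (in reality the check is transactional across
+ * all affected nodes before anything is committed):
+ *
+ *   if (drv->bdrv_check_perm(bs, new_perm, new_shared, errp) == 0) {
+ *       drv->bdrv_set_perm(bs, new_perm, new_shared);    // commit
+ *   } else {
+ *       drv->bdrv_abort_perm_update(bs);                 // roll back
+ *   }
+ */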
+
+ /**
+ * Returns in @nperm and @nshared the permissions that the driver for @bs
+ * needs on its child @c, based on the cumulative permissions requested by
+ * the parents in @parent_perm and @parent_shared.
+ *
+ * If @c is NULL, return the permissions for attaching a new child for the
+ * given @child_class and @role.
+ *
+ * If @reopen_queue is non-NULL, don't return the currently needed
+ * permissions, but those that will be needed after applying the
+ * @reopen_queue.
+ */
+ void GRAPH_RDLOCK_PTR (*bdrv_child_perm)(
+ BlockDriverState *bs, BdrvChild *c, BdrvChildRole role,
+ BlockReopenQueue *reopen_queue,
+ uint64_t parent_perm, uint64_t parent_shared,
+ uint64_t *nperm, uint64_t *nshared);
+
+ /**
+ * Register/unregister a buffer for I/O. For example, when the driver is
+ * interested in knowing the memory areas that will later be used in iovs, so
+ * that it can do IOMMU mapping with VFIO etc., in order to get better
+ * performance. In the case of VFIO drivers, this callback is used to do
+ * DMA mapping for hot buffers.
+ *
+ * Returns: true on success, false on failure
+ */
+ bool GRAPH_RDLOCK_PTR (*bdrv_register_buf)(
+ BlockDriverState *bs, void *host, size_t size, Error **errp);
+ void GRAPH_RDLOCK_PTR (*bdrv_unregister_buf)(
+ BlockDriverState *bs, void *host, size_t size);
+
+ /*
+ * This field is modified only under the BQL, and is part of
+ * the global state.
+ */
+ QLIST_ENTRY(BlockDriver) list;
+
+ /*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_amend)(
+ BlockDriverState *bs, BlockdevAmendOptions *opts, bool force,
+ Error **errp);
+
+ /* aio */
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_preadv)(BlockDriverState *bs,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
+
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_pwritev)(BlockDriverState *bs,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
+
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_flush)(
+ BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque);
+
+ BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_pdiscard)(
+ BlockDriverState *bs, int64_t offset, int bytes,
+ BlockCompletionFunc *cb, void *opaque);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_readv)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+
+ /**
+ * @offset: position in bytes to read at
+ * @bytes: number of bytes to read
+ * @qiov: the buffers to fill with read data
+ * @flags: currently unused, always 0
+ *
+ * @offset and @bytes will be a multiple of 'request_alignment',
+ * but the length of individual @qiov elements does not have to
+ * be a multiple.
+ *
+ * @bytes will always equal the total size of @qiov, and will be
+ * no larger than 'max_transfer'.
+ *
+ * The buffer in @qiov may point directly to guest memory.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_preadv)(BlockDriverState *bs,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
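+
+ /*
+ * A minimal sketch of a pass-through implementation, as a filter driver
+ * might use it ("foo" is a hypothetical driver name):
+ *
+ *   static int coroutine_fn GRAPH_RDLOCK
+ *   foo_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ *                 QEMUIOVector *qiov, BdrvRequestFlags flags)
+ *   {
+ *       return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
+ *   }
+ */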
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_preadv_part)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_writev)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ int flags);
+ /**
+ * @offset: position in bytes to write at
+ * @bytes: number of bytes to write
+ * @qiov: the buffers containing data to write
+ * @flags: zero or more bits allowed by 'supported_write_flags'
+ *
+ * @offset and @bytes will be a multiple of 'request_alignment',
+ * but the length of individual @qiov elements does not have to
+ * be a multiple.
+ *
+ * @bytes will always equal the total size of @qiov, and will be
+ * no larger than 'max_transfer'.
+ *
+ * The buffer in @qiov may point directly to guest memory.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev_part)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset, BdrvRequestFlags flags);
+
+ /*
+ * Efficiently zero a region of the disk image. Typically an image format
+ * would use a compact metadata representation to implement this. This
+ * function pointer may be NULL or return -ENOTSUP and .bdrv_co_writev()
+ * will be called instead.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwrite_zeroes)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ BdrvRequestFlags flags);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pdiscard)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+ /*
+ * Map [offset, offset + nbytes) range onto a child of @bs to copy from,
+ * and invoke bdrv_co_copy_range_from(child, ...), or invoke
+ * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
+ *
+ * See the comment of bdrv_co_copy_range for the parameter and return value
+ * semantics.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_copy_range_from)(
+ BlockDriverState *bs, BdrvChild *src, int64_t offset,
+ BdrvChild *dst, int64_t dst_offset, int64_t bytes,
+ BdrvRequestFlags read_flags, BdrvRequestFlags write_flags);
+
+ /*
+ * Map [offset, offset + nbytes) range onto a child of bs to copy data to,
+ * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
+ * operation if @bs is the leaf and @src has the same BlockDriver. Return
+ * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
+ *
+ * See the comment of bdrv_co_copy_range for the parameter and return value
+ * semantics.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_copy_range_to)(
+ BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset, int64_t bytes,
+ BdrvRequestFlags read_flags, BdrvRequestFlags write_flags);
+
+ /*
+ * Building block for bdrv_block_status[_above] and
+ * bdrv_is_allocated[_above]. The driver should answer only
+ * according to the current layer, and should only need to set
+ * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
+ * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
+ * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
+ * block.h for the overall meaning of the bits. As a hint, the
+ * flag want_zero is true if the caller cares more about precise
+ * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
+ * overall allocation (favor larger *pnum, perhaps by reporting
+ * _DATA instead of _ZERO). The block layer guarantees input
+ * clamped to bdrv_getlength() and aligned to request_alignment,
+ * as well as non-NULL pnum, map, and file; in turn, the driver
+ * must return an error or set pnum to an aligned non-zero value.
+ *
+ * Note that @bytes is just a hint on how big of a region the
+ * caller wants to inspect. It is not a limit on *pnum.
+ * Implementations are free to return larger values of *pnum if
+ * doing so does not incur a performance penalty.
+ *
+ * block/io.c's bdrv_co_block_status() will utilize an unclamped
+ * *pnum value for the block-status cache on protocol nodes, prior
+ * to clamping *pnum for return to its caller.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_block_status)(
+ BlockDriverState *bs,
+ bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
+ int64_t *map, BlockDriverState **file);
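+
+ /*
+ * A hypothetical protocol driver whose image is always fully allocated
+ * could implement this roughly as follows (sketch only):
+ *
+ *   static int coroutine_fn GRAPH_RDLOCK
+ *   foo_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
+ *                       int64_t bytes, int64_t *pnum, int64_t *map,
+ *                       BlockDriverState **file)
+ *   {
+ *       *pnum = bytes;
+ *       *map = offset;
+ *       *file = bs;
+ *       return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
+ *   }
+ */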
+
+ /*
+ * Snapshot-access API.
+ *
+ * A block driver may provide a snapshot-access API: special functions to
+ * access some internal "snapshot". These functions are similar to the normal
+ * read/block_status/discard handlers, but don't get any special handling in
+ * the generic block layer: no serializing, no alignment, no tracked
+ * requests. A block driver that implements these APIs is therefore fully
+ * responsible for synchronization between the snapshot-access API and
+ * normal I/O requests.
+ *
+ * TODO: To be able to support qcow2's internal snapshots, this API will
+ * need to be extended to:
+ * - be able to select a specific snapshot
+ * - receive the snapshot's actual length (which may differ from bs's
+ * length)
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_preadv_snapshot)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_snapshot_block_status)(
+ BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pdiscard_snapshot)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+ /*
+ * Invalidate any cached meta-data.
+ */
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_invalidate_cache)(
+ BlockDriverState *bs, Error **errp);
+
+ /*
+ * Flushes all data for all layers by calling bdrv_co_flush for underlying
+ * layers, if needed. This function is needed for deterministic
+ * synchronization of the flush finishing callback.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_flush)(BlockDriverState *bs);
+
+ /* Delete a created file. */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_delete_file)(
+ BlockDriverState *bs, Error **errp);
+
+ /*
+ * Flushes all data that was already written to the OS all the way down to
+ * the disk (for example file-posix.c calls fsync()).
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_flush_to_disk)(
+ BlockDriverState *bs);
+
+ /*
+ * Flushes all internal caches to the OS. The data may still sit in a
+ * writeback cache of the host OS, but it will survive a crash of the qemu
+ * process.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_flush_to_os)(
+ BlockDriverState *bs);
+
+ /*
+ * Truncate @bs to @offset bytes using the given @prealloc mode
+ * when growing. Modes other than PREALLOC_MODE_OFF should be
+ * rejected when shrinking @bs.
+ *
+ * If @exact is true, @bs must be resized to exactly @offset.
+ * Otherwise, it is sufficient for @bs (if it is a host block
+ * device and thus there is no way to resize it) to be at least
+ * @offset bytes in length.
+ *
+ * If @exact is true and this function fails but would succeed
+ * with @exact = false, it should return -ENOTSUP.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_truncate)(
+ BlockDriverState *bs, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
+
+ int64_t coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_getlength)(
+ BlockDriverState *bs);
+
+ int64_t coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_allocated_file_size)(
+ BlockDriverState *bs);
+
+ BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
+ Error **errp);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev_compressed)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pwritev_compressed_part)(
+ BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_info)(
+ BlockDriverState *bs, BlockDriverInfo *bdi);
+
+ ImageInfoSpecific * GRAPH_RDLOCK_PTR (*bdrv_get_specific_info)(
+ BlockDriverState *bs, Error **errp);
+ BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_save_vmstate)(
+ BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_load_vmstate)(
+ BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
+
+ int coroutine_fn (*bdrv_co_zone_report)(BlockDriverState *bs,
+ int64_t offset, unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+ int coroutine_fn (*bdrv_co_zone_mgmt)(BlockDriverState *bs, BlockZoneOp op,
+ int64_t offset, int64_t len);
+ int coroutine_fn (*bdrv_co_zone_append)(BlockDriverState *bs,
+ int64_t *offset, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+ /* removable device specific */
+ bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_is_inserted)(
+ BlockDriverState *bs);
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_eject)(
+ BlockDriverState *bs, bool eject_flag);
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_lock_medium)(
+ BlockDriverState *bs, bool locked);
+
+ /* to control generic scsi devices */
+ BlockAIOCB *coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_aio_ioctl)(
+ BlockDriverState *bs, unsigned long int req, void *buf,
+ BlockCompletionFunc *cb, void *opaque);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_ioctl)(
+ BlockDriverState *bs, unsigned long int req, void *buf);
+
+ /*
+ * Returns 0 for completed check, -errno for internal errors.
+ * The check results are stored in result.
+ */
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_check)(
+ BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix);
+
+ void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_debug_event)(
+ BlockDriverState *bs, BlkdebugEvent event);
+
+ bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
+
+ bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_can_store_new_dirty_bitmap)(
+ BlockDriverState *bs, const char *name, uint32_t granularity,
+ Error **errp);
+
+ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_remove_persistent_dirty_bitmap)(
+ BlockDriverState *bs, const char *name, Error **errp);
+};
+
+static inline bool TSA_NO_TSA block_driver_can_compress(BlockDriver *drv)
+{
+ return drv->bdrv_co_pwritev_compressed ||
+ drv->bdrv_co_pwritev_compressed_part;
+}
+
+typedef struct BlockLimits {
+ /*
+ * Alignment requirement, in bytes, for offset/length of I/O
+ * requests. Must be a power of 2 less than INT_MAX; defaults to
+ * 1 for drivers with modern byte interfaces, and to 512
+ * otherwise.
+ */
+ uint32_t request_alignment;
+
+ /*
+ * Maximum number of bytes that can be discarded at once. Must be a multiple
+ * of pdiscard_alignment, but need not be a power of 2. May be 0 if there is
+ * no inherent 64-bit limit.
+ */
+ int64_t max_pdiscard;
+
+ /*
+ * Optimal alignment for discard requests in bytes. A power of 2
+ * is best but not mandatory. Must be a multiple of
+ * bl.request_alignment, and must be less than max_pdiscard if
+ * that is set. May be 0 if bl.request_alignment is good enough
+ */
+ uint32_t pdiscard_alignment;
+
+ /*
+ * Maximum number of bytes that can be zeroized at once. Must be a multiple
+ * of pwrite_zeroes_alignment. 0 means no limit.
+ */
+ int64_t max_pwrite_zeroes;
+
+ /*
+ * Optimal alignment for write zeroes requests in bytes. A power
+ * of 2 is best but not mandatory. Must be a multiple of
+ * bl.request_alignment, and must be less than max_pwrite_zeroes
+ * if that is set. May be 0 if bl.request_alignment is good
+ * enough
+ */
+ uint32_t pwrite_zeroes_alignment;
+
+ /*
+ * Optimal transfer length in bytes. A power of 2 is best but not
+ * mandatory. Must be a multiple of bl.request_alignment, or 0 if
+ * no preferred size
+ */
+ uint32_t opt_transfer;
+
+ /*
+ * Maximal transfer length in bytes. Need not be power of 2, but
+ * must be multiple of opt_transfer and bl.request_alignment, or 0
+ * for no 32-bit limit. For now, anything larger than INT_MAX is
+ * clamped down.
+ */
+ uint32_t max_transfer;
+
+ /*
+ * Maximal hardware transfer length in bytes. Applies whenever
+ * transfers to the device bypass the kernel I/O scheduler, for
+ * example with SG_IO. If larger than max_transfer or if zero,
+ * blk_get_max_hw_transfer will fall back to max_transfer.
+ */
+ uint64_t max_hw_transfer;
+
+ /*
+ * Maximal number of scatter/gather elements allowed by the hardware.
+ * Applies whenever transfers to the device bypass the kernel I/O
+ * scheduler, for example with SG_IO. If larger than max_iov
+ * or if zero, blk_get_max_hw_iov will fall back to max_iov.
+ */
+ int max_hw_iov;
+
+
+ /* memory alignment, in bytes so that no bounce buffer is needed */
+ size_t min_mem_alignment;
+
+ /* memory alignment, in bytes, for bounce buffer */
+ size_t opt_mem_alignment;
+
+ /* maximum number of iovec elements */
+ int max_iov;
+
+ /*
+ * true if the length of the underlying file can change, and QEMU
+ * is expected to adjust automatically. Mostly for CD-ROM drives,
+ * whose length is zero when the tray is empty (they don't need
+ * an explicit monitor command to load the disk inside the guest).
+ */
+ bool has_variable_length;
+
+ /* device zone model */
+ BlockZoneModel zoned;
+
+ /* zone size expressed in bytes */
+ uint32_t zone_size;
+
+ /* total number of zones */
+ uint32_t nr_zones;
+
+ /* maximum sectors of a zone append write operation */
+ uint32_t max_append_sectors;
+
+ /* maximum number of open zones */
+ uint32_t max_open_zones;
+
+ /* maximum number of active zones */
+ uint32_t max_active_zones;
+
+ uint32_t write_granularity;
+} BlockLimits;
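+
+/*
+ * Illustrative example (the "foo" driver and the values are hypothetical): a
+ * protocol driver backed by a device with 4 KiB sectors and a 1 MiB transfer
+ * limit might fill in its limits from .bdrv_refresh_limits like this:
+ *
+ *   static void foo_refresh_limits(BlockDriverState *bs, Error **errp)
+ *   {
+ *       bs->bl.request_alignment = 4096;
+ *       bs->bl.opt_transfer = 64 * KiB;
+ *       bs->bl.max_transfer = 1 * MiB;
+ *   }
+ */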
+
+typedef struct BdrvOpBlocker BdrvOpBlocker;
+
+typedef struct BdrvAioNotifier {
+ void (*attached_aio_context)(AioContext *new_context, void *opaque);
+ void (*detach_aio_context)(void *opaque);
+
+ void *opaque;
+ bool deleted;
+
+ QLIST_ENTRY(BdrvAioNotifier) list;
+} BdrvAioNotifier;
+
+struct BdrvChildClass {
+ /*
+ * If true, bdrv_replace_node() doesn't change the node this BdrvChild
+ * points to.
+ */
+ bool stay_at_node;
+
+ /*
+ * If true, the parent is a BlockDriverState and bdrv_next_all_states()
+ * will return it. This information is used for drain_all, where every node
+ * will be drained separately, so the drain only needs to be propagated to
+ * non-BDS parents.
+ */
+ bool parent_is_bds;
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+ void (*inherit_options)(BdrvChildRole role, bool parent_is_format,
+ int *child_flags, QDict *child_options,
+ int parent_flags, QDict *parent_options);
+ void (*change_media)(BdrvChild *child, bool load);
+
+ /*
+ * Returns a malloced string that describes the parent of the child for a
+ * human reader. This could be a node-name, BlockBackend name, qdev ID or
+ * QOM path of the device owning the BlockBackend, job type and ID etc. The
+ * caller is responsible for freeing the memory.
+ */
+ char *(*get_parent_desc)(BdrvChild *child);
+
+ /*
+ * Notifies the parent that the child has been activated/inactivated (e.g.
+ * when migration is completing) and it can start/stop requesting
+ * permissions and doing I/O on it.
+ */
+ void GRAPH_RDLOCK_PTR (*activate)(BdrvChild *child, Error **errp);
+ int GRAPH_RDLOCK_PTR (*inactivate)(BdrvChild *child);
+
+ void GRAPH_WRLOCK_PTR (*attach)(BdrvChild *child);
+ void GRAPH_WRLOCK_PTR (*detach)(BdrvChild *child);
+
+ /*
+ * If this pair of functions is implemented, the parent doesn't issue new
+ * requests after returning from .drained_begin() until .drained_end() is
+ * called.
+ *
+ * These functions must not change the graph (and therefore also must not
+ * call aio_poll(), which could change the graph indirectly).
+ *
+ * Note that this can be nested. If drained_begin() was called twice, new
+ * I/O is allowed only after drained_end() was called twice, too.
+ */
+ void GRAPH_RDLOCK_PTR (*drained_begin)(BdrvChild *child);
+ void GRAPH_RDLOCK_PTR (*drained_end)(BdrvChild *child);
+
+ /*
+ * Returns whether the parent has pending requests for the child. This
+ * callback is polled after .drained_begin() has been called until all
+ * activity on the child has stopped.
+ */
+ bool GRAPH_RDLOCK_PTR (*drained_poll)(BdrvChild *child);
+
+ /*
+ * Notifies the parent that the filename of its child has changed (e.g.
+ * because the direct child was removed from the backing chain), so that it
+ * can update its reference.
+ */
+ int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
+ const char *filename,
+ bool backing_mask_protocol,
+ Error **errp);
+
+ bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx,
+ GHashTable *visited, Transaction *tran,
+ Error **errp);
+
+ /*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ void (*resize)(BdrvChild *child);
+
+ /*
+ * Returns a name that is supposedly more useful for human users than the
+ * node name for identifying the node in question (in particular, a BB
+ * name), or NULL if the parent can't provide a better name.
+ */
+ const char *(*get_name)(BdrvChild *child);
+
+ AioContext *(*get_parent_aio_context)(BdrvChild *child);
+};
+
+extern const BdrvChildClass child_of_bds;
+
+struct BdrvChild {
+ BlockDriverState *bs;
+ char *name;
+ const BdrvChildClass *klass;
+ BdrvChildRole role;
+ void *opaque;
+
+ /**
+ * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
+ */
+ uint64_t perm;
+
+ /**
+ * Permissions that can still be granted to other users of @bs while this
+ * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
+ */
+ uint64_t shared_perm;
+
+ /*
+ * This link is frozen: the child can neither be replaced nor
+ * detached from the parent.
+ */
+ bool frozen;
+
+ /*
+ * True if the parent of this child has been drained by this BdrvChild
+ * (through klass->drained_*).
+ *
+ * It is generally true if bs->quiesce_counter > 0. It may differ while the
+ * child is entering or leaving a drained section.
+ */
+ bool quiesced_parent;
+
+ QLIST_ENTRY(BdrvChild GRAPH_RDLOCK_PTR) next;
+ QLIST_ENTRY(BdrvChild GRAPH_RDLOCK_PTR) next_parent;
+};
+
+/*
+ * Allows bdrv_co_block_status() to cache one data region for a
+ * protocol node.
+ *
+ * @valid: Whether the cache is valid (should be accessed with atomic
+ * functions so this can be reset by RCU readers)
+ * @data_start: Offset where we know (or strongly assume) there is data
+ * @data_end: Offset where the data region ends (which is not necessarily
+ * the start of a zeroed region)
+ */
+typedef struct BdrvBlockStatusCache {
+ struct rcu_head rcu;
+
+ bool valid;
+ int64_t data_start;
+ int64_t data_end;
+} BdrvBlockStatusCache;
+
+struct BlockDriverState {
+ /*
+ * Protected by big QEMU lock or read-only after opening. No special
+ * locking needed during I/O...
+ */
+ int open_flags; /* flags used to open the file, re-used for re-open */
+ bool encrypted; /* if true, the media is encrypted */
+ bool sg; /* if true, the device is a /dev/sg* */
+ bool probed; /* if true, format was probed rather than specified */
+ bool force_share; /* if true, always allow all shared permissions */
+ bool implicit; /* if true, this filter node was automatically inserted */
+
+ BlockDriver *drv; /* NULL means no media */
+ void *opaque;
+
+ AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
+ /*
+ * long-running tasks intended to always use the same AioContext as this
+ * BDS may register themselves in this list to be notified of changes
+ * regarding this BDS's context
+ */
+ QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
+ bool walking_aio_notifiers; /* to make removal during iteration safe */
+
+ char filename[PATH_MAX];
+ /*
+ * If not empty, this image is a diff in relation to backing_file.
+ * Note that this is the name given in the image header and
+ * therefore may or may not be equal to .backing->bs->filename.
+ * If this field contains a relative path, it is to be resolved
+ * relatively to the overlay's location.
+ */
+ char backing_file[PATH_MAX];
+ /*
+ * The backing filename indicated by the image header. Contrary
+ * to backing_file, if we ever open this file, auto_backing_file
+ * is replaced by the resulting BDS's filename (i.e. after a
+ * bdrv_refresh_filename() run).
+ */
+ char auto_backing_file[PATH_MAX];
+ char backing_format[16]; /* if non-zero and backing_file exists */
+
+ QDict *full_open_options;
+ char exact_filename[PATH_MAX];
+
+ /* I/O Limits */
+ BlockLimits bl;
+
+ /*
+ * Flags honored during pread
+ */
+ BdrvRequestFlags supported_read_flags;
+ /*
+ * Flags honored during pwrite (so far: BDRV_REQ_FUA,
+ * BDRV_REQ_WRITE_UNCHANGED).
+ * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
+ * writes will be issued as normal writes without the flag set.
+ * This is important to note for drivers that do not explicitly
+ * request a WRITE permission for their children and instead take
+ * the same permissions as their parent did (this is commonly what
+ * block filters do). Such drivers have to be aware that the
+ * parent may have taken a WRITE_UNCHANGED permission only and is
+ * issuing such requests. Drivers either must make sure that
+ * these requests do not result in plain WRITE accesses (usually
+ * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
+ * every incoming write request as-is, including potentially that
+ * flag), or they have to explicitly take the WRITE permission for
+ * their children.
+ */
+ BdrvRequestFlags supported_write_flags;
+ /*
+ * Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
+ * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED)
+ */
+ BdrvRequestFlags supported_zero_flags;
+ /*
+ * Flags honoured during truncate (so far: BDRV_REQ_ZERO_WRITE).
+ *
+ * If BDRV_REQ_ZERO_WRITE is given, the truncate operation must make sure
+ * that any added space reads as all zeros. If this can't be guaranteed,
+ * the operation must fail.
+ */
+ BdrvRequestFlags supported_truncate_flags;
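+
+ /*
+ * For example (sketch only), a driver whose backend can enforce write
+ * ordering might advertise these flags from its open function:
+ *
+ *   bs->supported_write_flags = BDRV_REQ_FUA;
+ *   bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP;
+ */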
+
+ /* the following member gives a name to every node on the bs graph. */
+ char node_name[32];
+ /* element of the list of named nodes building the graph */
+ QTAILQ_ENTRY(BlockDriverState) node_list;
+ /* element of the list of all BlockDriverStates (all_bdrv_states) */
+ QTAILQ_ENTRY(BlockDriverState) bs_list;
+ /* element of the list of monitor-owned BDS */
+ QTAILQ_ENTRY(BlockDriverState) monitor_list;
+ int refcnt;
+
+ /* operation blockers. Protected by BQL. */
+ QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
+
+ /*
+ * The node that this node inherited default options from (and a reopen on
+ * which can affect this node by changing these defaults). This is always a
+ * parent node of this node.
+ */
+ BlockDriverState *inherits_from;
+
+ /*
+ * @backing and @file are some of @children or NULL. All three of these
+ * fields (@file, @backing and @children) are modified only in
+ * bdrv_child_cb_attach() and bdrv_child_cb_detach().
+ *
+ * See also comment in include/block/block.h, to learn how backing and file
+ * are connected with BdrvChildRole.
+ */
+ QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) children;
+ BdrvChild * GRAPH_RDLOCK_PTR backing;
+ BdrvChild * GRAPH_RDLOCK_PTR file;
+
+ QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) parents;
+
+ QDict *options;
+ QDict *explicit_options;
+ BlockdevDetectZeroesOptions detect_zeroes;
+
+ /* The error object in use for blocking operations on backing_hd */
+ Error *backing_blocker;
+
+ /*
+ * If we are reading a disk image, give its size in sectors.
+ * Generally read-only; it is written to by load_snapshot and
+ * save_snapshot, but the block layer is quiescent during those.
+ */
+ int64_t total_sectors;
+
+ /* threshold limit for writes, in bytes. "High water mark". */
+ uint64_t write_threshold_offset;
+
+ /*
+ * Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
+ * Reading from the list can be done with either the BQL or the
+ * dirty_bitmap_mutex. Modifying a bitmap only requires
+ * dirty_bitmap_mutex.
+ */
+ QemuMutex dirty_bitmap_mutex;
+ QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
+
+ /* Offset after the highest byte written to */
+ Stat64 wr_highest_offset;
+
+ /*
+ * If true, copy read backing sectors into image. Can be >1 if more
+ * than one client has requested copy-on-read. Accessed with atomic
+ * ops.
+ */
+ int copy_on_read;
+
+ /*
+ * number of in-flight requests; overall and serialising.
+ * Accessed with atomic ops.
+ */
+ unsigned int in_flight;
+ unsigned int serialising_in_flight;
+
+ /* do we need to tell the guest if we have a volatile write cache? */
+ int enable_write_cache;
+
+ /* Accessed with atomic ops. */
+ int quiesce_counter;
+
+ unsigned int write_gen; /* Current data generation */
+
+ /* Protected by reqs_lock. */
+ QemuMutex reqs_lock;
+ QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
+ CoQueue flush_queue; /* Serializing flush queue */
+ bool active_flush_req; /* Flush request in flight? */
+
+ /* Only read/written by whoever has set active_flush_req to true. */
+ unsigned int flushed_gen; /* Flushed write generation */
+
+ /* BdrvChild links to this node may never be frozen */
+ bool never_freeze;
+
+ /* Lock for block-status cache RCU writers */
+ CoMutex bsc_modify_lock;
+ /* Always non-NULL, but must only be dereferenced under an RCU read guard */
+ BdrvBlockStatusCache *block_status_cache;
+
+ /* array of write pointers' location of each zone in the zoned device. */
+ BlockZoneWps *wps;
+};
+
+struct BlockBackendRootState {
+ int open_flags;
+ BlockdevDetectZeroesOptions detect_zeroes;
+};
+
+typedef enum BlockMirrorBackingMode {
+ /*
+ * Reuse the existing backing chain from the source for the target.
+ * - sync=full: Set backing BDS to NULL.
+ * - sync=top: Use source's backing BDS.
+ * - sync=none: Use source as the backing BDS.
+ */
+ MIRROR_SOURCE_BACKING_CHAIN,
+
+ /* Open the target's backing chain completely anew */
+ MIRROR_OPEN_BACKING_CHAIN,
+
+ /* Do not change the target's backing BDS after job completion */
+ MIRROR_LEAVE_BACKING_CHAIN,
+} BlockMirrorBackingMode;
+
+
+/*
+ * Essential block drivers which must always be statically linked into qemu, and
+ * which therefore can be accessed without using bdrv_find_format()
+ */
+extern BlockDriver bdrv_file;
+extern BlockDriver bdrv_raw;
+extern BlockDriver bdrv_qcow2;
+
+extern unsigned int bdrv_drain_all_count;
+extern QemuOptsList bdrv_create_opts_simple;
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * See include/block/block-common.h for more information about
+ * the Common API.
+ */
+
+static inline BlockDriverState *child_bs(BdrvChild *child)
+{
+ return child ? child->bs : NULL;
+}
+
+int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp);
+char *create_tmp_file(Error **errp);
+void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
+ QDict *options);
+
+
+int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ Error **errp);
+
+#ifdef _WIN32
+int is_windows_drive(const char *filename);
+#endif
+
+#endif /* BLOCK_INT_COMMON_H */
diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h
new file mode 100644
index 0000000000..d2201e27f4
--- /dev/null
+++ b/include/block/block_int-global-state.h
@@ -0,0 +1,326 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCK_INT_GLOBAL_STATE_H
+#define BLOCK_INT_GLOBAL_STATE_H
+
+#include "block/blockjob.h"
+#include "block/block_int-common.h"
+#include "qemu/hbitmap.h"
+#include "qemu/main-loop.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+/**
+ * stream_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @base: Block device that will become the new base, or %NULL to
+ * flatten the whole backing file chain onto @bs.
+ * @backing_file_str: The file name that will be written to @bs as the
+ * new backing file if the job completes. Ignored if @base is %NULL.
+ * @backing_mask_protocol: Replace potential protocol name with 'raw' in
+ * 'backing file format' header
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the stream job inserts into the graph above
+ * @bs. NULL means that a node name should be autogenerated.
+ * @errp: Error object.
+ *
+ * Start a streaming operation on @bs. Clusters that are unallocated
+ * in @bs, but allocated in any image between @base and @bs (both
+ * exclusive) will be written to @bs. At the end of a successful
+ * streaming job, the backing file of @bs will be changed to
+ * @backing_file_str in the written image and to @base in the live
+ * BlockDriverState.
+ */
+void stream_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, const char *backing_file_str,
+ bool backing_mask_protocol,
+ BlockDriverState *bottom,
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error,
+ const char *filter_node_name,
+ Error **errp);
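+
+/*
+ * For illustration, a QMP block-stream request without a bottom node maps
+ * onto a call roughly like this (names and values are examples only):
+ *
+ *   stream_start("stream0", bs, base_bs, "base.qcow2", false, NULL,
+ *                JOB_DEFAULT, 0, BLOCKDEV_ON_ERROR_REPORT, NULL, errp);
+ */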
+
+/**
+ * commit_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Active block device.
+ * @top: Top block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @backing_file_str: String to use as the backing file in @top's overlay
+ * @backing_mask_protocol: Replace potential protocol name with 'raw' in
+ * 'backing file format' header
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the commit job inserts into the graph above @top. NULL means
+ * that a node name should be autogenerated.
+ * @errp: Error object.
+ *
+ */
+void commit_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, BlockDriverState *top,
+ int creation_flags, int64_t speed,
+ BlockdevOnError on_error, const char *backing_file_str,
+ bool backing_mask_protocol,
+ const char *filter_node_name, Error **errp);
+/**
+ * commit_active_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Active block device to be committed.
+ * @base: Block device that will be written into, and become the new top.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the commit job inserts into the graph above @bs. NULL means that
+ * a node name should be autogenerated.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @auto_complete: Auto complete the job.
+ * @errp: Error object.
+ *
+ */
+BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, int creation_flags,
+ int64_t speed, BlockdevOnError on_error,
+ const char *filter_node_name,
+ BlockCompletionFunc *cb, void *opaque,
+ bool auto_complete, Error **errp);
+/*
+ * mirror_start:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @target: Block device to write to.
+ * @replaces: Block graph node name to replace once the mirror is done. Can
+ * only be used when full mirroring is selected.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @granularity: The chosen granularity for the dirty bitmap.
+ * @buf_size: The amount of data that can be in flight at one time.
+ * @mode: Whether to collapse all images in the chain to the target.
+ * @backing_mode: How to establish the target's backing chain after completion.
+ * @zero_target: Whether the target should be explicitly zero-initialized
+ * @on_source_error: The action to take upon error reading from the source.
+ * @on_target_error: The action to take upon error writing to the target.
+ * @unmap: Whether to unmap target where source sectors only contain zeroes.
+ * @filter_node_name: The node name that should be assigned to the filter
+ * driver that the mirror job inserts into the graph above @bs. NULL means that
+ * a node name should be autogenerated.
+ * @copy_mode: When to trigger writes to the target.
+ * @errp: Error object.
+ *
+ * Start a mirroring operation on @bs. Clusters that are allocated
+ * in @bs will be written to @target until the job is cancelled or
+ * manually completed. At the end of a successful mirroring job,
+ * @bs will be switched to read from @target.
+ */
+void mirror_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *target, const char *replaces,
+ int creation_flags, int64_t speed,
+ uint32_t granularity, int64_t buf_size,
+ MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
+ bool zero_target,
+ BlockdevOnError on_source_error,
+ BlockdevOnError on_target_error,
+ bool unmap, const char *filter_node_name,
+ MirrorCopyMode copy_mode, Error **errp);
+
+/*
+ * backup_job_create:
+ * @job_id: The id of the newly-created job, or %NULL to use the
+ * device name of @bs.
+ * @bs: Block device to operate on.
+ * @target: Block device to write to.
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @sync_mode: What parts of the disk image should be copied to the destination.
+ * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental'
+ * @bitmap_mode: The bitmap synchronization policy to use.
+ * @perf: Performance options. All actual fields assumed to be present,
+ * all ".has_*" fields are ignored.
+ * @on_source_error: The action to take upon error reading from the source.
+ * @on_target_error: The action to take upon error writing to the target.
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
+ * See @BlockJobCreateFlags
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @txn: Transaction that this job is part of (may be NULL).
+ *
+ * Create a backup operation on @bs. Clusters in @bs are written to @target
+ * until the job is cancelled or manually completed.
+ */
+BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *target, int64_t speed,
+ MirrorSyncMode sync_mode,
+ BdrvDirtyBitmap *sync_bitmap,
+ BitmapSyncMode bitmap_mode,
+ bool compress,
+ const char *filter_node_name,
+ BackupPerf *perf,
+ BlockdevOnError on_source_error,
+ BlockdevOnError on_target_error,
+ int creation_flags,
+ BlockCompletionFunc *cb, void *opaque,
+ JobTxn *txn, Error **errp);
+
+BdrvChild * GRAPH_WRLOCK
+bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name,
+ const BdrvChildClass *child_class,
+ BdrvChildRole child_role,
+ uint64_t perm, uint64_t shared_perm,
+ void *opaque, Error **errp);
+
+void GRAPH_WRLOCK bdrv_root_unref_child(BdrvChild *child);
+
+void GRAPH_RDLOCK bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
+ uint64_t *shared_perm);
+
+/**
+ * Sets a BdrvChild's permissions. Avoid if the parent is a BDS; use
+ * bdrv_child_refresh_perms() instead and make the parent's
+ * .bdrv_child_perm() implementation return the correct values.
+ */
+int GRAPH_RDLOCK
+bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
+ Error **errp);
+
+/**
+ * Calls bs->drv->bdrv_child_perm() and updates the child's permission
+ * masks with the result.
+ * Drivers should invoke this function whenever an event occurs that
+ * makes their .bdrv_child_perm() implementation return different
+ * values than before, but which will not result in the block layer
+ * automatically refreshing the permissions.
+ */
+int GRAPH_RDLOCK
+bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp);
+
+bool GRAPH_RDLOCK bdrv_recurse_can_replace(BlockDriverState *bs,
+ BlockDriverState *to_replace);
+
+/*
+ * Default implementation for BlockDriver.bdrv_child_perm() that can
+ * be used by block filters and image formats, as long as they use the
+ * child_of_bds child class and set an appropriate BdrvChildRole.
+ */
+void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c,
+ BdrvChildRole role, BlockReopenQueue *reopen_queue,
+ uint64_t perm, uint64_t shared,
+ uint64_t *nperm, uint64_t *nshared);
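+
+/*
+ * Most filter and format drivers simply install this as their callback in
+ * their BlockDriver definition, e.g.:
+ *
+ *   .bdrv_child_perm = bdrv_default_perms,
+ */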
+
+void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
+bool blk_dev_has_removable_media(BlockBackend *blk);
+void blk_dev_eject_request(BlockBackend *blk, bool force);
+bool blk_dev_is_medium_locked(BlockBackend *blk);
+
+void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
+
+void bdrv_set_monitor_owned(BlockDriverState *bs);
+
+void blockdev_close_all_bdrv_states(void);
+
+BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp);
+
+/**
+ * Simple implementation of bdrv_co_create_opts for protocol drivers
+ * which only support creation via opening a file
+ * (usually an existing raw storage device).
+ */
+int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
+ const char *filename,
+ QemuOpts *opts,
+ Error **errp);
+
+BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
+ const char *name,
+ BlockDriverState **pbs,
+ Error **errp);
+BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
+ BlockDirtyBitmapOrStrList *bms,
+ HBitmap **backup, Error **errp);
+BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
+ bool release,
+ BlockDriverState **bitmap_bs,
+ Error **errp);
+
+
+BlockDriverState * GRAPH_RDLOCK
+bdrv_skip_implicit_filters(BlockDriverState *bs);
+
+/**
+ * bdrv_add_aio_context_notifier:
+ *
+ * If a long-running job intends to always run in the same AioContext as a
+ * certain BDS, it may use this function to be notified of changes regarding
+ * the association of the BDS with an AioContext.
+ *
+ * attached_aio_context() is called after the target BDS has been attached to
+ * a new AioContext; detach_aio_context() is called before the target BDS is
+ * detached from its old AioContext.
+ */
+void bdrv_add_aio_context_notifier(BlockDriverState *bs,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque);
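+
+/*
+ * Example usage (sketch; the two callbacks and @job are hypothetical):
+ *
+ *   bdrv_add_aio_context_notifier(bs, job_attached_aio_context,
+ *                                 job_detach_aio_context, job);
+ */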
+
+/**
+ * bdrv_remove_aio_context_notifier:
+ *
+ * Unsubscribe from change notifications regarding the BDS's AioContext. The
+ * parameters given here have to be the same as those given to
+ * bdrv_add_aio_context_notifier().
+ */
+void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
+ void (*aio_context_attached)(AioContext *,
+ void *),
+ void (*aio_context_detached)(void *),
+ void *opaque);
+
+/**
+ * End all quiescent sections started by bdrv_drain_all_begin(). This is
+ * needed when deleting a BDS before bdrv_drain_all_end() is called.
+ *
+ * NOTE: this is an internal helper for bdrv_close() *only*. No one else
+ * should call it.
+ */
+void bdrv_drain_all_end_quiesce(BlockDriverState *bs);
+
+#endif /* BLOCK_INT_GLOBAL_STATE_H */
diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h
new file mode 100644
index 0000000000..4a7cf2b4fd
--- /dev/null
+++ b/include/block/block_int-io.h
@@ -0,0 +1,194 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_INT_IO_H
+#define BLOCK_INT_IO_H
+
+#include "block/block_int-common.h"
+#include "qemu/hbitmap.h"
+#include "qemu/main-loop.h"
+
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv_snapshot(BdrvChild *child,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_snapshot_block_status(
+ BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map, BlockDriverState **file);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard_snapshot(BlockDriverState *bs,
+ int64_t offset, int64_t bytes);
+
+
+int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv(BdrvChild *child,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv_part(BdrvChild *child,
+ int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pwritev(BdrvChild *child,
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn GRAPH_RDLOCK bdrv_co_pwritev_part(BdrvChild *child,
+ int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
+
+static inline int coroutine_fn GRAPH_RDLOCK bdrv_co_pread(BdrvChild *child,
+ int64_t offset, int64_t bytes, void *buf, BdrvRequestFlags flags)
+{
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
+ assert_bdrv_graph_readable();
+
+ return bdrv_co_preadv(child, offset, bytes, &qiov, flags);
+}
+
+static inline int coroutine_fn GRAPH_RDLOCK bdrv_co_pwrite(BdrvChild *child,
+ int64_t offset, int64_t bytes, const void *buf, BdrvRequestFlags flags)
+{
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
+ assert_bdrv_graph_readable();
+
+ return bdrv_co_pwritev(child, offset, bytes, &qiov, flags);
+}
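+
+/*
+ * Example (sketch): a format driver reading its header from the protocol
+ * child, where FooHeader is a hypothetical on-disk structure:
+ *
+ *   FooHeader header;
+ *   int ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0);
+ *   if (ret < 0) {
+ *       return ret;
+ *   }
+ */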
+
+void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+ uint64_t align);
+BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
+
+BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
+ const char *filename);
+
+/**
+ * bdrv_wakeup:
+ * @bs: The BlockDriverState for which an I/O operation has been completed.
+ *
+ * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
+ * synchronous I/O on a BlockDriverState that is attached to another
+ * I/O thread, the main thread lets the I/O thread's event loop run,
+ * waiting for the I/O operation to complete. A bdrv_wakeup will wake
+ * up the main thread if necessary.
+ *
+ * Manual calls to bdrv_wakeup are rarely necessary, because
+ * bdrv_dec_in_flight already calls it.
+ */
+void bdrv_wakeup(BlockDriverState *bs);
+
+const char * GRAPH_RDLOCK bdrv_get_parent_name(const BlockDriverState *bs);
+bool blk_dev_has_tray(BlockBackend *blk);
+bool blk_dev_is_tray_open(BlockBackend *blk);
+
+void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
+
+void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
+void bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
+ const BdrvDirtyBitmap *src,
+ HBitmap **backup, bool lock);
+
+void bdrv_inc_in_flight(BlockDriverState *bs);
+void bdrv_dec_in_flight(BlockDriverState *bs);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_refresh_total_sectors(BlockDriverState *bs, int64_t hint);
+
+int co_wrapper_mixed_bdrv_rdlock
+bdrv_refresh_total_sectors(BlockDriverState *bs, int64_t hint);
+
+BdrvChild * GRAPH_RDLOCK bdrv_cow_child(BlockDriverState *bs);
+BdrvChild * GRAPH_RDLOCK bdrv_filter_child(BlockDriverState *bs);
+BdrvChild * GRAPH_RDLOCK bdrv_filter_or_cow_child(BlockDriverState *bs);
+BdrvChild * GRAPH_RDLOCK bdrv_primary_child(BlockDriverState *bs);
+BlockDriverState * GRAPH_RDLOCK bdrv_skip_filters(BlockDriverState *bs);
+BlockDriverState * GRAPH_RDLOCK bdrv_backing_chain_next(BlockDriverState *bs);
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_cow_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_cow_child(bs));
+}
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_filter_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_filter_child(bs));
+}
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_filter_or_cow_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_filter_or_cow_child(bs));
+}
+
+static inline BlockDriverState * GRAPH_RDLOCK
+bdrv_primary_bs(BlockDriverState *bs)
+{
+ IO_CODE();
+ return child_bs(bdrv_primary_child(bs));
+}
+
+/**
+ * Check whether the given offset is in the cached block-status data
+ * region.
+ *
+ * If it is, and @pnum is not NULL, *pnum is set to
+ * `bsc.data_end - offset`, i.e. how many bytes, starting from
+ * @offset, are data (according to the cache).
+ * Otherwise, *pnum is not touched.
+ */
+bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum);
+
+/**
+ * If [offset, offset + bytes) overlaps with the currently cached
+ * block-status region, invalidate the cache.
+ *
+ * (To be used by I/O paths that cause data regions to be zero or
+ * holes.)
+ */
+void bdrv_bsc_invalidate_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes);
+
+/**
+ * Mark the range [offset, offset + bytes) as a data region.
+ */
+void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes);
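+
+/*
+ * Sketch of how a protocol driver might use the cache from its
+ * .bdrv_co_block_status implementation (expensive_host_query() is a
+ * placeholder for e.g. an lseek(SEEK_DATA/SEEK_HOLE) based lookup); write
+ * and discard paths would call bdrv_bsc_invalidate_range() instead:
+ *
+ *   if (bdrv_bsc_is_data(bs, offset, pnum)) {
+ *       *map = offset;
+ *       *file = bs;
+ *       return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;   // cache hit
+ *   }
+ *   ret = expensive_host_query(bs, offset, pnum);
+ *   if (ret == BDRV_BLOCK_DATA) {
+ *       bdrv_bsc_fill(bs, offset, *pnum);                   // remember it
+ *   }
+ */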
+
+#endif /* BLOCK_INT_IO_H */
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 38dec0275b..567a178e13 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -24,1361 +24,10 @@
#ifndef BLOCK_INT_H
#define BLOCK_INT_H
-#include "block/accounting.h"
-#include "block/block.h"
-#include "block/aio-wait.h"
-#include "qemu/queue.h"
-#include "qemu/coroutine.h"
-#include "qemu/stats64.h"
-#include "qemu/timer.h"
-#include "qemu/hbitmap.h"
-#include "block/snapshot.h"
-#include "qemu/throttle.h"
+#include "block/block_int-global-state.h"
+#include "block/block_int-io.h"
+#include "block/graph-lock.h"
-#define BLOCK_FLAG_LAZY_REFCOUNTS 8
-
-#define BLOCK_OPT_SIZE "size"
-#define BLOCK_OPT_ENCRYPT "encryption"
-#define BLOCK_OPT_ENCRYPT_FORMAT "encrypt.format"
-#define BLOCK_OPT_COMPAT6 "compat6"
-#define BLOCK_OPT_HWVERSION "hwversion"
-#define BLOCK_OPT_BACKING_FILE "backing_file"
-#define BLOCK_OPT_BACKING_FMT "backing_fmt"
-#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
-#define BLOCK_OPT_TABLE_SIZE "table_size"
-#define BLOCK_OPT_PREALLOC "preallocation"
-#define BLOCK_OPT_SUBFMT "subformat"
-#define BLOCK_OPT_COMPAT_LEVEL "compat"
-#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
-#define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
-#define BLOCK_OPT_REDUNDANCY "redundancy"
-#define BLOCK_OPT_NOCOW "nocow"
-#define BLOCK_OPT_EXTENT_SIZE_HINT "extent_size_hint"
-#define BLOCK_OPT_OBJECT_SIZE "object_size"
-#define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"
-#define BLOCK_OPT_DATA_FILE "data_file"
-#define BLOCK_OPT_DATA_FILE_RAW "data_file_raw"
-#define BLOCK_OPT_COMPRESSION_TYPE "compression_type"
-
-#define BLOCK_PROBE_BUF_SIZE 512
-
-enum BdrvTrackedRequestType {
- BDRV_TRACKED_READ,
- BDRV_TRACKED_WRITE,
- BDRV_TRACKED_DISCARD,
- BDRV_TRACKED_TRUNCATE,
-};
-
-typedef struct BdrvTrackedRequest {
- BlockDriverState *bs;
- int64_t offset;
- uint64_t bytes;
- enum BdrvTrackedRequestType type;
-
- bool serialising;
- int64_t overlap_offset;
- uint64_t overlap_bytes;
-
- QLIST_ENTRY(BdrvTrackedRequest) list;
- Coroutine *co; /* owner, used for deadlock detection */
- CoQueue wait_queue; /* coroutines blocked on this request */
-
- struct BdrvTrackedRequest *waiting_for;
-} BdrvTrackedRequest;
-
-struct BlockDriver {
- const char *format_name;
- int instance_size;
-
- /* set to true if the BlockDriver is a block filter. Block filters pass
- * certain callbacks that refer to data (see block.c) to their bs->file if
- * the driver doesn't implement them. Drivers that do not wish to forward
- * must implement them and return -ENOTSUP.
- */
- bool is_filter;
- /*
- * Set to true if the BlockDriver is a format driver. Format nodes
- * generally do not expect their children to be other format nodes
- * (except for backing files), and so format probing is disabled
- * on those children.
- */
- bool is_format;
- /*
- * Return true if @to_replace can be replaced by a BDS with the
- * same data as @bs without it affecting @bs's behavior (that is,
- * without it being visible to @bs's parents).
- */
- bool (*bdrv_recurse_can_replace)(BlockDriverState *bs,
- BlockDriverState *to_replace);
-
- int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
- int (*bdrv_probe_device)(const char *filename);
-
- /* Any driver implementing this callback is expected to be able to handle
- * NULL file names in its .bdrv_open() implementation */
- void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
- /* Drivers that implement neither bdrv_parse_filename nor bdrv_open should
- * have this field set to true, except ones that are defined only by their
- * child's bs.
- * An example of the last type is the quorum block driver.
- */
- bool bdrv_needs_filename;
-
- /*
- * Set if a driver can support backing files. This also implies the
- * following semantics:
- *
- * - A return status of 0 from .bdrv_co_block_status means that the
- * corresponding blocks are not allocated in this layer of the backing chain
- * - For such (unallocated) blocks, read will:
- * - fill buffer with zeros if there is no backing file
- * - read from the backing file otherwise, where the block layer
- * takes care of reading zeros beyond EOF if backing file is short
- */
- bool supports_backing;
-
- /* For handling image reopen for split or non-split files */
- int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
- BlockReopenQueue *queue, Error **errp);
- void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
- void (*bdrv_reopen_commit_post)(BDRVReopenState *reopen_state);
- void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
- void (*bdrv_join_options)(QDict *options, QDict *old_options);
-
- int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
- Error **errp);
-
- /* Protocol drivers should implement this instead of bdrv_open */
- int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
- Error **errp);
- void (*bdrv_close)(BlockDriverState *bs);
-
-
- int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts,
- Error **errp);
- int coroutine_fn (*bdrv_co_create_opts)(BlockDriver *drv,
- const char *filename,
- QemuOpts *opts,
- Error **errp);
-
- int coroutine_fn (*bdrv_co_amend)(BlockDriverState *bs,
- BlockdevAmendOptions *opts,
- bool force,
- Error **errp);
-
- int (*bdrv_amend_options)(BlockDriverState *bs,
- QemuOpts *opts,
- BlockDriverAmendStatusCB *status_cb,
- void *cb_opaque,
- bool force,
- Error **errp);
-
- int (*bdrv_make_empty)(BlockDriverState *bs);
-
- /*
- * Refreshes the bs->exact_filename field. If that is impossible,
- * bs->exact_filename has to be left empty.
- */
- void (*bdrv_refresh_filename)(BlockDriverState *bs);
-
- /*
- * Gathers the open options for all children into @target.
- * A simple format driver (without backing file support) might
- * implement this function like this:
- *
- * QINCREF(bs->file->bs->full_open_options);
- * qdict_put(target, "file", bs->file->bs->full_open_options);
- *
- * If not specified, the generic implementation will simply put
- * all children's options under their respective name.
- *
- * @backing_overridden is true when bs->backing seems not to be
- * the child that would result from opening bs->backing_file.
- * Therefore, if it is true, the backing child's options should be
- * gathered; otherwise, there is no need since the backing child
- * is the one implied by the image header.
- *
- * Note that ideally this function would not be needed. Every
- * block driver which implements it is probably doing something
- * shady regarding its runtime option structure.
- */
- void (*bdrv_gather_child_options)(BlockDriverState *bs, QDict *target,
- bool backing_overridden);
-
- /*
- * Returns an allocated string which is the directory name of this BDS: It
- * will be used to make relative filenames absolute by prepending this
- * function's return value to them.
- */
- char *(*bdrv_dirname)(BlockDriverState *bs, Error **errp);
-
- /* aio */
- BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
- BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
- BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque);
- BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
- int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque);
-
- int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
-
- /**
- * @offset: position in bytes to read at
- * @bytes: number of bytes to read
- * @qiov: the buffers to fill with read data
- * @flags: currently unused, always 0
- *
- * @offset and @bytes will be a multiple of 'request_alignment',
- * but the length of individual @qiov elements does not have to
- * be a multiple.
- *
- * @bytes will always equal the total size of @qiov, and will be
- * no larger than 'max_transfer'.
- *
- * The buffer in @qiov may point directly to guest memory.
- */
- int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
- int coroutine_fn (*bdrv_co_preadv_part)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes,
- QEMUIOVector *qiov, size_t qiov_offset, int flags);
- int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
- /**
- * @offset: position in bytes to write at
- * @bytes: number of bytes to write
- * @qiov: the buffers containing data to write
- * @flags: zero or more bits allowed by 'supported_write_flags'
- *
- * @offset and @bytes will be a multiple of 'request_alignment',
- * but the length of individual @qiov elements does not have to
- * be a multiple.
- *
- * @bytes will always equal the total size of @qiov, and will be
- * no larger than 'max_transfer'.
- *
- * The buffer in @qiov may point directly to guest memory.
- */
- int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
- int coroutine_fn (*bdrv_co_pwritev_part)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes,
- QEMUIOVector *qiov, size_t qiov_offset, int flags);
-
- /*
- * Efficiently zero a region of the disk image. Typically an image format
- * would use a compact metadata representation to implement this. This
- * function pointer may be NULL or return -ENOTSUP, in which case .bdrv_co_writev()
- * will be called instead.
- */
- int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
- int64_t offset, int bytes, BdrvRequestFlags flags);
- int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
- int64_t offset, int bytes);
-
- /* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
- * and invoke bdrv_co_copy_range_from(child, ...), or invoke
- * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
- *
- * See the comment of bdrv_co_copy_range for the parameter and return value
- * semantics.
- */
- int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
- BdrvChild *src,
- uint64_t offset,
- BdrvChild *dst,
- uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
- /* Map [offset, offset + nbytes) range onto a child of bs to copy data to,
- * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
- * operation if @bs is the leaf and @src has the same BlockDriver. Return
- * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
- *
- * See the comment of bdrv_co_copy_range for the parameter and return value
- * semantics.
- */
- int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
- BdrvChild *src,
- uint64_t src_offset,
- BdrvChild *dst,
- uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
- /*
- * Building block for bdrv_block_status[_above] and
- * bdrv_is_allocated[_above]. The driver should answer only
- * according to the current layer, and should only need to set
- * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
- * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
- * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
- * block.h for the overall meaning of the bits. As a hint, the
- * flag want_zero is true if the caller cares more about precise
- * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
- * overall allocation (favor larger *pnum, perhaps by reporting
- * _DATA instead of _ZERO). The block layer guarantees input
- * clamped to bdrv_getlength() and aligned to request_alignment,
- * as well as non-NULL pnum, map, and file; in turn, the driver
- * must return an error or set pnum to an aligned non-zero value.
- */
- int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
- bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
- int64_t *map, BlockDriverState **file);
-
- /*
- * Invalidate any cached meta-data.
- */
- void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
- Error **errp);
- int (*bdrv_inactivate)(BlockDriverState *bs);
-
- /*
- * Flushes all data for all layers by calling bdrv_co_flush for underlying
- * layers, if needed. This function is needed for deterministic
- * synchronization of the flush finishing callback.
- */
- int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);
-
- /* Delete a created file. */
- int coroutine_fn (*bdrv_co_delete_file)(BlockDriverState *bs,
- Error **errp);
-
- /*
- * Flushes all data that was already written to the OS all the way down to
- * the disk (for example file-posix.c calls fsync()).
- */
- int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);
-
- /*
- * Flushes all internal caches to the OS. The data may still sit in a
- * writeback cache of the host OS, but it will survive a crash of the qemu
- * process.
- */
- int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
-
- /*
- * Drivers setting this field must be able to work with just a plain
- * filename with '<protocol_name>:' as a prefix, and no other options.
- * Options may be extracted from the filename by implementing
- * bdrv_parse_filename.
- */
- const char *protocol_name;
-
- /*
- * Truncate @bs to @offset bytes using the given @prealloc mode
- * when growing. Modes other than PREALLOC_MODE_OFF should be
- * rejected when shrinking @bs.
- *
- * If @exact is true, @bs must be resized to exactly @offset.
- * Otherwise, it is sufficient for @bs (if it is a host block
- * device and thus there is no way to resize it) to be at least
- * @offset bytes in length.
- *
- * If @exact is true and this function fails but would succeed
- * with @exact = false, it should return -ENOTSUP.
- */
- int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset,
- bool exact, PreallocMode prealloc,
- BdrvRequestFlags flags, Error **errp);
-
- int64_t (*bdrv_getlength)(BlockDriverState *bs);
- bool has_variable_length;
- int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
- BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,
- Error **errp);
-
- int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);
- int coroutine_fn (*bdrv_co_pwritev_compressed_part)(BlockDriverState *bs,
- uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
- size_t qiov_offset);
-
- int (*bdrv_snapshot_create)(BlockDriverState *bs,
- QEMUSnapshotInfo *sn_info);
- int (*bdrv_snapshot_goto)(BlockDriverState *bs,
- const char *snapshot_id);
- int (*bdrv_snapshot_delete)(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
- int (*bdrv_snapshot_list)(BlockDriverState *bs,
- QEMUSnapshotInfo **psn_info);
- int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
- int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
- ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs,
- Error **errp);
- BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs);
-
- int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
- QEMUIOVector *qiov,
- int64_t pos);
- int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
- QEMUIOVector *qiov,
- int64_t pos);
-
- int (*bdrv_change_backing_file)(BlockDriverState *bs,
- const char *backing_file, const char *backing_fmt);
-
- /* removable device specific */
- bool (*bdrv_is_inserted)(BlockDriverState *bs);
- void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
- void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);
-
- /* to control generic scsi devices */
- BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
- unsigned long int req, void *buf,
- BlockCompletionFunc *cb, void *opaque);
- int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs,
- unsigned long int req, void *buf);
-
- /* List of options for creating images, terminated by name == NULL */
- QemuOptsList *create_opts;
-
- /* List of options for image amend */
- QemuOptsList *amend_opts;
-
- /*
- * If this driver supports reopening images, this contains a
- * NULL-terminated list of the runtime options that can be
- * modified. If an option in this list is unspecified during
- * reopen then it _must_ be reset to its default value or return
- * an error.
- */
- const char *const *mutable_opts;
-
- /*
- * Returns 0 for completed check, -errno for internal errors.
- * The check results are stored in result.
- */
- int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs,
- BdrvCheckResult *result,
- BdrvCheckMode fix);
-
- void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);
-
- /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
- int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
- const char *tag);
- int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
- const char *tag);
- int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
- bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
-
- void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);
-
- /*
- * Returns 1 if newly created images are guaranteed to contain only
- * zeros, 0 otherwise.
- */
- int (*bdrv_has_zero_init)(BlockDriverState *bs);
-
- /* Remove fd handlers, timers, and other event loop callbacks so the event
- * loop is no longer in use. Called with no in-flight requests and in
- * depth-first traversal order with parents before child nodes.
- */
- void (*bdrv_detach_aio_context)(BlockDriverState *bs);
-
- /* Add fd handlers, timers, and other event loop callbacks so I/O requests
- * can be processed again. Called with no in-flight requests and in
- * depth-first traversal order with child nodes before parent nodes.
- */
- void (*bdrv_attach_aio_context)(BlockDriverState *bs,
- AioContext *new_context);
-
- /* io queue for linux-aio */
- void (*bdrv_io_plug)(BlockDriverState *bs);
- void (*bdrv_io_unplug)(BlockDriverState *bs);
-
- /**
- * Try to get @bs's logical and physical block size.
- * On success, store them in @bsz and return zero.
- * On failure, return negative errno.
- */
- int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
- /**
- * Try to get @bs's geometry (cyls, heads, sectors)
- * On success, store them in @geo and return 0.
- * On failure return -errno.
- * Only drivers that want to override guest geometry implement this
- * callback; see hd_geometry_guess().
- */
- int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);
-
- /**
- * bdrv_co_drain_begin is called, if implemented, at the beginning of a
- * drain operation to drain and stop any internal sources of requests in
- * the driver.
- * bdrv_co_drain_end is called, if implemented, at the end of the drain.
- *
- * They should be used by the driver to e.g. manage scheduled I/O
- * requests, or toggle an internal state. After the end of the drain new
- * requests will continue normally.
- */
- void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
- void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
-
- void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
- Error **errp);
- void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
- Error **errp);
-
- /**
- * Informs the block driver that a permission change is intended. The
- * driver checks whether the change is permissible and may take other
- * preparations for the change (e.g. get file system locks). This operation
- * is always followed by a call to either .bdrv_set_perm or
- * .bdrv_abort_perm_update.
- *
- * Checks whether the requested set of cumulative permissions in @perm
- * can be granted for accessing @bs and whether no other users are using
- * permissions other than those given in @shared (both arguments take
- * BLK_PERM_* bitmasks).
- *
- * If both conditions are met, 0 is returned. Otherwise, -errno is returned
- * and errp is set to an error describing the conflict.
- */
- int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
- uint64_t shared, Error **errp);
-
- /**
- * Called to inform the driver that the cumulative set of used
- * permissions for @bs has changed to @perm, and the set of shareable
- * permissions to @shared. The driver can use this to propagate changes to
- * its children (i.e. request permissions only if a parent actually needs
- * them).
- *
- * This function is only invoked after bdrv_check_perm(), so block drivers
- * may rely on preparations made in their .bdrv_check_perm implementation.
- */
- void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared);
-
- /*
- * Called to inform the driver that after a previous bdrv_check_perm()
- * call, the permission update is not performed and any preparations made
- * for it (e.g. taken file locks) need to be undone.
- *
- * This function can be called even for nodes that never saw a
- * bdrv_check_perm() call. It is a no-op then.
- */
- void (*bdrv_abort_perm_update)(BlockDriverState *bs);
-
- /**
- * Returns in @nperm and @nshared the permissions that the driver for @bs
- * needs on its child @c, based on the cumulative permissions requested by
- * the parents in @parent_perm and @parent_shared.
- *
- * If @c is NULL, return the permissions for attaching a new child for the
- * given @child_class and @role.
- *
- * If @reopen_queue is non-NULL, don't return the currently needed
- * permissions, but those that will be needed after applying the
- * @reopen_queue.
- */
- void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c,
- BdrvChildRole role,
- BlockReopenQueue *reopen_queue,
- uint64_t parent_perm, uint64_t parent_shared,
- uint64_t *nperm, uint64_t *nshared);
-
- bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
- bool (*bdrv_co_can_store_new_dirty_bitmap)(BlockDriverState *bs,
- const char *name,
- uint32_t granularity,
- Error **errp);
- int (*bdrv_co_remove_persistent_dirty_bitmap)(BlockDriverState *bs,
- const char *name,
- Error **errp);
-
- /**
- * Register/unregister a buffer for I/O. For example, a driver may want to
- * know the memory areas that will later be used in iovs, so that it can do
- * IOMMU mapping with VFIO etc. in order to get better
- * performance. In the case of VFIO drivers, this callback is used to do
- * DMA mapping for hot buffers.
- */
- void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size);
- void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host);
- QLIST_ENTRY(BlockDriver) list;
-
- /* Pointer to a NULL-terminated array of names of strong options
- * that can be specified for bdrv_open(). A strong option is one
- * that changes the data of a BDS.
- * If this pointer is NULL, the array is considered empty.
- * "filename" and "driver" are always considered strong. */
- const char *const *strong_runtime_opts;
-};
-
-static inline bool block_driver_can_compress(BlockDriver *drv)
-{
- return drv->bdrv_co_pwritev_compressed ||
- drv->bdrv_co_pwritev_compressed_part;
-}
-
-typedef struct BlockLimits {
- /* Alignment requirement, in bytes, for offset/length of I/O
- * requests. Must be a power of 2 less than INT_MAX; defaults to
- * 1 for drivers with modern byte interfaces, and to 512
- * otherwise. */
- uint32_t request_alignment;
-
- /* Maximum number of bytes that can be discarded at once (since it
- * is signed, it must be < 2G, if set). Must be multiple of
- * pdiscard_alignment, but need not be power of 2. May be 0 if no
- * inherent 32-bit limit */
- int32_t max_pdiscard;
-
- /* Optimal alignment for discard requests in bytes. A power of 2
- * is best but not mandatory. Must be a multiple of
- * bl.request_alignment, and must be less than max_pdiscard if
- * that is set. May be 0 if bl.request_alignment is good enough */
- uint32_t pdiscard_alignment;
-
- /* Maximum number of bytes that can be zeroed at once (since it is
- * signed, it must be < 2G, if set). Must be multiple of
- * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
- int32_t max_pwrite_zeroes;
-
- /* Optimal alignment for write zeroes requests in bytes. A power
- * of 2 is best but not mandatory. Must be a multiple of
- * bl.request_alignment, and must be less than max_pwrite_zeroes
- * if that is set. May be 0 if bl.request_alignment is good
- * enough */
- uint32_t pwrite_zeroes_alignment;
-
- /* Optimal transfer length in bytes. A power of 2 is best but not
- * mandatory. Must be a multiple of bl.request_alignment, or 0 if
- * no preferred size */
- uint32_t opt_transfer;
-
- /* Maximal transfer length in bytes. Need not be power of 2, but
- * must be multiple of opt_transfer and bl.request_alignment, or 0
- * for no 32-bit limit. For now, anything larger than INT_MAX is
- * clamped down. */
- uint32_t max_transfer;
-
- /* memory alignment, in bytes so that no bounce buffer is needed */
- size_t min_mem_alignment;
-
- /* memory alignment, in bytes, for bounce buffer */
- size_t opt_mem_alignment;
-
- /* maximum number of iovec elements */
- int max_iov;
-} BlockLimits;
-
-typedef struct BdrvOpBlocker BdrvOpBlocker;
-
-typedef struct BdrvAioNotifier {
- void (*attached_aio_context)(AioContext *new_context, void *opaque);
- void (*detach_aio_context)(void *opaque);
-
- void *opaque;
- bool deleted;
-
- QLIST_ENTRY(BdrvAioNotifier) list;
-} BdrvAioNotifier;
-
-struct BdrvChildClass {
- /* If true, bdrv_replace_node() doesn't change the node this BdrvChild
- * points to. */
- bool stay_at_node;
-
- /* If true, the parent is a BlockDriverState and bdrv_next_all_states()
- * will return it. This information is used for drain_all, where every node
- * will be drained separately, so the drain only needs to be propagated to
- * non-BDS parents. */
- bool parent_is_bds;
-
- void (*inherit_options)(BdrvChildRole role, bool parent_is_format,
- int *child_flags, QDict *child_options,
- int parent_flags, QDict *parent_options);
-
- void (*change_media)(BdrvChild *child, bool load);
- void (*resize)(BdrvChild *child);
-
- /* Returns a name that is supposedly more useful for human users than the
- * node name for identifying the node in question (in particular, a BB
- * name), or NULL if the parent can't provide a better name. */
- const char *(*get_name)(BdrvChild *child);
-
- /* Returns a malloced string that describes the parent of the child for a
- * human reader. This could be a node-name, BlockBackend name, qdev ID or
- * QOM path of the device owning the BlockBackend, job type and ID etc. The
- * caller is responsible for freeing the memory. */
- char *(*get_parent_desc)(BdrvChild *child);
-
- /*
- * If this pair of functions is implemented, the parent doesn't issue new
- * requests after returning from .drained_begin() until .drained_end() is
- * called.
- *
- * These functions must not change the graph (and therefore also must not
- * call aio_poll(), which could change the graph indirectly).
- *
- * If drained_end() schedules background operations, it must atomically
- * increment *drained_end_counter for each such operation and atomically
- * decrement it once the operation has settled.
- *
- * Note that this can be nested. If drained_begin() was called twice, new
- * I/O is allowed only after drained_end() was called twice, too.
- */
- void (*drained_begin)(BdrvChild *child);
- void (*drained_end)(BdrvChild *child, int *drained_end_counter);
-
- /*
- * Returns whether the parent has pending requests for the child. This
- * callback is polled after .drained_begin() has been called until all
- * activity on the child has stopped.
- */
- bool (*drained_poll)(BdrvChild *child);
-
- /* Notifies the parent that the child has been activated/inactivated (e.g.
- * when migration is completing) and it can start/stop requesting
- * permissions and doing I/O on it. */
- void (*activate)(BdrvChild *child, Error **errp);
- int (*inactivate)(BdrvChild *child);
-
- void (*attach)(BdrvChild *child);
- void (*detach)(BdrvChild *child);
-
- /* Notifies the parent that the filename of its child has changed (e.g.
- * because the direct child was removed from the backing chain), so that it
- * can update its reference. */
- int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
- const char *filename, Error **errp);
-
- bool (*can_set_aio_ctx)(BdrvChild *child, AioContext *ctx,
- GSList **ignore, Error **errp);
- void (*set_aio_ctx)(BdrvChild *child, AioContext *ctx, GSList **ignore);
-};
-
-extern const BdrvChildClass child_of_bds;
-
-struct BdrvChild {
- BlockDriverState *bs;
- char *name;
- const BdrvChildClass *klass;
- BdrvChildRole role;
- void *opaque;
-
- /**
- * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
- */
- uint64_t perm;
-
- /**
- * Permissions that can still be granted to other users of @bs while this
- * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
- */
- uint64_t shared_perm;
-
- /* backup of permissions during permission update procedure */
- bool has_backup_perm;
- uint64_t backup_perm;
- uint64_t backup_shared_perm;
-
- /*
- * This link is frozen: the child can neither be replaced nor
- * detached from the parent.
- */
- bool frozen;
-
- /*
- * How many times the parent of this child has been drained
- * (through klass->drained_*).
- * Usually, this is equal to bs->quiesce_counter (potentially
- * reduced by bdrv_drain_all_count). It may differ while the
- * child is entering or leaving a drained section.
- */
- int parent_quiesce_counter;
-
- QLIST_ENTRY(BdrvChild) next;
- QLIST_ENTRY(BdrvChild) next_parent;
-};
-
-/*
- * Note: the function bdrv_append() copies and swaps contents of
- * BlockDriverStates, so if you add new fields to this struct, please
- * inspect bdrv_append() to determine if the new fields need to be
- * copied as well.
- */
-struct BlockDriverState {
- /* Protected by big QEMU lock or read-only after opening. No special
- * locking needed during I/O...
- */
- int open_flags; /* flags used to open the file, re-used for re-open */
- bool read_only; /* if true, the media is read only */
- bool encrypted; /* if true, the media is encrypted */
- bool sg; /* if true, the device is a /dev/sg* */
- bool probed; /* if true, format was probed rather than specified */
- bool force_share; /* if true, always allow all shared permissions */
- bool implicit; /* if true, this filter node was automatically inserted */
-
- BlockDriver *drv; /* NULL means no media */
- void *opaque;
-
- AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
- /* long-running tasks intended to always use the same AioContext as this
- * BDS may register themselves in this list to be notified of changes
- * regarding this BDS's context */
- QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
- bool walking_aio_notifiers; /* to make removal during iteration safe */
-
- char filename[PATH_MAX];
- char backing_file[PATH_MAX]; /* if non zero, the image is a diff of
- this file image */
- /* The backing filename indicated by the image header; if we ever
- * open this file, then this is replaced by the resulting BDS's
- * filename (i.e. after a bdrv_refresh_filename() run). */
- char auto_backing_file[PATH_MAX];
- char backing_format[16]; /* if non-zero and backing_file exists */
-
- QDict *full_open_options;
- char exact_filename[PATH_MAX];
-
- BdrvChild *backing;
- BdrvChild *file;
-
- /* I/O Limits */
- BlockLimits bl;
-
- /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
- * BDRV_REQ_WRITE_UNCHANGED).
- * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
- * writes will be issued as normal writes without the flag set.
- * This is important to note for drivers that do not explicitly
- * request a WRITE permission for their children and instead take
- * the same permissions as their parent did (this is commonly what
- * block filters do). Such drivers have to be aware that the
- * parent may have taken a WRITE_UNCHANGED permission only and is
- * issuing such requests. Drivers either must make sure that
- * these requests do not result in plain WRITE accesses (usually
- * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
- * every incoming write request as-is, including potentially that
- * flag), or they have to explicitly take the WRITE permission for
- * their children. */
- unsigned int supported_write_flags;
- /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
- * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */
- unsigned int supported_zero_flags;
- /*
- * Flags honoured during truncate (so far: BDRV_REQ_ZERO_WRITE).
- *
- * If BDRV_REQ_ZERO_WRITE is given, the truncate operation must make sure
- * that any added space reads as all zeros. If this can't be guaranteed,
- * the operation must fail.
- */
- unsigned int supported_truncate_flags;
-
- /* the following member gives a name to every node on the bs graph. */
- char node_name[32];
- /* element of the list of named nodes building the graph */
- QTAILQ_ENTRY(BlockDriverState) node_list;
- /* element of the list of all BlockDriverStates (all_bdrv_states) */
- QTAILQ_ENTRY(BlockDriverState) bs_list;
- /* element of the list of monitor-owned BDS */
- QTAILQ_ENTRY(BlockDriverState) monitor_list;
- int refcnt;
-
- /* operation blockers */
- QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
-
- /* The node that this node inherited default options from (and a reopen on
- * which can affect this node by changing these defaults). This is always a
- * parent node of this node. */
- BlockDriverState *inherits_from;
- QLIST_HEAD(, BdrvChild) children;
- QLIST_HEAD(, BdrvChild) parents;
-
- QDict *options;
- QDict *explicit_options;
- BlockdevDetectZeroesOptions detect_zeroes;
-
- /* The error object in use for blocking operations on backing_hd */
- Error *backing_blocker;
-
- /* Protected by AioContext lock */
-
- /* If we are reading a disk image, give its size in sectors.
- * Generally read-only; it is written to by load_snapshot and
- * save_snapshot, but the block layer is quiescent during those.
- */
- int64_t total_sectors;
-
- /* Callback before write request is processed */
- NotifierWithReturnList before_write_notifiers;
-
- /* threshold limit for writes, in bytes. "High water mark". */
- uint64_t write_threshold_offset;
- NotifierWithReturn write_threshold_notifier;
-
- /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
- * Reading from the list can be done with either the BQL or the
- * dirty_bitmap_mutex. Modifying a bitmap only requires
- * dirty_bitmap_mutex. */
- QemuMutex dirty_bitmap_mutex;
- QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
-
- /* Offset after the highest byte written to */
- Stat64 wr_highest_offset;
-
- /* If true, copy read backing sectors into image. Can be >1 if more
- * than one client has requested copy-on-read. Accessed with atomic
- * ops.
- */
- int copy_on_read;
-
- /* number of in-flight requests; overall and serialising.
- * Accessed with atomic ops.
- */
- unsigned int in_flight;
- unsigned int serialising_in_flight;
-
- /* counter for nested bdrv_io_plug.
- * Accessed with atomic ops.
- */
- unsigned io_plugged;
-
- /* do we need to tell the guest if we have a volatile write cache? */
- int enable_write_cache;
-
- /* Accessed with atomic ops. */
- int quiesce_counter;
- int recursive_quiesce_counter;
-
- unsigned int write_gen; /* Current data generation */
-
- /* Protected by reqs_lock. */
- CoMutex reqs_lock;
- QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
- CoQueue flush_queue; /* Serializing flush queue */
- bool active_flush_req; /* Flush request in flight? */
-
- /* Only read/written by whoever has set active_flush_req to true. */
- unsigned int flushed_gen; /* Flushed write generation */
-
- /* BdrvChild links to this node may never be frozen */
- bool never_freeze;
-};
-
-struct BlockBackendRootState {
- int open_flags;
- bool read_only;
- BlockdevDetectZeroesOptions detect_zeroes;
-};
-
-typedef enum BlockMirrorBackingMode {
- /* Reuse the existing backing chain from the source for the target.
- * - sync=full: Set backing BDS to NULL.
- * - sync=top: Use source's backing BDS.
- * - sync=none: Use source as the backing BDS. */
- MIRROR_SOURCE_BACKING_CHAIN,
-
- /* Open the target's backing chain completely anew */
- MIRROR_OPEN_BACKING_CHAIN,
-
- /* Do not change the target's backing BDS after job completion */
- MIRROR_LEAVE_BACKING_CHAIN,
-} BlockMirrorBackingMode;
-
-static inline BlockDriverState *backing_bs(BlockDriverState *bs)
-{
- return bs->backing ? bs->backing->bs : NULL;
-}
-
-
-/* Essential block drivers which must always be statically linked into qemu, and
- * which therefore can be accessed without using bdrv_find_format() */
-extern BlockDriver bdrv_file;
-extern BlockDriver bdrv_raw;
-extern BlockDriver bdrv_qcow2;
-
-int coroutine_fn bdrv_co_preadv(BdrvChild *child,
- int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
- int64_t offset, unsigned int bytes,
- QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
-int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
- int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
- int64_t offset, unsigned int bytes,
- QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
-
-static inline int coroutine_fn bdrv_co_pread(BdrvChild *child,
- int64_t offset, unsigned int bytes, void *buf, BdrvRequestFlags flags)
-{
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
-
- return bdrv_co_preadv(child, offset, bytes, &qiov, flags);
-}
-
-static inline int coroutine_fn bdrv_co_pwrite(BdrvChild *child,
- int64_t offset, unsigned int bytes, void *buf, BdrvRequestFlags flags)
-{
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
-
- return bdrv_co_pwritev(child, offset, bytes, &qiov, flags);
-}
-
-extern unsigned int bdrv_drain_all_count;
-void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
-void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
-
-bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
-BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
-
-int get_tmp_filename(char *filename, int size);
-BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
- const char *filename);
-
-void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,
- QDict *options);
-
-
-/**
- * bdrv_add_before_write_notifier:
- *
- * Register a callback that is invoked before write requests are processed but
- * after any throttling or waiting for overlapping requests.
- */
-void bdrv_add_before_write_notifier(BlockDriverState *bs,
- NotifierWithReturn *notifier);
-
-/**
- * bdrv_add_aio_context_notifier:
- *
- * If a long-running job intends to always run in the same AioContext as a
- * certain BDS, it may use this function to be notified of changes regarding the
- * association of the BDS to an AioContext.
- *
- * attached_aio_context() is called after the target BDS has been attached to a
- * new AioContext; detach_aio_context() is called before the target BDS is being
- * detached from its old AioContext.
- */
-void bdrv_add_aio_context_notifier(BlockDriverState *bs,
- void (*attached_aio_context)(AioContext *new_context, void *opaque),
- void (*detach_aio_context)(void *opaque), void *opaque);
-
-/**
- * bdrv_remove_aio_context_notifier:
- *
- * Unsubscribe from change notifications regarding the BDS's AioContext. The
- * parameters given here have to be the same as those given to
- * bdrv_add_aio_context_notifier().
- */
-void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
- void (*aio_context_attached)(AioContext *,
- void *),
- void (*aio_context_detached)(void *),
- void *opaque);
-
-/**
- * bdrv_wakeup:
- * @bs: The BlockDriverState for which an I/O operation has been completed.
- *
- * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
- * synchronous I/O on a BlockDriverState that is attached to another
- * I/O thread, the main thread lets the I/O thread's event loop run,
- * waiting for the I/O operation to complete. A bdrv_wakeup will wake
- * up the main thread if necessary.
- *
- * Manual calls to bdrv_wakeup are rarely necessary, because
- * bdrv_dec_in_flight already calls it.
- */
-void bdrv_wakeup(BlockDriverState *bs);
-
-#ifdef _WIN32
-int is_windows_drive(const char *filename);
-#endif
-
-/**
- * stream_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Block device to operate on.
- * @base: Block device that will become the new base, or %NULL to
- * flatten the whole backing file chain onto @bs.
- * @backing_file_str: The file name that will be written to @bs as the
- * new backing file if the job completes. Ignored if @base is %NULL.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @on_error: The action to take upon error.
- * @errp: Error object.
- *
- * Start a streaming operation on @bs. Clusters that are unallocated
- * in @bs, but allocated in any image between @base and @bs (both
- * exclusive) will be written to @bs. At the end of a successful
- * streaming job, the backing file of @bs will be changed to
- * @backing_file_str in the written image and to @base in the live
- * BlockDriverState.
- */
-void stream_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, const char *backing_file_str,
- int creation_flags, int64_t speed,
- BlockdevOnError on_error, Error **errp);
-
-/**
- * commit_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Active block device.
- * @top: Top block device to be committed.
- * @base: Block device that will be written into, and become the new top.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @on_error: The action to take upon error.
- * @backing_file_str: String to use as the backing file in @top's overlay
- * @filter_node_name: The node name that should be assigned to the filter
- * driver that the commit job inserts into the graph above @top. NULL means
- * that a node name should be autogenerated.
- * @errp: Error object.
- *
- */
-void commit_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, BlockDriverState *top,
- int creation_flags, int64_t speed,
- BlockdevOnError on_error, const char *backing_file_str,
- const char *filter_node_name, Error **errp);
-/**
- * commit_active_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Active block device to be committed.
- * @base: Block device that will be written into, and become the new top.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @on_error: The action to take upon error.
- * @filter_node_name: The node name that should be assigned to the filter
- * driver that the commit job inserts into the graph above @bs. NULL means that
- * a node name should be autogenerated.
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
- * @auto_complete: Auto complete the job.
- * @errp: Error object.
- *
- */
-BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, int creation_flags,
- int64_t speed, BlockdevOnError on_error,
- const char *filter_node_name,
- BlockCompletionFunc *cb, void *opaque,
- bool auto_complete, Error **errp);
-/*
- * mirror_start:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Block device to operate on.
- * @target: Block device to write to.
- * @replaces: Block graph node name to replace once the mirror is done. Can
- * only be used when full mirroring is selected.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @granularity: The chosen granularity for the dirty bitmap.
- * @buf_size: The amount of data that can be in flight at one time.
- * @mode: Whether to collapse all images in the chain to the target.
- * @backing_mode: How to establish the target's backing chain after completion.
- * @zero_target: Whether the target should be explicitly zero-initialized
- * @on_source_error: The action to take upon error reading from the source.
- * @on_target_error: The action to take upon error writing to the target.
- * @unmap: Whether to unmap target where source sectors only contain zeroes.
- * @filter_node_name: The node name that should be assigned to the filter
- * driver that the mirror job inserts into the graph above @bs. NULL means that
- * a node name should be autogenerated.
- * @copy_mode: When to trigger writes to the target.
- * @errp: Error object.
- *
- * Start a mirroring operation on @bs. Clusters that are allocated
- * in @bs will be written to @target until the job is cancelled or
- * manually completed. At the end of a successful mirroring job,
- * @bs will be switched to read from @target.
- */
-void mirror_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *target, const char *replaces,
- int creation_flags, int64_t speed,
- uint32_t granularity, int64_t buf_size,
- MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
- bool zero_target,
- BlockdevOnError on_source_error,
- BlockdevOnError on_target_error,
- bool unmap, const char *filter_node_name,
- MirrorCopyMode copy_mode, Error **errp);
-
-/*
- * backup_job_create:
- * @job_id: The id of the newly-created job, or %NULL to use the
- * device name of @bs.
- * @bs: Block device to operate on.
- * @target: Block device to write to.
- * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
- * @sync_mode: What parts of the disk image should be copied to the destination.
- * @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental'
- * @bitmap_mode: The bitmap synchronization policy to use.
- * @on_source_error: The action to take upon error reading from the source.
- * @on_target_error: The action to take upon error writing to the target.
- * @creation_flags: Flags that control the behavior of the Job lifetime.
- * See @BlockJobCreateFlags
- * @cb: Completion function for the job.
- * @opaque: Opaque pointer value passed to @cb.
- * @txn: Transaction that this job is part of (may be NULL).
- *
- * Create a backup operation on @bs. Clusters in @bs are written to @target
- * until the job is cancelled or manually completed.
- */
-BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
- BlockDriverState *target, int64_t speed,
- MirrorSyncMode sync_mode,
- BdrvDirtyBitmap *sync_bitmap,
- BitmapSyncMode bitmap_mode,
- bool compress,
- const char *filter_node_name,
- BlockdevOnError on_source_error,
- BlockdevOnError on_target_error,
- int creation_flags,
- BlockCompletionFunc *cb, void *opaque,
- JobTxn *txn, Error **errp);
-
-BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
- const char *child_name,
- const BdrvChildClass *child_class,
- BdrvChildRole child_role,
- AioContext *ctx,
- uint64_t perm, uint64_t shared_perm,
- void *opaque, Error **errp);
-void bdrv_root_unref_child(BdrvChild *child);
-
-void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
- uint64_t *shared_perm);
-
-/**
- * Sets a BdrvChild's permissions. Avoid if the parent is a BDS; use
- * bdrv_child_refresh_perms() instead and make the parent's
- * .bdrv_child_perm() implementation return the correct values.
- */
-int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
- Error **errp);
-
-/**
- * Calls bs->drv->bdrv_child_perm() and updates the child's permission
- * masks with the result.
- * Drivers should invoke this function whenever an event occurs that
- * makes their .bdrv_child_perm() implementation return different
- * values than before, but which will not result in the block layer
- * automatically refreshing the permissions.
- */
-int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp);
-
-bool bdrv_recurse_can_replace(BlockDriverState *bs,
- BlockDriverState *to_replace);
-
-/*
- * Default implementation for BlockDriver.bdrv_child_perm() that can
- * be used by block filters and image formats, as long as they use the
- * child_of_bds child class and set an appropriate BdrvChildRole.
- */
-void bdrv_default_perms(BlockDriverState *bs, BdrvChild *c,
- BdrvChildRole role, BlockReopenQueue *reopen_queue,
- uint64_t perm, uint64_t shared,
- uint64_t *nperm, uint64_t *nshared);
-
-/*
- * Default implementation for drivers to pass bdrv_co_block_status() to
- * their file.
- */
-int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
- bool want_zero,
- int64_t offset,
- int64_t bytes,
- int64_t *pnum,
- int64_t *map,
- BlockDriverState **file);
-/*
- * Default implementation for drivers to pass bdrv_co_block_status() to
- * their backing file.
- */
-int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
- bool want_zero,
- int64_t offset,
- int64_t bytes,
- int64_t *pnum,
- int64_t *map,
- BlockDriverState **file);
-const char *bdrv_get_parent_name(const BlockDriverState *bs);
-void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
-bool blk_dev_has_removable_media(BlockBackend *blk);
-bool blk_dev_has_tray(BlockBackend *blk);
-void blk_dev_eject_request(BlockBackend *blk, bool force);
-bool blk_dev_is_tray_open(BlockBackend *blk);
-bool blk_dev_is_medium_locked(BlockBackend *blk);
-
-void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
-
-void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
-void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
-bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
- const BdrvDirtyBitmap *src,
- HBitmap **backup, bool lock);
-
-void bdrv_inc_in_flight(BlockDriverState *bs);
-void bdrv_dec_in_flight(BlockDriverState *bs);
-
-void blockdev_close_all_bdrv_states(void);
-
-int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
- BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
- BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
-int refresh_total_sectors(BlockDriverState *bs, int64_t hint);
-
-void bdrv_set_monitor_owned(BlockDriverState *bs);
-BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp);
-
-/**
- * Simple implementation of bdrv_co_create_opts for protocol drivers
- * which only support creation via opening a file
- * (usually existing raw storage device)
- */
-int coroutine_fn bdrv_co_create_opts_simple(BlockDriver *drv,
- const char *filename,
- QemuOpts *opts,
- Error **errp);
-extern QemuOptsList bdrv_create_opts_simple;
-
-BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
- const char *name,
- BlockDriverState **pbs,
- Error **errp);
-BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
- BlockDirtyBitmapMergeSourceList *bms,
- HBitmap **backup, Error **errp);
-BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name,
- bool release,
- BlockDriverState **bitmap_bs,
- Error **errp);
+/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */
#endif /* BLOCK_INT_H */
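
For orientation, here is a minimal sketch (not part of this patch) of how a format driver fills in the BlockDriver callbacks listed in the removed block_int.h content above. The "example" driver, its state struct, and the bdrv_register()/block_init() registration follow the usual QEMU pattern but are illustrative assumptions, not something introduced by this diff.

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "block/block_int.h"   /* now just pulls in the split headers */

typedef struct BDRVExampleState {
    int64_t length;
} BDRVExampleState;

static int example_open(BlockDriverState *bs, QDict *options, int flags,
                        Error **errp)
{
    BDRVExampleState *s = bs->opaque;

    /* A real driver would parse @options and open its children here. */
    s->length = 0;
    return 0;
}

static void example_close(BlockDriverState *bs)
{
    /* Release any resources acquired in example_open(). */
}

static int64_t example_getlength(BlockDriverState *bs)
{
    BDRVExampleState *s = bs->opaque;

    return s->length;
}

static BlockDriver bdrv_example = {
    .format_name     = "example",
    .instance_size   = sizeof(BDRVExampleState),
    .bdrv_open       = example_open,
    .bdrv_close      = example_close,
    .bdrv_getlength  = example_getlength,
    /* Default permission logic for child_of_bds users, declared above. */
    .bdrv_child_perm = bdrv_default_perms,
};

static void example_block_init(void)
{
    bdrv_register(&bdrv_example);
}

block_init(example_block_init);
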
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 35faa3aa26..7061ab7201 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -26,8 +26,8 @@
#ifndef BLOCKJOB_H
#define BLOCKJOB_H
+#include "qapi/qapi-types-block-core.h"
#include "qemu/job.h"
-#include "block/block.h"
#include "qemu/ratelimit.h"
#define BLOCK_JOB_SLICE_TIME 100000000ULL /* ns */
@@ -40,24 +40,38 @@ typedef struct BlockJobDriver BlockJobDriver;
* Long-running operation on a BlockDriverState.
*/
typedef struct BlockJob {
- /** Data belonging to the generic Job infrastructure */
+ /**
+ * Data belonging to the generic Job infrastructure.
+ * Protected by job mutex.
+ */
Job job;
- /** The block device on which the job is operating. */
- BlockBackend *blk;
-
- /** Status that is published by the query-block-jobs QMP API */
+ /**
+ * Status that is published by the query-block-jobs QMP API.
+ * Protected by job mutex.
+ */
BlockDeviceIoStatus iostatus;
- /** Speed that was set with @block_job_set_speed. */
+ /**
+ * Speed that was set with @block_job_set_speed.
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
+ */
int64_t speed;
- /** Rate limiting data structure for implementing @speed. */
+ /**
+ * Rate limiting data structure for implementing @speed.
+ * RateLimit API is thread-safe.
+ */
RateLimit limit;
- /** Block other operations when block job is running */
+ /**
+ * Block other operations when block job is running.
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
+ */
Error *blocker;
+ /** All notifiers are set once in block_job_create() and never modified. */
+
/** Called when a cancelled job is finalised. */
Notifier finalize_cancelled_notifier;
@@ -73,20 +87,31 @@ typedef struct BlockJob {
/** Called when the job coroutine yields or terminates */
Notifier idle_notifier;
- /** BlockDriverStates that are involved in this block job */
+ /**
+ * BlockDriverStates that are involved in this block job.
+ * Always modified and read under the BQL (GLOBAL_STATE_CODE).
+ */
GSList *nodes;
} BlockJob;
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
/**
- * block_job_next:
+ * block_job_next_locked:
* @job: A block job, or %NULL.
*
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock held.
*/
-BlockJob *block_job_next(BlockJob *job);
+BlockJob *block_job_next_locked(BlockJob *job);
/**
* block_job_get:
@@ -95,9 +120,13 @@ BlockJob *block_job_next(BlockJob *job);
* Get the block job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock *not* held.
*/
BlockJob *block_job_get(const char *id);
+/* Same as block_job_get(), but called with job lock held. */
+BlockJob *block_job_get_locked(const char *id);
+
/**
* block_job_add_bdrv:
* @job: A block job
@@ -109,8 +138,9 @@ BlockJob *block_job_get(const char *id);
* @job. This means that all operations will be blocked on @bs while
* @job exists.
*/
-int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
- uint64_t perm, uint64_t shared_perm, Error **errp);
+int GRAPH_WRLOCK
+block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
+ uint64_t perm, uint64_t shared_perm, Error **errp);
/**
* block_job_remove_all_bdrv:
@@ -131,32 +161,64 @@ void block_job_remove_all_bdrv(BlockJob *job);
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs);
/**
- * block_job_set_speed:
+ * block_job_set_speed_locked:
* @job: The job to set the speed for.
* @speed: The new value
* @errp: Error object.
*
* Set a rate-limiting parameter for the job; the actual meaning may
* vary depending on the job type.
+ *
+ * Called with job lock held, but might release it temporarily.
+ */
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp);
+
+/**
+ * block_job_change_locked:
+ * @job: The job to change.
+ * @opts: The new options.
+ * @errp: Error object.
+ *
+ * Change the job according to opts.
*/
-void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
+void block_job_change_locked(BlockJob *job, BlockJobChangeOptions *opts,
+ Error **errp);
/**
- * block_job_query:
+ * block_job_query_locked:
* @job: The job to get information about.
*
* Return information about a job.
+ *
+ * Called with job lock held.
*/
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp);
/**
- * block_job_iostatus_reset:
+ * block_job_iostatus_reset_locked:
* @job: The job whose I/O status should be reset.
*
* Reset I/O status on @job and on BlockDriverState objects it uses,
* other than job->blk.
+ *
+ * Called with job lock held.
+ */
+void block_job_iostatus_reset_locked(BlockJob *job);
+
+/*
+ * block_job_get_aio_context:
+ *
+ * Returns the AioContext associated with a block job.
+ */
+AioContext *block_job_get_aio_context(BlockJob *job);
+
+
+/*
+ * Common functions that are neither I/O nor Global State.
+ *
+ * See include/block/block-common.h for more information about
+ * the Common API.
*/
-void block_job_iostatus_reset(BlockJob *job);
/**
* block_job_is_internal:
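
As a usage illustration (not from this patch), the renamed *_locked accessors above are meant to be called with the job mutex held. A minimal sketch follows, assuming the job_lock()/job_unlock() helpers from "qemu/job.h" and the QAPI-generated qapi_free_BlockJobInfo(); the function name is hypothetical.

#include "qemu/osdep.h"
#include "qemu/job.h"
#include "block/blockjob.h"
#include "qapi/error.h"

/* Walk all block jobs and query each one while holding the job lock. */
static void query_all_block_jobs(void)
{
    BlockJob *job;

    job_lock();
    for (job = block_job_next_locked(NULL); job;
         job = block_job_next_locked(job)) {
        Error *local_err = NULL;
        BlockJobInfo *info = block_job_query_locked(job, &local_err);

        if (!info) {
            error_free(local_err);
            continue;
        }
        /* ... consume info ... */
        qapi_free_BlockJobInfo(info);
    }
    job_unlock();
}
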
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index e2824a36a8..4c3d2e25a2 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -27,7 +27,6 @@
#define BLOCKJOB_INT_H
#include "block/blockjob.h"
-#include "block/block.h"
/**
* BlockJobDriver:
@@ -39,6 +38,13 @@ struct BlockJobDriver {
JobDriver job_driver;
/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ /*
* Returns whether the job has pending requests for the child or will
* submit new requests before the next pause point. This callback is polled
* in the context of draining a job node after requesting that the job be
@@ -47,13 +53,41 @@ struct BlockJobDriver {
bool (*drained_poll)(BlockJob *job);
/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+ /*
* If the callback is not NULL, it will be invoked before the job is
* resumed in a new AioContext. This is the place to move any resources
* besides job->blk to the new AioContext.
*/
void (*attached_aio_context)(BlockJob *job, AioContext *new_context);
+
+ void (*set_speed)(BlockJob *job, int64_t speed);
+
+ /*
+ * Change the @job's options according to @opts.
+ *
+ * Note that this can already be called before the job coroutine is running.
+ */
+ void (*change)(BlockJob *job, BlockJobChangeOptions *opts, Error **errp);
+
+ /*
+ * Query information specific to this kind of block job.
+ */
+ void (*query)(BlockJob *job, BlockJobInfo *info);
};
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
/**
* block_job_create:
* @job_id: The id of the newly-created job, or %NULL to have one
@@ -77,10 +111,11 @@ struct BlockJobDriver {
* This function is not part of the public job interface; it should be
* called from a wrapper that is specific to the job type.
*/
-void *block_job_create(const char *job_id, const BlockJobDriver *driver,
- JobTxn *txn, BlockDriverState *bs, uint64_t perm,
- uint64_t shared_perm, int64_t speed, int flags,
- BlockCompletionFunc *cb, void *opaque, Error **errp);
+void * GRAPH_UNLOCKED
+block_job_create(const char *job_id, const BlockJobDriver *driver,
+ JobTxn *txn, BlockDriverState *bs, uint64_t perm,
+ uint64_t shared_perm, int64_t speed, int flags,
+ BlockCompletionFunc *cb, void *opaque, Error **errp);
/**
* block_job_free:
@@ -96,13 +131,26 @@ void block_job_free(Job *job);
*/
void block_job_user_resume(Job *job);
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
/**
- * block_job_ratelimit_get_delay:
+ * block_job_ratelimit_processed_bytes:
*
- * Calculate and return delay for the next request in ns. See the documentation
- * of ratelimit_calculate_delay() for details.
+ * To be called after some work has been done. Adjusts the delay for the next
+ * request. See the documentation of ratelimit_calculate_delay() for details.
+ */
+void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n);
+
+/**
+ * Put the job to sleep (assuming that it wasn't canceled) to throttle it to the
+ * right speed according to its rate limiting.
*/
-int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
+void block_job_ratelimit_sleep(BlockJob *job);
/**
* block_job_error_action:
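(A hedged sketch of how a job's run loop might combine the two new ratelimit helpers that replace the old block_job_ratelimit_get_delay() pattern; the copy loop below is illustrative and not taken from any existing driver.)

    #include "qemu/osdep.h"
    #include "block/blockjob_int.h"

    /* Sketch: throttle a job's main loop with the new ratelimit helpers. */
    static int coroutine_fn example_job_run(Job *job, Error **errp)
    {
        BlockJob *bjob = container_of(job, BlockJob, job);
        int64_t offset = 0, end = 1024 * 1024, chunk = 64 * 1024;

        while (offset < end) {
            /* Sleep as long as needed so the configured speed is respected. */
            block_job_ratelimit_sleep(bjob);

            /* ... perform up to 'chunk' bytes of work here ... */

            /* Account the work so the next sleep is computed correctly. */
            block_job_ratelimit_processed_bytes(bjob, chunk);
            offset += chunk;
        }
        return 0;
    }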
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 36e8da4fc2..fa956debfb 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -1,6 +1,7 @@
#ifndef BLOCK_DIRTY_BITMAP_H
#define BLOCK_DIRTY_BITMAP_H
+#include "block/block-common.h"
#include "qapi/qapi-types-block-core.h"
#include "qemu/hbitmap.h"
@@ -34,8 +35,14 @@ int bdrv_dirty_bitmap_check(const BdrvDirtyBitmap *bitmap, uint32_t flags,
Error **errp);
void bdrv_release_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_release_named_dirty_bitmaps(BlockDriverState *bs);
-int bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
- Error **errp);
+
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
+ Error **errp);
+int co_wrapper_bdrv_rdlock
+bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
+ Error **errp);
+
void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_enable_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap);
@@ -46,7 +53,6 @@ bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap);
bool bdrv_dirty_bitmap_has_successor(BdrvDirtyBitmap *bitmap);
const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap);
int64_t bdrv_dirty_bitmap_size(const BdrvDirtyBitmap *bitmap);
-DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap);
void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes);
void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,
@@ -57,6 +63,8 @@ void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter);
uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap,
uint64_t offset, uint64_t bytes);
uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap);
+uint64_t bdrv_dirty_bitmap_serialization_coverage(int serialized_chunk_size,
+ const BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap,
uint8_t *buf, uint64_t offset,
uint64_t bytes);
@@ -76,7 +84,7 @@ void bdrv_dirty_bitmap_set_persistence(BdrvDirtyBitmap *bitmap,
bool persistent);
void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_set_busy(BdrvDirtyBitmap *bitmap, bool busy);
-void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
+bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
HBitmap **backup, Error **errp);
void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip);
bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset);
@@ -114,6 +122,8 @@ int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
int64_t start, int64_t end, int64_t max_dirty_count,
int64_t *dirty_start, int64_t *dirty_count);
+bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset,
+ int64_t bytes, int64_t *count);
BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
Error **errp);
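(A hedged sketch of walking a bitmap with the new range-based status query, assuming the return value reports whether the run starting at @offset is dirty and *count receives the length of that run; illustrative only.)

    #include "qemu/osdep.h"
    #include "block/dirty-bitmap.h"

    /* Sketch: print the contiguous dirty/clean runs of a bitmap. */
    static void dump_bitmap_runs(BdrvDirtyBitmap *bitmap)
    {
        int64_t offset = 0;
        int64_t size = bdrv_dirty_bitmap_size(bitmap);

        while (offset < size) {
            int64_t count = 0;
            bool dirty = bdrv_dirty_bitmap_status(bitmap, offset,
                                                  size - offset, &count);

            printf("%s run at %" PRId64 ", %" PRId64 " bytes\n",
                   dirty ? "dirty" : "clean", offset, count);
            offset += count;
        }
    }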
diff --git a/include/block/export.h b/include/block/export.h
new file mode 100644
index 0000000000..f2fe0f8078
--- /dev/null
+++ b/include/block/export.h
@@ -0,0 +1,91 @@
+/*
+ * Declarations for block exports
+ *
+ * Copyright (c) 2012, 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ * Kevin Wolf <kwolf@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef BLOCK_EXPORT_H
+#define BLOCK_EXPORT_H
+
+#include "qapi/qapi-types-block-export.h"
+#include "qemu/queue.h"
+
+typedef struct BlockExport BlockExport;
+
+typedef struct BlockExportDriver {
+ /* The export type that this driver services */
+ BlockExportType type;
+
+ /*
+ * The size of the driver-specific state that contains BlockExport as its
+ * first field.
+ */
+ size_t instance_size;
+
+ /* Creates and starts a new block export */
+ int (*create)(BlockExport *, BlockExportOptions *, Error **);
+
+ /*
+ * Frees a removed block export. This function is only called after all
+ * references have been dropped.
+ */
+ void (*delete)(BlockExport *);
+
+ /*
+ * Start to disconnect all clients and drop other references held
+ * internally by the export driver. When the function returns, there may
+ * still be active references while the export is in the process of
+ * shutting down.
+ */
+ void (*request_shutdown)(BlockExport *);
+} BlockExportDriver;
+
+struct BlockExport {
+ const BlockExportDriver *drv;
+
+ /* Unique identifier for the export */
+ char *id;
+
+ /*
+ * Reference count for this block export. This includes strong references
+ * both from the owner (qemu-nbd or the monitor) and clients connected to
+ * the export.
+ *
+ * Use atomics to access this field.
+ */
+ int refcount;
+
+ /*
+ * True if one of the references in refcount belongs to the user. After the
+ * user has dropped their reference, they may not e.g. remove the same
+ * export a second time (which would decrease the refcount without having
+ * it incremented first).
+ */
+ bool user_owned;
+
+ /* The AioContext whose lock protects this BlockExport object. */
+ AioContext *ctx;
+
+ /* The block device to export */
+ BlockBackend *blk;
+
+ /* List entry for block_exports */
+ QLIST_ENTRY(BlockExport) next;
+};
+
+BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp);
+BlockExport *blk_exp_find(const char *id);
+void blk_exp_ref(BlockExport *exp);
+void blk_exp_unref(BlockExport *exp);
+void blk_exp_request_shutdown(BlockExport *exp);
+void blk_exp_close_all(void);
+void blk_exp_close_all_type(BlockExportType type);
+
+#endif
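(A hedged sketch of the shape a new driver takes against this interface; the names and the export type below are hypothetical placeholders, not an existing driver.)

    #include "qemu/osdep.h"
    #include "block/export.h"

    /* Hypothetical driver state; BlockExport must be the first field. */
    typedef struct MyExport {
        BlockExport common;
        /* ... protocol-specific state ... */
    } MyExport;

    static int my_export_create(BlockExport *exp, BlockExportOptions *opts,
                                Error **errp)
    {
        /* Set up listeners and start serving exp->blk to clients. */
        return 0;
    }

    static void my_export_request_shutdown(BlockExport *exp)
    {
        /* Begin disconnecting clients; each one drops its reference. */
    }

    static void my_export_delete(BlockExport *exp)
    {
        /* Called only after the refcount has dropped to zero. */
    }

    static const BlockExportDriver blk_exp_example = {
        .type             = BLOCK_EXPORT_TYPE_NBD, /* placeholder type */
        .instance_size    = sizeof(MyExport),
        .create           = my_export_create,
        .delete           = my_export_delete,
        .request_shutdown = my_export_request_shutdown,
    };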
diff --git a/include/block/fuse.h b/include/block/fuse.h
new file mode 100644
index 0000000000..ffa91fe364
--- /dev/null
+++ b/include/block/fuse.h
@@ -0,0 +1,30 @@
+/*
+ * Present a block device as a raw image through FUSE
+ *
+ * Copyright (c) 2020 Max Reitz <mreitz@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 or later of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef BLOCK_FUSE_H
+#define BLOCK_FUSE_H
+
+#ifdef CONFIG_FUSE
+
+#include "block/export.h"
+
+extern const BlockExportDriver blk_exp_fuse;
+
+#endif /* CONFIG_FUSE */
+
+#endif
diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h
new file mode 100644
index 0000000000..d7545e82d0
--- /dev/null
+++ b/include/block/graph-lock.h
@@ -0,0 +1,278 @@
+/*
+ * Graph lock: rwlock to protect block layer graph manipulations (add/remove
+ * edges and nodes)
+ *
+ * Copyright (c) 2022 Red Hat
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef GRAPH_LOCK_H
+#define GRAPH_LOCK_H
+
+#include "qemu/clang-tsa.h"
+
+/**
+ * Graph Lock API
+ * This API provides a rwlock used to protect block layer
+ * graph modifications like edge (BdrvChild) and node (BlockDriverState)
+ * addition and removal.
+ * Currently there is only one writer, the main loop, and many readers,
+ * mostly coroutines running in other AioContexts and thus in other threads.
+ *
+ * We distinguish between the writer (main loop, under BQL), which modifies
+ * the graph, and the readers (all other coroutines running in various
+ * AioContexts), which traverse the graph edges by reading
+ * BlockDriverState->parents and ->children.
+ *
+ * The writer (main loop) has exclusive access, so it first waits for
+ * current reads to finish, and then prevents new ones from
+ * entering while it holds the exclusive access.
+ *
+ * The readers (coroutines in multiple AioContexts) are free to
+ * access the graph as long as the writer is not modifying it.
+ * If it is, they enter a CoQueue and sleep until the writer
+ * is done.
+ *
+ * If a coroutine changes AioContext, the counters in the original and new
+ * AioContext are left intact, since the writer does not care where the
+ * reader is, but only whether there is one.
+ * As a result, some AioContexts might have a negative reader count, to
+ * balance the positive count of the AioContext that took the lock.
+ * This also means that when an AioContext is deleted it may have a nonzero
+ * reader count. In that case we transfer the count to a global shared counter
+ * so that the writer is always aware of all readers.
+ */
+typedef struct BdrvGraphRWlock BdrvGraphRWlock;
+
+/* Dummy lock object to use for Thread Safety Analysis (TSA) */
+typedef struct TSA_CAPABILITY("mutex") BdrvGraphLock {
+} BdrvGraphLock;
+
+extern BdrvGraphLock graph_lock;
+
+/*
+ * clang doesn't check consistency in locking annotations between forward
+ * declarations and the function definition. Having the annotation on the
+ * definition, but not the declaration in a header file, may give the reader
+ * a false sense of security because the condition actually remains unchecked
+ * for callers in other source files.
+ *
+ * Therefore, as a convention, for public functions, GRAPH_RDLOCK and
+ * GRAPH_WRLOCK annotations should be present only in the header file.
+ */
+#define GRAPH_WRLOCK TSA_REQUIRES(graph_lock)
+#define GRAPH_RDLOCK TSA_REQUIRES_SHARED(graph_lock)
+#define GRAPH_UNLOCKED TSA_EXCLUDES(graph_lock)
+
+/*
+ * TSA annotations are not part of function types, so checks are defeated when
+ * using a function pointer. As a workaround, annotate function pointers with
+ * this macro that will require that the lock is at least taken while reading
+ * the pointer. In most cases this is equivalent to actually protecting the
+ * function call.
+ */
+#define GRAPH_RDLOCK_PTR TSA_GUARDED_BY(graph_lock)
+#define GRAPH_WRLOCK_PTR TSA_GUARDED_BY(graph_lock)
+#define GRAPH_UNLOCKED_PTR
+
+/*
+ * register_aiocontext:
+ * Add AioContext @ctx to the list of AioContexts.
+ * This list is used to obtain the total number of readers
+ * currently reading the graph.
+ */
+void register_aiocontext(AioContext *ctx);
+
+/*
+ * unregister_aiocontext:
+ * Remove AioContext @ctx from the list of AioContexts.
+ */
+void unregister_aiocontext(AioContext *ctx);
+
+/*
+ * bdrv_graph_wrlock:
+ * Start an exclusive write operation to modify the graph. This means we are
+ * adding or removing an edge or a node in the block layer graph. Nobody else
+ * is allowed to access the graph.
+ *
+ * Must only be called from outside bdrv_graph_co_rdlock.
+ *
+ * The wrlock can only be taken from the main loop, with BQL held, as only the
+ * main loop is allowed to modify the graph.
+ */
+void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrlock(void);
+
+/*
+ * bdrv_graph_wrunlock:
+ * Write finished, reset global has_writer to 0 and restart
+ * all readers that are waiting.
+ */
+void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrunlock(void);
+
+/*
+ * bdrv_graph_co_rdlock:
+ * Read the bs graph. This usually means traversing all nodes in
+ * the graph, therefore it can't happen while another thread is
+ * modifying it.
+ * Increases the reader counter of the current aiocontext,
+ * and if has_writer is set, it means that the writer is modifying
+ * the graph, therefore wait in a coroutine queue.
+ * The writer will then wake this coroutine once it is done.
+ *
+ * This lock should be taken from Iothreads (IO_CODE() class of functions)
+ * because it signals the writer that there are some
+ * readers currently running, or waits until the current
+ * write is finished before continuing.
+ * Calling this function from the Main Loop with BQL held
+ * is not necessary, since the Main Loop itself is the only
+ * writer, thus won't be able to read and write at the same time.
+ * The only exception to that is when we can't take the lock in the
+ * function/coroutine itself, and need to delegate to the caller (usually the
+ * main loop) to take it and wait until the coroutine ends, so that
+ * we always signal that a reader is running.
+ */
+void coroutine_fn TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_co_rdlock(void);
+
+/*
+ * bdrv_graph_rdunlock:
+ * Read terminated, decrease the count of readers in the current aiocontext.
+ * If the writer is waiting for reads to finish (has_writer == 1), signal
+ * the writer that we are done via aio_wait_kick() to let it continue.
+ */
+void coroutine_fn TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_co_rdunlock(void);
+
+/*
+ * bdrv_graph_rd{un}lock_main_loop:
+ * Just a placeholder to mark where the graph rdlock should be taken
+ * in the main loop. It merely asserts that we are not
+ * in a coroutine and that we are in GLOBAL_STATE_CODE.
+ */
+void TSA_ACQUIRE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_rdlock_main_loop(void);
+
+void TSA_RELEASE_SHARED(graph_lock) TSA_NO_TSA
+bdrv_graph_rdunlock_main_loop(void);
+
+/*
+ * assert_bdrv_graph_readable:
+ * Make sure that the reader is either the main loop,
+ * or there is at least one reader holding the rdlock.
+ * In this way an incoming writer is aware of the read and waits.
+ */
+void GRAPH_RDLOCK assert_bdrv_graph_readable(void);
+
+/*
+ * assert_bdrv_graph_writable:
+ * Make sure that the writer is the main loop and has set @has_writer,
+ * so that incoming readers will pause.
+ */
+void GRAPH_WRLOCK assert_bdrv_graph_writable(void);
+
+/*
+ * Calling this function tells TSA that we know that the lock is effectively
+ * taken even though we cannot prove it (yet) with GRAPH_RDLOCK. This can be
+ * useful in intermediate stages of a conversion to using the GRAPH_RDLOCK
+ * macro.
+ */
+static inline void TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
+assume_graph_lock(void)
+{
+}
+
+typedef struct GraphLockable { } GraphLockable;
+
+/*
+ * In C, compound literals have the lifetime of an automatic variable.
+ * In C++ it would be different, but then C++ wouldn't need QemuLockable
+ * either...
+ */
+#define GML_OBJ_() (&(GraphLockable) { })
+
+/*
+ * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the
+ * cleanup attribute and would therefore complain that the graph is never
+ * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
+ * we hold the lock while unlocking is left unchecked.
+ */
+static inline GraphLockable * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA coroutine_fn
+graph_lockable_auto_lock(GraphLockable *x)
+{
+ bdrv_graph_co_rdlock();
+ return x;
+}
+
+static inline void TSA_NO_TSA coroutine_fn
+graph_lockable_auto_unlock(GraphLockable *x)
+{
+ bdrv_graph_co_rdunlock();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock)
+
+#define WITH_GRAPH_RDLOCK_GUARD_(var) \
+ for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \
+ var; \
+ graph_lockable_auto_unlock(var), var = NULL)
+
+#define WITH_GRAPH_RDLOCK_GUARD() \
+ WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__))
+
+#define GRAPH_RDLOCK_GUARD(x) \
+ g_autoptr(GraphLockable) \
+ glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
+ graph_lockable_auto_lock(GML_OBJ_())
+
+
+typedef struct GraphLockableMainloop { } GraphLockableMainloop;
+
+/*
+ * In C, compound literals have the lifetime of an automatic variable.
+ * In C++ it would be different, but then C++ wouldn't need QemuLockable
+ * either...
+ */
+#define GMLML_OBJ_() (&(GraphLockableMainloop) { })
+
+/*
+ * This is not marked as TSA_ACQUIRE_SHARED() because TSA doesn't understand the
+ * cleanup attribute and would therefore complain that the graph is never
+ * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that
+ * we hold the lock while unlocking is left unchecked.
+ */
+static inline GraphLockableMainloop * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA
+graph_lockable_auto_lock_mainloop(GraphLockableMainloop *x)
+{
+ bdrv_graph_rdlock_main_loop();
+ return x;
+}
+
+static inline void TSA_NO_TSA
+graph_lockable_auto_unlock_mainloop(GraphLockableMainloop *x)
+{
+ bdrv_graph_rdunlock_main_loop();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockableMainloop,
+ graph_lockable_auto_unlock_mainloop)
+
+#define GRAPH_RDLOCK_GUARD_MAINLOOP(x) \
+ g_autoptr(GraphLockableMainloop) \
+ glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
+ graph_lockable_auto_lock_mainloop(GMLML_OBJ_())
+
+#endif /* GRAPH_LOCK_H */
+
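(A minimal usage sketch of the two guard forms, one from coroutine (I/O) context and one from the main loop; the traversal body is illustrative only.)

    #include "qemu/osdep.h"
    #include "block/graph-lock.h"
    #include "block/block_int.h"

    /* Sketch: coroutine context, scoped rdlock around a graph traversal. */
    static int coroutine_fn example_count_children(BlockDriverState *bs)
    {
        int n = 0;

        WITH_GRAPH_RDLOCK_GUARD() {
            BdrvChild *child;
            QLIST_FOREACH(child, &bs->children, next) {
                n++;
            }
        }
        return n;
    }

    /* Sketch: main loop context, the guard lasts until the end of scope. */
    static void example_main_loop_walk(BlockDriverState *bs)
    {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        /* ... read bs->children / bs->parents here ... */
    }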
diff --git a/include/block/nbd.h b/include/block/nbd.h
index 9bc3bfaeec..4e7bd6342f 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2019 Red Hat, Inc.
+ * Copyright Red Hat
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device
@@ -20,52 +20,70 @@
#ifndef NBD_H
#define NBD_H
-#include "qapi/qapi-types-block.h"
+#include "block/export.h"
#include "io/channel-socket.h"
#include "crypto/tlscreds.h"
#include "qapi/error.h"
+#include "qemu/bswap.h"
+
+typedef struct NBDExport NBDExport;
+typedef struct NBDClient NBDClient;
+typedef struct NBDClientConnection NBDClientConnection;
+typedef struct NBDMetaContexts NBDMetaContexts;
+
+extern const BlockExportDriver blk_exp_nbd;
/* Handshake phase structs - this struct is passed on the wire */
-struct NBDOption {
+typedef struct NBDOption {
uint64_t magic; /* NBD_OPTS_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t length;
-} QEMU_PACKED;
-typedef struct NBDOption NBDOption;
+} QEMU_PACKED NBDOption;
-struct NBDOptionReply {
+typedef struct NBDOptionReply {
uint64_t magic; /* NBD_REP_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t type; /* NBD_REP_* */
uint32_t length;
-} QEMU_PACKED;
-typedef struct NBDOptionReply NBDOptionReply;
+} QEMU_PACKED NBDOptionReply;
typedef struct NBDOptionReplyMetaContext {
NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
uint32_t context_id;
- /* meta context name follows */
+ /* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
-/* Transmission phase structs
- *
- * Note: these are _NOT_ the same as the network representation of an NBD
- * request and reply!
+/* Track results of negotiation */
+typedef enum NBDMode {
+ /* Keep this list in a continuum of increasing features. */
+ NBD_MODE_OLDSTYLE, /* server lacks newstyle negotiation */
+ NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
+ NBD_MODE_SIMPLE, /* newstyle but only simple replies */
+ NBD_MODE_STRUCTURED, /* newstyle, structured replies enabled */
+ NBD_MODE_EXTENDED, /* newstyle, extended headers enabled */
+} NBDMode;
+
+/* Transmission phase structs */
+
+/*
+ * Note: NBDRequest is _NOT_ the same as the network representation of an NBD
+ * request!
*/
-struct NBDRequest {
- uint64_t handle;
- uint64_t from;
- uint32_t len;
+typedef struct NBDRequest {
+ uint64_t cookie;
+ uint64_t from; /* Offset touched by the command */
+ uint64_t len; /* Effect length; 32 bit limit without extended headers */
uint16_t flags; /* NBD_CMD_FLAG_* */
- uint16_t type; /* NBD_CMD_* */
-};
-typedef struct NBDRequest NBDRequest;
+ uint16_t type; /* NBD_CMD_* */
+ NBDMode mode; /* Determines which network representation to use */
+ NBDMetaContexts *contexts; /* Used by NBD_CMD_BLOCK_STATUS */
+} NBDRequest;
typedef struct NBDSimpleReply {
uint32_t magic; /* NBD_SIMPLE_REPLY_MAGIC */
uint32_t error;
- uint64_t handle;
+ uint64_t cookie;
} QEMU_PACKED NBDSimpleReply;
/* Header of all structured replies */
@@ -73,57 +91,94 @@ typedef struct NBDStructuredReplyChunk {
uint32_t magic; /* NBD_STRUCTURED_REPLY_MAGIC */
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
uint16_t type; /* NBD_REPLY_TYPE_* */
- uint64_t handle; /* request handle */
+ uint64_t cookie; /* request handle */
uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
+typedef struct NBDExtendedReplyChunk {
+ uint32_t magic; /* NBD_EXTENDED_REPLY_MAGIC */
+ uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
+ uint16_t type; /* NBD_REPLY_TYPE_* */
+ uint64_t cookie; /* request handle */
+ uint64_t offset; /* request offset */
+ uint64_t length; /* length of payload */
+} QEMU_PACKED NBDExtendedReplyChunk;
+
typedef union NBDReply {
NBDSimpleReply simple;
NBDStructuredReplyChunk structured;
+ NBDExtendedReplyChunk extended;
struct {
- /* @magic and @handle fields have the same offset and size both in
- * simple reply and structured reply chunk, so let them be accessible
- * without ".simple." or ".structured." specification
+ /*
+ * @magic and @cookie fields have the same offset and size in all
+ * forms of replies, so let them be accessible without ".simple.",
+ * ".structured.", or ".extended." specifications.
*/
uint32_t magic;
uint32_t _skip;
- uint64_t handle;
- } QEMU_PACKED;
+ uint64_t cookie;
+ };
} NBDReply;
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, simple.cookie) !=
+ offsetof(NBDReply, cookie));
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, structured.cookie) !=
+ offsetof(NBDReply, cookie));
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, extended.cookie) !=
+ offsetof(NBDReply, cookie));
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
- NBDStructuredReplyChunk h; /* h.length >= 9 */
+ /* header's .length >= 9 */
uint64_t offset;
/* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
- NBDStructuredReplyChunk h; /* h.length == 12 */
+ /* header's length == 12 */
uint64_t offset;
uint32_t length;
} QEMU_PACKED NBDStructuredReadHole;
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
- NBDStructuredReplyChunk h; /* h.length >= 6 */
+ /* header's length >= 6 */
uint32_t error;
uint16_t message_length;
} QEMU_PACKED NBDStructuredError;
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
- NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
+ /* header's length >= 12 (at least one extent) */
uint32_t context_id;
- /* extents follows */
+ /* NBDExtent32 extents[] follows, array length implied by header */
} QEMU_PACKED NBDStructuredMeta;
-/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
-typedef struct NBDExtent {
+/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS */
+typedef struct NBDExtent32 {
uint32_t length;
uint32_t flags; /* NBD_STATE_* */
-} QEMU_PACKED NBDExtent;
+} QEMU_PACKED NBDExtent32;
+
+/* Header of NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
+typedef struct NBDExtendedMeta {
+ /* header's length >= 24 (at least one extent) */
+ uint32_t context_id;
+ uint32_t count; /* header length must be count * 16 + 8 */
+ /* NBDExtent64 extents[count] follows */
+} QEMU_PACKED NBDExtendedMeta;
+
+/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
+typedef struct NBDExtent64 {
+ uint64_t length;
+ uint64_t flags; /* NBD_STATE_* */
+} QEMU_PACKED NBDExtent64;
+
+/* Client payload for limiting NBD_CMD_BLOCK_STATUS reply */
+typedef struct NBDBlockStatusPayload {
+ uint64_t effect_length;
+ /* uint32_t ids[] follows, array length implied by header */
+} QEMU_PACKED NBDBlockStatusPayload;
/* Transmission (export) flags: sent from server to client during handshake,
but describe what will happen during transmission */
@@ -141,20 +196,22 @@ enum {
NBD_FLAG_SEND_RESIZE_BIT = 9, /* Send resize */
NBD_FLAG_SEND_CACHE_BIT = 10, /* Send CACHE (prefetch) */
NBD_FLAG_SEND_FAST_ZERO_BIT = 11, /* FAST_ZERO flag for WRITE_ZEROES */
+ NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT = 12, /* PAYLOAD flag for BLOCK_STATUS */
};
-#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
-#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
-#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
-#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
-#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
-#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
-#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
-#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
-#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
-#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
-#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
-#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
+#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
+#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
+#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
+#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
+#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
+#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
+#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
+#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
+#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
+#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
+#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
+#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
+#define NBD_FLAG_BLOCK_STAT_PAYLOAD (1 << NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT)
/* New-style handshake (global) flags, sent from server to client, and
control what will happen during handshake phase. */
@@ -177,6 +234,7 @@ enum {
#define NBD_OPT_STRUCTURED_REPLY (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT (10)
+#define NBD_OPT_EXTENDED_HEADERS (11)
/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
@@ -194,6 +252,8 @@ enum {
#define NBD_REP_ERR_UNKNOWN NBD_REP_ERR(6) /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN NBD_REP_ERR(7) /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8) /* Need INFO_BLOCK_SIZE */
+#define NBD_REP_ERR_TOO_BIG NBD_REP_ERR(9) /* Payload size overflow */
+#define NBD_REP_ERR_EXT_HEADER_REQD NBD_REP_ERR(10) /* Need extended headers */
/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT 0
@@ -202,12 +262,14 @@ enum {
#define NBD_INFO_BLOCK_SIZE 3
/* Request flags, sent from client to server during transmission phase */
-#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
-#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
-#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
-#define NBD_CMD_FLAG_REQ_ONE (1 << 3) /* only one extent in BLOCK_STATUS
- * reply chunk */
-#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
+#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
+#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
+#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
+#define NBD_CMD_FLAG_REQ_ONE (1 << 3) \
+ /* only one extent in BLOCK_STATUS reply chunk */
+#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
+#define NBD_CMD_FLAG_PAYLOAD_LEN (1 << 5) \
+ /* length describes payload, not effect; only with ext header */
/* Supported request types */
enum {
@@ -227,28 +289,37 @@ enum {
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)
/*
- * Maximum size of a protocol string (export name, meta context name,
+ * Maximum size of a protocol string (export name, metadata context name,
* etc.). Use malloc rather than stack allocation for storage of a
* string.
*/
#define NBD_MAX_STRING_SIZE 4096
-/* Two types of reply structures */
+/* Two types of request structures; a given client will only use one */
+#define NBD_REQUEST_MAGIC 0x25609513
+#define NBD_EXTENDED_REQUEST_MAGIC 0x21e41c71
+
+/*
+ * Three types of reply structures, but what a client expects depends
+ * on NBD_OPT_STRUCTURED_REPLY and NBD_OPT_EXTENDED_HEADERS.
+ */
#define NBD_SIMPLE_REPLY_MAGIC 0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
+#define NBD_EXTENDED_REPLY_MAGIC 0x6e8a278c
-/* Structured reply flags */
+/* Chunk reply flags (for structured and extended replies) */
#define NBD_REPLY_FLAG_DONE (1 << 0) /* This reply-chunk is last */
-/* Structured reply types */
+/* Chunk reply types */
#define NBD_REPLY_ERR(value) ((1 << 15) | (value))
-#define NBD_REPLY_TYPE_NONE 0
-#define NBD_REPLY_TYPE_OFFSET_DATA 1
-#define NBD_REPLY_TYPE_OFFSET_HOLE 2
-#define NBD_REPLY_TYPE_BLOCK_STATUS 5
-#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
-#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
+#define NBD_REPLY_TYPE_NONE 0
+#define NBD_REPLY_TYPE_OFFSET_DATA 1
+#define NBD_REPLY_TYPE_OFFSET_HOLE 2
+#define NBD_REPLY_TYPE_BLOCK_STATUS 5
+#define NBD_REPLY_TYPE_BLOCK_STATUS_EXT 6
+#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
+#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
@@ -257,6 +328,8 @@ enum {
/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)
+/* No flags needed for qemu:allocation-depth in NBD_REPLY_TYPE_BLOCK_STATUS */
+
static inline bool nbd_reply_type_is_error(int type)
{
return type & (1 << 15);
@@ -277,7 +350,7 @@ static inline bool nbd_reply_type_is_error(int type)
#define NBD_ESHUTDOWN 108
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
-struct NBDExportInfo {
+typedef struct NBDExportInfo {
/* Set by client before nbd_receive_negotiate() */
bool request_sizes;
char *x_dirty_bitmap;
@@ -288,7 +361,7 @@ struct NBDExportInfo {
/* In-out fields, set by client before nbd_receive_negotiate() and
* updated by server results during nbd_receive_negotiate() */
- bool structured_reply;
+ NBDMode mode; /* input maximum mode tolerated; output actual mode chosen */
bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */
/* Set by server results during nbd_receive_negotiate() and
@@ -305,11 +378,9 @@ struct NBDExportInfo {
char *description;
int n_contexts;
char **contexts;
-};
-typedef struct NBDExportInfo NBDExportInfo;
+} NBDExportInfo;
-int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
- QCryptoTLSCreds *tlscreds,
+int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
const char *hostname, QIOChannel **outioc,
NBDExportInfo *info, Error **errp);
void nbd_free_export_list(NBDExportInfo *info, int count);
@@ -320,29 +391,16 @@ int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
Error **errp);
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
- NBDReply *reply, Error **errp);
+ NBDReply *reply, NBDMode mode,
+ Error **errp);
int nbd_client(int fd);
int nbd_disconnect(int fd);
int nbd_errno_to_system_errno(int err);
-typedef struct NBDExport NBDExport;
-typedef struct NBDClient NBDClient;
-
-NBDExport *nbd_export_new(BlockDriverState *bs, uint64_t dev_offset,
- uint64_t size, const char *name, const char *desc,
- const char *bitmap, bool readonly, bool shared,
- void (*close)(NBDExport *), bool writethrough,
- BlockBackend *on_eject_blk, Error **errp);
-void nbd_export_close(NBDExport *exp);
-void nbd_export_remove(NBDExport *exp, NbdServerRemoveMode mode, Error **errp);
-void nbd_export_get(NBDExport *exp);
-void nbd_export_put(NBDExport *exp);
-
-BlockBackend *nbd_export_get_blockdev(NBDExport *exp);
+void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);
AioContext *nbd_export_aio_context(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);
-void nbd_export_close_all(void);
void nbd_client_new(QIOChannelSocket *sioc,
QCryptoTLSCreds *tlscreds,
@@ -351,8 +409,12 @@ void nbd_client_new(QIOChannelSocket *sioc,
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);
+void nbd_server_is_qemu_nbd(int max_connections);
+bool nbd_server_is_running(void);
+int nbd_server_max_connections(void);
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
- const char *tls_authz, Error **errp);
+ const char *tls_authz, uint32_t max_connections,
+ Error **errp);
void nbd_server_start_options(NbdServerOptions *arg, Error **errp);
/* nbd_read
@@ -368,7 +430,7 @@ static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
if (desc) {
error_prepend(errp, "Failed to read %s: ", desc);
}
- return -1;
+ return ret;
}
return 0;
@@ -379,8 +441,9 @@ static inline int nbd_read##bits(QIOChannel *ioc, \
uint##bits##_t *val, \
const char *desc, Error **errp) \
{ \
- if (nbd_read(ioc, val, sizeof(*val), desc, errp) < 0) { \
- return -1; \
+ int ret = nbd_read(ioc, val, sizeof(*val), desc, errp); \
+ if (ret < 0) { \
+ return ret; \
} \
*val = be##bits##_to_cpu(*val); \
return 0; \
@@ -408,5 +471,23 @@ const char *nbd_rep_lookup(uint32_t rep);
const char *nbd_info_lookup(uint16_t info);
const char *nbd_cmd_lookup(uint16_t info);
const char *nbd_err_lookup(int err);
+const char *nbd_mode_lookup(NBDMode mode);
+
+/* nbd/client-connection.c */
+void nbd_client_connection_enable_retry(NBDClientConnection *conn);
+
+NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
+ bool do_negotiation,
+ const char *export_name,
+ const char *x_dirty_bitmap,
+ QCryptoTLSCreds *tlscreds,
+ const char *tlshostname);
+void nbd_client_connection_release(NBDClientConnection *conn);
+
+QIOChannel *coroutine_fn
+nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
+ bool blocking, Error **errp);
+
+void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
#endif
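(For illustration, a hedged sketch of dispatching on the three reply magics after reading the common header prefix; the helper is hypothetical, real parsing lives in the NBD client code.)

    #include "qemu/osdep.h"
    #include "block/nbd.h"

    /* Sketch: classify a reply whose header has already been byte-swapped. */
    static const char *example_classify_reply(const NBDReply *reply)
    {
        /* .magic and .cookie overlay all three layouts (see BUILD_BUG_ONs). */
        switch (reply->magic) {
        case NBD_SIMPLE_REPLY_MAGIC:
            return "simple reply";
        case NBD_STRUCTURED_REPLY_MAGIC:
            return "structured reply chunk";
        case NBD_EXTENDED_REPLY_MAGIC:
            return "extended reply chunk";
        default:
            return "protocol error";
        }
    }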
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 1720ee1d51..bb231d0b9a 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -1,29 +1,83 @@
#ifndef BLOCK_NVME_H
#define BLOCK_NVME_H
-typedef struct NvmeBar {
+#include "hw/registerfields.h"
+
+typedef struct QEMU_PACKED NvmeBar {
uint64_t cap;
uint32_t vs;
uint32_t intms;
uint32_t intmc;
uint32_t cc;
- uint32_t rsvd1;
+ uint8_t rsvd24[4];
uint32_t csts;
- uint32_t nssrc;
+ uint32_t nssr;
uint32_t aqa;
uint64_t asq;
uint64_t acq;
uint32_t cmbloc;
uint32_t cmbsz;
- uint8_t padding[3520]; /* not used by QEMU */
+ uint32_t bpinfo;
+ uint32_t bprsel;
+ uint64_t bpmbl;
+ uint64_t cmbmsc;
+ uint32_t cmbsts;
+ uint8_t rsvd92[3492];
uint32_t pmrcap;
uint32_t pmrctl;
uint32_t pmrsts;
uint32_t pmrebs;
uint32_t pmrswtp;
- uint32_t pmrmsc;
+ uint32_t pmrmscl;
+ uint32_t pmrmscu;
+ uint8_t css[484];
} NvmeBar;
+enum NvmeBarRegs {
+ NVME_REG_CAP = offsetof(NvmeBar, cap),
+ NVME_REG_VS = offsetof(NvmeBar, vs),
+ NVME_REG_INTMS = offsetof(NvmeBar, intms),
+ NVME_REG_INTMC = offsetof(NvmeBar, intmc),
+ NVME_REG_CC = offsetof(NvmeBar, cc),
+ NVME_REG_CSTS = offsetof(NvmeBar, csts),
+ NVME_REG_NSSR = offsetof(NvmeBar, nssr),
+ NVME_REG_AQA = offsetof(NvmeBar, aqa),
+ NVME_REG_ASQ = offsetof(NvmeBar, asq),
+ NVME_REG_ACQ = offsetof(NvmeBar, acq),
+ NVME_REG_CMBLOC = offsetof(NvmeBar, cmbloc),
+ NVME_REG_CMBSZ = offsetof(NvmeBar, cmbsz),
+ NVME_REG_BPINFO = offsetof(NvmeBar, bpinfo),
+ NVME_REG_BPRSEL = offsetof(NvmeBar, bprsel),
+ NVME_REG_BPMBL = offsetof(NvmeBar, bpmbl),
+ NVME_REG_CMBMSC = offsetof(NvmeBar, cmbmsc),
+ NVME_REG_CMBSTS = offsetof(NvmeBar, cmbsts),
+ NVME_REG_PMRCAP = offsetof(NvmeBar, pmrcap),
+ NVME_REG_PMRCTL = offsetof(NvmeBar, pmrctl),
+ NVME_REG_PMRSTS = offsetof(NvmeBar, pmrsts),
+ NVME_REG_PMREBS = offsetof(NvmeBar, pmrebs),
+ NVME_REG_PMRSWTP = offsetof(NvmeBar, pmrswtp),
+ NVME_REG_PMRMSCL = offsetof(NvmeBar, pmrmscl),
+ NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
+};
+
+typedef struct QEMU_PACKED NvmeEndGrpLog {
+ uint8_t critical_warning;
+ uint8_t rsvd[2];
+ uint8_t avail_spare;
+ uint8_t avail_spare_thres;
+ uint8_t percet_used;
+ uint8_t rsvd1[26];
+ uint64_t end_estimate[2];
+ uint64_t data_units_read[2];
+ uint64_t data_units_written[2];
+ uint64_t media_units_written[2];
+ uint64_t host_read_commands[2];
+ uint64_t host_write_commands[2];
+ uint64_t media_integrity_errors[2];
+ uint64_t no_err_info_log_entries[2];
+ uint8_t rsvd2[352];
+} NvmeEndGrpLog;
+
enum NvmeCapShift {
CAP_MQES_SHIFT = 0,
CAP_CQR_SHIFT = 16,
@@ -34,7 +88,8 @@ enum NvmeCapShift {
CAP_CSS_SHIFT = 37,
CAP_MPSMIN_SHIFT = 48,
CAP_MPSMAX_SHIFT = 52,
- CAP_PMR_SHIFT = 56,
+ CAP_PMRS_SHIFT = 56,
+ CAP_CMBS_SHIFT = 57,
};
enum NvmeCapMask {
@@ -47,7 +102,8 @@ enum NvmeCapMask {
CAP_CSS_MASK = 0xff,
CAP_MPSMIN_MASK = 0xf,
CAP_MPSMAX_MASK = 0xf,
- CAP_PMR_MASK = 0x1,
+ CAP_PMRS_MASK = 0x1,
+ CAP_CMBS_MASK = 0x1,
};
#define NVME_CAP_MQES(cap) (((cap) >> CAP_MQES_SHIFT) & CAP_MQES_MASK)
@@ -59,27 +115,37 @@ enum NvmeCapMask {
#define NVME_CAP_CSS(cap) (((cap) >> CAP_CSS_SHIFT) & CAP_CSS_MASK)
#define NVME_CAP_MPSMIN(cap)(((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK)
#define NVME_CAP_MPSMAX(cap)(((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK)
-
-#define NVME_CAP_SET_MQES(cap, val) (cap |= (uint64_t)(val & CAP_MQES_MASK) \
- << CAP_MQES_SHIFT)
-#define NVME_CAP_SET_CQR(cap, val) (cap |= (uint64_t)(val & CAP_CQR_MASK) \
- << CAP_CQR_SHIFT)
-#define NVME_CAP_SET_AMS(cap, val) (cap |= (uint64_t)(val & CAP_AMS_MASK) \
- << CAP_AMS_SHIFT)
-#define NVME_CAP_SET_TO(cap, val) (cap |= (uint64_t)(val & CAP_TO_MASK) \
- << CAP_TO_SHIFT)
-#define NVME_CAP_SET_DSTRD(cap, val) (cap |= (uint64_t)(val & CAP_DSTRD_MASK) \
- << CAP_DSTRD_SHIFT)
-#define NVME_CAP_SET_NSSRS(cap, val) (cap |= (uint64_t)(val & CAP_NSSRS_MASK) \
- << CAP_NSSRS_SHIFT)
-#define NVME_CAP_SET_CSS(cap, val) (cap |= (uint64_t)(val & CAP_CSS_MASK) \
- << CAP_CSS_SHIFT)
-#define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK)\
- << CAP_MPSMIN_SHIFT)
-#define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK)\
- << CAP_MPSMAX_SHIFT)
-#define NVME_CAP_SET_PMRS(cap, val) (cap |= (uint64_t)(val & CAP_PMR_MASK)\
- << CAP_PMR_SHIFT)
+#define NVME_CAP_PMRS(cap) (((cap) >> CAP_PMRS_SHIFT) & CAP_PMRS_MASK)
+#define NVME_CAP_CMBS(cap) (((cap) >> CAP_CMBS_SHIFT) & CAP_CMBS_MASK)
+
+#define NVME_CAP_SET_MQES(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_MQES_MASK) << CAP_MQES_SHIFT)
+#define NVME_CAP_SET_CQR(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_CQR_MASK) << CAP_CQR_SHIFT)
+#define NVME_CAP_SET_AMS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_AMS_MASK) << CAP_AMS_SHIFT)
+#define NVME_CAP_SET_TO(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_TO_MASK) << CAP_TO_SHIFT)
+#define NVME_CAP_SET_DSTRD(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_DSTRD_MASK) << CAP_DSTRD_SHIFT)
+#define NVME_CAP_SET_NSSRS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_NSSRS_MASK) << CAP_NSSRS_SHIFT)
+#define NVME_CAP_SET_CSS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_CSS_MASK) << CAP_CSS_SHIFT)
+#define NVME_CAP_SET_MPSMIN(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_MPSMIN_MASK) << CAP_MPSMIN_SHIFT)
+#define NVME_CAP_SET_MPSMAX(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_MPSMAX_MASK) << CAP_MPSMAX_SHIFT)
+#define NVME_CAP_SET_PMRS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_PMRS_MASK) << CAP_PMRS_SHIFT)
+#define NVME_CAP_SET_CMBS(cap, val) \
+ ((cap) |= (uint64_t)((val) & CAP_CMBS_MASK) << CAP_CMBS_SHIFT)
+
+enum NvmeCapCss {
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI_SUPP = 1 << 6,
+ NVME_CAP_CSS_ADMIN_ONLY = 1 << 7,
+};
enum NvmeCcShift {
CC_EN_SHIFT = 0,
@@ -109,6 +175,27 @@ enum NvmeCcMask {
#define NVME_CC_IOSQES(cc) ((cc >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK)
#define NVME_CC_IOCQES(cc) ((cc >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK)
+enum NvmeCcCss {
+ NVME_CC_CSS_NVM = 0x0,
+ NVME_CC_CSS_CSI = 0x6,
+ NVME_CC_CSS_ADMIN_ONLY = 0x7,
+};
+
+#define NVME_SET_CC_EN(cc, val) \
+ (cc |= (uint32_t)((val) & CC_EN_MASK) << CC_EN_SHIFT)
+#define NVME_SET_CC_CSS(cc, val) \
+ (cc |= (uint32_t)((val) & CC_CSS_MASK) << CC_CSS_SHIFT)
+#define NVME_SET_CC_MPS(cc, val) \
+ (cc |= (uint32_t)((val) & CC_MPS_MASK) << CC_MPS_SHIFT)
+#define NVME_SET_CC_AMS(cc, val) \
+ (cc |= (uint32_t)((val) & CC_AMS_MASK) << CC_AMS_SHIFT)
+#define NVME_SET_CC_SHN(cc, val) \
+ (cc |= (uint32_t)((val) & CC_SHN_MASK) << CC_SHN_SHIFT)
+#define NVME_SET_CC_IOSQES(cc, val) \
+ (cc |= (uint32_t)((val) & CC_IOSQES_MASK) << CC_IOSQES_SHIFT)
+#define NVME_SET_CC_IOCQES(cc, val) \
+ (cc |= (uint32_t)((val) & CC_IOCQES_MASK) << CC_IOCQES_SHIFT)
+
enum NvmeCstsShift {
CSTS_RDY_SHIFT = 0,
CSTS_CFS_SHIFT = 1,
@@ -151,25 +238,64 @@ enum NvmeAqaMask {
#define NVME_AQA_ACQS(aqa) ((aqa >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK)
enum NvmeCmblocShift {
- CMBLOC_BIR_SHIFT = 0,
- CMBLOC_OFST_SHIFT = 12,
+ CMBLOC_BIR_SHIFT = 0,
+ CMBLOC_CQMMS_SHIFT = 3,
+ CMBLOC_CQPDS_SHIFT = 4,
+ CMBLOC_CDPMLS_SHIFT = 5,
+ CMBLOC_CDPCILS_SHIFT = 6,
+ CMBLOC_CDMMMS_SHIFT = 7,
+ CMBLOC_CQDA_SHIFT = 8,
+ CMBLOC_OFST_SHIFT = 12,
};
enum NvmeCmblocMask {
- CMBLOC_BIR_MASK = 0x7,
- CMBLOC_OFST_MASK = 0xfffff,
+ CMBLOC_BIR_MASK = 0x7,
+ CMBLOC_CQMMS_MASK = 0x1,
+ CMBLOC_CQPDS_MASK = 0x1,
+ CMBLOC_CDPMLS_MASK = 0x1,
+ CMBLOC_CDPCILS_MASK = 0x1,
+ CMBLOC_CDMMMS_MASK = 0x1,
+ CMBLOC_CQDA_MASK = 0x1,
+ CMBLOC_OFST_MASK = 0xfffff,
};
-#define NVME_CMBLOC_BIR(cmbloc) ((cmbloc >> CMBLOC_BIR_SHIFT) & \
- CMBLOC_BIR_MASK)
-#define NVME_CMBLOC_OFST(cmbloc)((cmbloc >> CMBLOC_OFST_SHIFT) & \
- CMBLOC_OFST_MASK)
-
-#define NVME_CMBLOC_SET_BIR(cmbloc, val) \
+#define NVME_CMBLOC_BIR(cmbloc) \
+ ((cmbloc >> CMBLOC_BIR_SHIFT) & CMBLOC_BIR_MASK)
+#define NVME_CMBLOC_CQMMS(cmbloc) \
+ ((cmbloc >> CMBLOC_CQMMS_SHIFT) & CMBLOC_CQMMS_MASK)
+#define NVME_CMBLOC_CQPDS(cmbloc) \
+ ((cmbloc >> CMBLOC_CQPDS_SHIFT) & CMBLOC_CQPDS_MASK)
+#define NVME_CMBLOC_CDPMLS(cmbloc) \
+ ((cmbloc >> CMBLOC_CDPMLS_SHIFT) & CMBLOC_CDPMLS_MASK)
+#define NVME_CMBLOC_CDPCILS(cmbloc) \
+ ((cmbloc >> CMBLOC_CDPCILS_SHIFT) & CMBLOC_CDPCILS_MASK)
+#define NVME_CMBLOC_CDMMMS(cmbloc) \
+ ((cmbloc >> CMBLOC_CDMMMS_SHIFT) & CMBLOC_CDMMMS_MASK)
+#define NVME_CMBLOC_CQDA(cmbloc) \
+ ((cmbloc >> CMBLOC_CQDA_SHIFT) & CMBLOC_CQDA_MASK)
+#define NVME_CMBLOC_OFST(cmbloc) \
+ ((cmbloc >> CMBLOC_OFST_SHIFT) & CMBLOC_OFST_MASK)
+
+#define NVME_CMBLOC_SET_BIR(cmbloc, val) \
(cmbloc |= (uint64_t)(val & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT)
+#define NVME_CMBLOC_SET_CQMMS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CQMMS_MASK) << CMBLOC_CQMMS_SHIFT)
+#define NVME_CMBLOC_SET_CQPDS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CQPDS_MASK) << CMBLOC_CQPDS_SHIFT)
+#define NVME_CMBLOC_SET_CDPMLS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CDPMLS_MASK) << CMBLOC_CDPMLS_SHIFT)
+#define NVME_CMBLOC_SET_CDPCILS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CDPCILS_MASK) << CMBLOC_CDPCILS_SHIFT)
+#define NVME_CMBLOC_SET_CDMMMS(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CDMMMS_MASK) << CMBLOC_CDMMMS_SHIFT)
+#define NVME_CMBLOC_SET_CQDA(cmbloc, val) \
+ (cmbloc |= (uint64_t)(val & CMBLOC_CQDA_MASK) << CMBLOC_CQDA_SHIFT)
#define NVME_CMBLOC_SET_OFST(cmbloc, val) \
(cmbloc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT)
+#define NVME_CMBMSMC_SET_CRE (cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBMSC_CRE_SHIFT)
+
enum NvmeCmbszShift {
CMBSZ_SQS_SHIFT = 0,
CMBSZ_CQS_SHIFT = 1,
@@ -216,6 +342,46 @@ enum NvmeCmbszMask {
#define NVME_CMBSZ_GETSIZE(cmbsz) \
(NVME_CMBSZ_SZ(cmbsz) * (1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))))
+enum NvmeCmbmscShift {
+ CMBMSC_CRE_SHIFT = 0,
+ CMBMSC_CMSE_SHIFT = 1,
+ CMBMSC_CBA_SHIFT = 12,
+};
+
+enum NvmeCmbmscMask {
+ CMBMSC_CRE_MASK = 0x1,
+ CMBMSC_CMSE_MASK = 0x1,
+ CMBMSC_CBA_MASK = ((1ULL << 52) - 1),
+};
+
+#define NVME_CMBMSC_CRE(cmbmsc) \
+ ((cmbmsc >> CMBMSC_CRE_SHIFT) & CMBMSC_CRE_MASK)
+#define NVME_CMBMSC_CMSE(cmbmsc) \
+ ((cmbmsc >> CMBMSC_CMSE_SHIFT) & CMBMSC_CMSE_MASK)
+#define NVME_CMBMSC_CBA(cmbmsc) \
+ ((cmbmsc >> CMBMSC_CBA_SHIFT) & CMBMSC_CBA_MASK)
+
+
+#define NVME_CMBMSC_SET_CRE(cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBMSC_CRE_MASK) << CMBMSC_CRE_SHIFT)
+#define NVME_CMBMSC_SET_CMSE(cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBMSC_CMSE_MASK) << CMBMSC_CMSE_SHIFT)
+#define NVME_CMBMSC_SET_CBA(cmbmsc, val) \
+ (cmbmsc |= (uint64_t)(val & CMBMSC_CBA_MASK) << CMBMSC_CBA_SHIFT)
+
+enum NvmeCmbstsShift {
+ CMBSTS_CBAI_SHIFT = 0,
+};
+enum NvmeCmbstsMask {
+ CMBSTS_CBAI_MASK = 0x1,
+};
+
+#define NVME_CMBSTS_CBAI(cmbsts) \
+ ((cmbsts >> CMBSTS_CBAI_SHIFT) & CMBSTS_CBAI_MASK)
+
+#define NVME_CMBSTS_SET_CBAI(cmbsts, val) \
+ (cmbsts |= (uint64_t)(val & CMBSTS_CBAI_MASK) << CMBSTS_CBAI_SHIFT)
+
enum NvmePmrcapShift {
PMRCAP_RDS_SHIFT = 3,
PMRCAP_WDS_SHIFT = 4,
@@ -357,35 +523,73 @@ enum NvmePmrswtpMask {
#define NVME_PMRSWTP_SET_PMRSWTV(pmrswtp, val) \
(pmrswtp |= (uint64_t)(val & PMRSWTP_PMRSWTV_MASK) << PMRSWTP_PMRSWTV_SHIFT)
-enum NvmePmrmscShift {
- PMRMSC_CMSE_SHIFT = 1,
- PMRMSC_CBA_SHIFT = 12,
+enum NvmePmrmsclShift {
+ PMRMSCL_CMSE_SHIFT = 1,
+ PMRMSCL_CBA_SHIFT = 12,
};
-enum NvmePmrmscMask {
- PMRMSC_CMSE_MASK = 0x1,
- PMRMSC_CBA_MASK = 0xfffffffffffff,
+enum NvmePmrmsclMask {
+ PMRMSCL_CMSE_MASK = 0x1,
+ PMRMSCL_CBA_MASK = 0xfffff,
+};
+
+#define NVME_PMRMSCL_CMSE(pmrmscl) \
+ ((pmrmscl >> PMRMSCL_CMSE_SHIFT) & PMRMSCL_CMSE_MASK)
+#define NVME_PMRMSCL_CBA(pmrmscl) \
+ ((pmrmscl >> PMRMSCL_CBA_SHIFT) & PMRMSCL_CBA_MASK)
+
+#define NVME_PMRMSCL_SET_CMSE(pmrmscl, val) \
+ (pmrmscl |= (uint32_t)(val & PMRMSCL_CMSE_MASK) << PMRMSCL_CMSE_SHIFT)
+#define NVME_PMRMSCL_SET_CBA(pmrmscl, val) \
+ (pmrmscl |= (uint32_t)(val & PMRMSCL_CBA_MASK) << PMRMSCL_CBA_SHIFT)
+
+enum NvmeSglDescriptorType {
+ NVME_SGL_DESCR_TYPE_DATA_BLOCK = 0x0,
+ NVME_SGL_DESCR_TYPE_BIT_BUCKET = 0x1,
+ NVME_SGL_DESCR_TYPE_SEGMENT = 0x2,
+ NVME_SGL_DESCR_TYPE_LAST_SEGMENT = 0x3,
+ NVME_SGL_DESCR_TYPE_KEYED_DATA_BLOCK = 0x4,
+
+ NVME_SGL_DESCR_TYPE_VENDOR_SPECIFIC = 0xf,
};
-#define NVME_PMRMSC_CMSE(pmrmsc) \
- ((pmrmsc >> PMRMSC_CMSE_SHIFT) & PMRMSC_CMSE_MASK)
-#define NVME_PMRMSC_CBA(pmrmsc) \
- ((pmrmsc >> PMRMSC_CBA_SHIFT) & PMRMSC_CBA_MASK)
+enum NvmeSglDescriptorSubtype {
+ NVME_SGL_DESCR_SUBTYPE_ADDRESS = 0x0,
+};
-#define NVME_PMRMSC_SET_CMSE(pmrmsc, val) \
- (pmrmsc |= (uint64_t)(val & PMRMSC_CMSE_MASK) << PMRMSC_CMSE_SHIFT)
-#define NVME_PMRMSC_SET_CBA(pmrmsc, val) \
- (pmrmsc |= (uint64_t)(val & PMRMSC_CBA_MASK) << PMRMSC_CBA_SHIFT)
+typedef struct QEMU_PACKED NvmeSglDescriptor {
+ uint64_t addr;
+ uint32_t len;
+ uint8_t rsvd[3];
+ uint8_t type;
+} NvmeSglDescriptor;
+
+#define NVME_SGL_TYPE(type) ((type >> 4) & 0xf)
+#define NVME_SGL_SUBTYPE(type) (type & 0xf)
+
+typedef union NvmeCmdDptr {
+ struct {
+ uint64_t prp1;
+ uint64_t prp2;
+ };
+
+ NvmeSglDescriptor sgl;
+} NvmeCmdDptr;
+
+enum NvmePsdt {
+ NVME_PSDT_PRP = 0x0,
+ NVME_PSDT_SGL_MPTR_CONTIGUOUS = 0x1,
+ NVME_PSDT_SGL_MPTR_SGL = 0x2,
+};
-typedef struct NvmeCmd {
+typedef struct QEMU_PACKED NvmeCmd {
uint8_t opcode;
- uint8_t fuse;
+ uint8_t flags;
uint16_t cid;
uint32_t nsid;
uint64_t res1;
uint64_t mptr;
- uint64_t prp1;
- uint64_t prp2;
+ NvmeCmdDptr dptr;
uint32_t cdw10;
uint32_t cdw11;
uint32_t cdw12;
@@ -394,6 +598,9 @@ typedef struct NvmeCmd {
uint32_t cdw15;
} NvmeCmd;
+#define NVME_CMD_FLAGS_FUSE(flags) (flags & 0x3)
+#define NVME_CMD_FLAGS_PSDT(flags) ((flags >> 6) & 0x3)
+
enum NvmeAdminCommands {
NVME_ADM_CMD_DELETE_SQ = 0x00,
NVME_ADM_CMD_CREATE_SQ = 0x01,
@@ -407,6 +614,11 @@ enum NvmeAdminCommands {
NVME_ADM_CMD_ASYNC_EV_REQ = 0x0c,
NVME_ADM_CMD_ACTIVATE_FW = 0x10,
NVME_ADM_CMD_DOWNLOAD_FW = 0x11,
+ NVME_ADM_CMD_NS_ATTACHMENT = 0x15,
+ NVME_ADM_CMD_DIRECTIVE_SEND = 0x19,
+ NVME_ADM_CMD_VIRT_MNGMT = 0x1c,
+ NVME_ADM_CMD_DIRECTIVE_RECV = 0x1a,
+ NVME_ADM_CMD_DBBUF_CONFIG = 0x7c,
NVME_ADM_CMD_FORMAT_NVM = 0x80,
NVME_ADM_CMD_SECURITY_SEND = 0x81,
NVME_ADM_CMD_SECURITY_RECV = 0x82,
@@ -418,11 +630,18 @@ enum NvmeIoCommands {
NVME_CMD_READ = 0x02,
NVME_CMD_WRITE_UNCOR = 0x04,
NVME_CMD_COMPARE = 0x05,
- NVME_CMD_WRITE_ZEROS = 0x08,
+ NVME_CMD_WRITE_ZEROES = 0x08,
NVME_CMD_DSM = 0x09,
+ NVME_CMD_VERIFY = 0x0c,
+ NVME_CMD_IO_MGMT_RECV = 0x12,
+ NVME_CMD_COPY = 0x19,
+ NVME_CMD_IO_MGMT_SEND = 0x1d,
+ NVME_CMD_ZONE_MGMT_SEND = 0x79,
+ NVME_CMD_ZONE_MGMT_RECV = 0x7a,
+ NVME_CMD_ZONE_APPEND = 0x7d,
};
-typedef struct NvmeDeleteQ {
+typedef struct QEMU_PACKED NvmeDeleteQ {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -432,7 +651,7 @@ typedef struct NvmeDeleteQ {
uint32_t rsvd11[5];
} NvmeDeleteQ;
-typedef struct NvmeCreateCq {
+typedef struct QEMU_PACKED NvmeCreateCq {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -449,7 +668,12 @@ typedef struct NvmeCreateCq {
#define NVME_CQ_FLAGS_PC(cq_flags) (cq_flags & 0x1)
#define NVME_CQ_FLAGS_IEN(cq_flags) ((cq_flags >> 1) & 0x1)
-typedef struct NvmeCreateSq {
+enum NvmeFlagsCq {
+ NVME_CQ_PC = 1,
+ NVME_CQ_IEN = 2,
+};
+
+typedef struct QEMU_PACKED NvmeCreateSq {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -466,15 +690,16 @@ typedef struct NvmeCreateSq {
#define NVME_SQ_FLAGS_PC(sq_flags) (sq_flags & 0x1)
#define NVME_SQ_FLAGS_QPRIO(sq_flags) ((sq_flags >> 1) & 0x3)
-enum NvmeQueueFlags {
- NVME_Q_PC = 1,
- NVME_Q_PRIO_URGENT = 0,
- NVME_Q_PRIO_HIGH = 1,
- NVME_Q_PRIO_NORMAL = 2,
- NVME_Q_PRIO_LOW = 3,
+enum NvmeFlagsSq {
+ NVME_SQ_PC = 1,
+
+ NVME_SQ_PRIO_URGENT = 0,
+ NVME_SQ_PRIO_HIGH = 1,
+ NVME_SQ_PRIO_NORMAL = 2,
+ NVME_SQ_PRIO_LOW = 3,
};
-typedef struct NvmeIdentify {
+typedef struct QEMU_PACKED NvmeIdentify {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
@@ -482,23 +707,30 @@ typedef struct NvmeIdentify {
uint64_t rsvd2[2];
uint64_t prp1;
uint64_t prp2;
- uint32_t cns;
- uint32_t rsvd11[5];
+ uint8_t cns;
+ uint8_t rsvd10;
+ uint16_t ctrlid;
+ uint16_t nvmsetid;
+ uint8_t rsvd11;
+ uint8_t csi;
+ uint32_t rsvd12[4];
} NvmeIdentify;
-typedef struct NvmeRwCmd {
+typedef struct QEMU_PACKED NvmeRwCmd {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
uint32_t nsid;
- uint64_t rsvd2;
+ uint32_t cdw2;
+ uint32_t cdw3;
uint64_t mptr;
- uint64_t prp1;
- uint64_t prp2;
+ NvmeCmdDptr dptr;
uint64_t slba;
uint16_t nlb;
uint16_t control;
- uint32_t dsmgmt;
+ uint8_t dsmgmt;
+ uint8_t rsvd;
+ uint16_t dspec;
uint32_t reftag;
uint16_t apptag;
uint16_t appmask;
@@ -522,20 +754,31 @@ enum {
NVME_RW_DSM_LATENCY_LOW = 3 << 4,
NVME_RW_DSM_SEQ_REQ = 1 << 6,
NVME_RW_DSM_COMPRESSED = 1 << 7,
+ NVME_RW_PIREMAP = 1 << 9,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
+ NVME_RW_PRINFO_PRCHK_MASK = 7 << 10,
+};
+
+#define NVME_RW_PRINFO(control) ((control >> 10) & 0xf)
+
+enum {
+ NVME_PRINFO_PRACT = 1 << 3,
+ NVME_PRINFO_PRCHK_GUARD = 1 << 2,
+ NVME_PRINFO_PRCHK_APP = 1 << 1,
+ NVME_PRINFO_PRCHK_REF = 1 << 0,
+ NVME_PRINFO_PRCHK_MASK = 7 << 0,
};
-typedef struct NvmeDsmCmd {
+typedef struct QEMU_PACKED NvmeDsmCmd {
uint8_t opcode;
uint8_t flags;
uint16_t cid;
uint32_t nsid;
uint64_t rsvd2[2];
- uint64_t prp1;
- uint64_t prp2;
+ NvmeCmdDptr dptr;
uint32_t nr;
uint32_t attributes;
uint32_t rsvd12[4];
@@ -547,19 +790,64 @@ enum {
NVME_DSMGMT_AD = 1 << 2,
};
-typedef struct NvmeDsmRange {
+typedef struct QEMU_PACKED NvmeDsmRange {
uint32_t cattr;
uint32_t nlb;
uint64_t slba;
} NvmeDsmRange;
+enum {
+ NVME_COPY_FORMAT_0 = 0x0,
+ NVME_COPY_FORMAT_1 = 0x1,
+};
+
+typedef struct QEMU_PACKED NvmeCopyCmd {
+ uint8_t opcode;
+ uint8_t flags;
+ uint16_t cid;
+ uint32_t nsid;
+ uint32_t cdw2;
+ uint32_t cdw3;
+ uint32_t rsvd2[2];
+ NvmeCmdDptr dptr;
+ uint64_t sdlba;
+ uint8_t nr;
+ uint8_t control[3];
+ uint16_t rsvd13;
+ uint16_t dspec;
+ uint32_t reftag;
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopyCmd;
+
+typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0 {
+ uint8_t rsvd0[8];
+ uint64_t slba;
+ uint16_t nlb;
+ uint8_t rsvd18[6];
+ uint32_t reftag;
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopySourceRangeFormat0;
+
+typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1 {
+ uint8_t rsvd0[8];
+ uint64_t slba;
+ uint16_t nlb;
+ uint8_t rsvd18[8];
+ uint8_t sr[10];
+ uint16_t apptag;
+ uint16_t appmask;
+} NvmeCopySourceRangeFormat1;
+
enum NvmeAsyncEventRequest {
NVME_AER_TYPE_ERROR = 0,
NVME_AER_TYPE_SMART = 1,
+ NVME_AER_TYPE_NOTICE = 2,
NVME_AER_TYPE_IO_SPECIFIC = 6,
NVME_AER_TYPE_VENDOR_SPECIFIC = 7,
- NVME_AER_INFO_ERR_INVALID_SQ = 0,
- NVME_AER_INFO_ERR_INVALID_DB = 1,
+ NVME_AER_INFO_ERR_INVALID_DB_REGISTER = 0,
+ NVME_AER_INFO_ERR_INVALID_DB_VALUE = 1,
NVME_AER_INFO_ERR_DIAG_FAIL = 2,
NVME_AER_INFO_ERR_PERS_INTERNAL_ERR = 3,
NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR = 4,
@@ -567,18 +855,23 @@ enum NvmeAsyncEventRequest {
NVME_AER_INFO_SMART_RELIABILITY = 0,
NVME_AER_INFO_SMART_TEMP_THRESH = 1,
NVME_AER_INFO_SMART_SPARE_THRESH = 2,
+ NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED = 0,
};
-typedef struct NvmeAerResult {
+typedef struct QEMU_PACKED NvmeAerResult {
uint8_t event_type;
uint8_t event_info;
uint8_t log_page;
uint8_t resv;
} NvmeAerResult;
-typedef struct NvmeCqe {
+typedef struct QEMU_PACKED NvmeZonedResult {
+ uint64_t slba;
+} NvmeZonedResult;
+
+typedef struct QEMU_PACKED NvmeCqe {
uint32_t result;
- uint32_t rsvd;
+ uint32_t dw1;
uint16_t sq_head;
uint16_t sq_id;
uint16_t cid;
@@ -599,10 +892,22 @@ enum NvmeStatusCodes {
NVME_CMD_ABORT_MISSING_FUSE = 0x000a,
NVME_INVALID_NSID = 0x000b,
NVME_CMD_SEQ_ERROR = 0x000c,
+ NVME_INVALID_SGL_SEG_DESCR = 0x000d,
+ NVME_INVALID_NUM_SGL_DESCRS = 0x000e,
+ NVME_DATA_SGL_LEN_INVALID = 0x000f,
+ NVME_MD_SGL_LEN_INVALID = 0x0010,
+ NVME_SGL_DESCR_TYPE_INVALID = 0x0011,
+ NVME_INVALID_USE_OF_CMB = 0x0012,
+ NVME_INVALID_PRP_OFFSET = 0x0013,
+ NVME_CMD_SET_CMB_REJECTED = 0x002b,
+ NVME_INVALID_CMD_SET = 0x002c,
+ NVME_FDP_DISABLED = 0x0029,
+ NVME_INVALID_PHID_LIST = 0x002a,
NVME_LBA_RANGE = 0x0080,
NVME_CAP_EXCEEDED = 0x0081,
NVME_NS_NOT_READY = 0x0082,
NVME_NS_RESV_CONFLICT = 0x0083,
+ NVME_FORMAT_IN_PROGRESS = 0x0084,
NVME_INVALID_CQID = 0x0100,
NVME_INVALID_QID = 0x0101,
NVME_MAX_QSIZE_EXCEEDED = 0x0102,
@@ -617,11 +922,31 @@ enum NvmeStatusCodes {
NVME_FW_REQ_RESET = 0x010b,
NVME_INVALID_QUEUE_DEL = 0x010c,
NVME_FID_NOT_SAVEABLE = 0x010d,
- NVME_FID_NOT_NSID_SPEC = 0x010f,
+ NVME_FEAT_NOT_CHANGEABLE = 0x010e,
+ NVME_FEAT_NOT_NS_SPEC = 0x010f,
NVME_FW_REQ_SUSYSTEM_RESET = 0x0110,
+ NVME_NS_ALREADY_ATTACHED = 0x0118,
+ NVME_NS_PRIVATE = 0x0119,
+ NVME_NS_NOT_ATTACHED = 0x011a,
+ NVME_NS_CTRL_LIST_INVALID = 0x011c,
+ NVME_INVALID_CTRL_ID = 0x011f,
+ NVME_INVALID_SEC_CTRL_STATE = 0x0120,
+ NVME_INVALID_NUM_RESOURCES = 0x0121,
+ NVME_INVALID_RESOURCE_ID = 0x0122,
NVME_CONFLICTING_ATTRS = 0x0180,
NVME_INVALID_PROT_INFO = 0x0181,
NVME_WRITE_TO_RO = 0x0182,
+ NVME_CMD_SIZE_LIMIT = 0x0183,
+ NVME_INVALID_ZONE_OP = 0x01b6,
+ NVME_NOZRWA = 0x01b7,
+ NVME_ZONE_BOUNDARY_ERROR = 0x01b8,
+ NVME_ZONE_FULL = 0x01b9,
+ NVME_ZONE_READ_ONLY = 0x01ba,
+ NVME_ZONE_OFFLINE = 0x01bb,
+ NVME_ZONE_INVALID_WRITE = 0x01bc,
+ NVME_ZONE_TOO_MANY_ACTIVE = 0x01bd,
+ NVME_ZONE_TOO_MANY_OPEN = 0x01be,
+ NVME_ZONE_INVAL_TRANSITION = 0x01bf,
NVME_WRITE_FAULT = 0x0280,
NVME_UNRECOVERED_READ = 0x0281,
NVME_E2E_GUARD_ERROR = 0x0282,
@@ -629,12 +954,14 @@ enum NvmeStatusCodes {
NVME_E2E_REF_ERROR = 0x0284,
NVME_CMP_FAILURE = 0x0285,
NVME_ACCESS_DENIED = 0x0286,
+ NVME_DULB = 0x0287,
+ NVME_E2E_STORAGE_TAG_ERROR = 0x0288,
NVME_MORE = 0x2000,
NVME_DNR = 0x4000,
NVME_NO_COMPLETE = 0xffff,
};
-typedef struct NvmeFwSlotInfoLog {
+typedef struct QEMU_PACKED NvmeFwSlotInfoLog {
uint8_t afi;
uint8_t reserved1[7];
uint8_t frs1[8];
@@ -647,7 +974,7 @@ typedef struct NvmeFwSlotInfoLog {
uint8_t reserved2[448];
} NvmeFwSlotInfoLog;
-typedef struct NvmeErrorLog {
+typedef struct QEMU_PACKED NvmeErrorLog {
uint64_t error_count;
uint16_t sqid;
uint16_t cid;
@@ -659,9 +986,9 @@ typedef struct NvmeErrorLog {
uint8_t resv[35];
} NvmeErrorLog;
-typedef struct NvmeSmartLog {
+typedef struct QEMU_PACKED NvmeSmartLog {
uint8_t critical_warning;
- uint8_t temperature[2];
+ uint16_t temperature;
uint8_t available_spare;
uint8_t available_spare_threshold;
uint8_t percentage_used;
@@ -679,21 +1006,46 @@ typedef struct NvmeSmartLog {
uint8_t reserved2[320];
} NvmeSmartLog;
+#define NVME_SMART_WARN_MAX 6
enum NvmeSmartWarn {
NVME_SMART_SPARE = 1 << 0,
NVME_SMART_TEMPERATURE = 1 << 1,
NVME_SMART_RELIABILITY = 1 << 2,
NVME_SMART_MEDIA_READ_ONLY = 1 << 3,
NVME_SMART_FAILED_VOLATILE_MEDIA = 1 << 4,
+ NVME_SMART_PMR_UNRELIABLE = 1 << 5,
};
-enum LogIdentifier {
- NVME_LOG_ERROR_INFO = 0x01,
- NVME_LOG_SMART_INFO = 0x02,
- NVME_LOG_FW_SLOT_INFO = 0x03,
+typedef struct NvmeEffectsLog {
+ uint32_t acs[256];
+ uint32_t iocs[256];
+ uint8_t resv[2048];
+} NvmeEffectsLog;
+
+enum {
+ NVME_CMD_EFF_CSUPP = 1 << 0,
+ NVME_CMD_EFF_LBCC = 1 << 1,
+ NVME_CMD_EFF_NCC = 1 << 2,
+ NVME_CMD_EFF_NIC = 1 << 3,
+ NVME_CMD_EFF_CCC = 1 << 4,
+ NVME_CMD_EFF_CSE_MASK = 3 << 16,
+ NVME_CMD_EFF_UUID_SEL = 1 << 19,
};
-typedef struct NvmePSD {
+enum NvmeLogIdentifier {
+ NVME_LOG_ERROR_INFO = 0x01,
+ NVME_LOG_SMART_INFO = 0x02,
+ NVME_LOG_FW_SLOT_INFO = 0x03,
+ NVME_LOG_CHANGED_NSLIST = 0x04,
+ NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ENDGRP = 0x09,
+ NVME_LOG_FDP_CONFS = 0x20,
+ NVME_LOG_FDP_RUH_USAGE = 0x21,
+ NVME_LOG_FDP_STATS = 0x22,
+ NVME_LOG_FDP_EVENTS = 0x23,
+};
+
+typedef struct QEMU_PACKED NvmePSD {
uint16_t mp;
uint16_t reserved;
uint32_t enlat;
@@ -705,15 +1057,29 @@ typedef struct NvmePSD {
uint8_t resv[16];
} NvmePSD;
+#define NVME_CONTROLLER_LIST_SIZE 2048
#define NVME_IDENTIFY_DATA_SIZE 4096
-enum {
- NVME_ID_CNS_NS = 0x0,
- NVME_ID_CNS_CTRL = 0x1,
- NVME_ID_CNS_NS_ACTIVE_LIST = 0x2,
+enum NvmeIdCns {
+ NVME_ID_CNS_NS = 0x00,
+ NVME_ID_CNS_CTRL = 0x01,
+ NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
+ NVME_ID_CNS_NS_DESCR_LIST = 0x03,
+ NVME_ID_CNS_CS_NS = 0x05,
+ NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07,
+ NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
+ NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12,
+ NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_PRIMARY_CTRL_CAP = 0x14,
+ NVME_ID_CNS_SECONDARY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
+ NVME_ID_CNS_CS_NS_PRESENT = 0x1b,
+ NVME_ID_CNS_IO_COMMAND_SET = 0x1c,
};
-typedef struct NvmeIdCtrl {
+typedef struct QEMU_PACKED NvmeIdCtrl {
uint16_t vid;
uint16_t ssvid;
uint8_t sn[20];
@@ -723,7 +1089,16 @@ typedef struct NvmeIdCtrl {
uint8_t ieee[3];
uint8_t cmic;
uint8_t mdts;
- uint8_t rsvd255[178];
+ uint16_t cntlid;
+ uint32_t ver;
+ uint32_t rtd3r;
+ uint32_t rtd3e;
+ uint32_t oaes;
+ uint32_t ctratt;
+ uint8_t rsvd100[11];
+ uint8_t cntrltype;
+ uint8_t fguid[16];
+ uint8_t rsvd128[128];
uint16_t oacs;
uint8_t acl;
uint8_t aerl;
@@ -731,10 +1106,31 @@ typedef struct NvmeIdCtrl {
uint8_t lpa;
uint8_t elpe;
uint8_t npss;
- uint8_t rsvd511[248];
+ uint8_t avscc;
+ uint8_t apsta;
+ uint16_t wctemp;
+ uint16_t cctemp;
+ uint16_t mtfa;
+ uint32_t hmpre;
+ uint32_t hmmin;
+ uint8_t tnvmcap[16];
+ uint8_t unvmcap[16];
+ uint32_t rpmbs;
+ uint16_t edstt;
+ uint8_t dsto;
+ uint8_t fwug;
+ uint16_t kas;
+ uint16_t hctma;
+ uint16_t mntmt;
+ uint16_t mxtmt;
+ uint32_t sanicap;
+ uint8_t rsvd332[6];
+ uint16_t nsetidmax;
+ uint16_t endgidmax;
+ uint8_t rsvd342[170];
uint8_t sqes;
uint8_t cqes;
- uint16_t rsvd515;
+ uint16_t maxcmd;
uint32_t nn;
uint16_t oncs;
uint16_t fuses;
@@ -742,26 +1138,94 @@ typedef struct NvmeIdCtrl {
uint8_t vwc;
uint16_t awun;
uint16_t awupf;
- uint8_t rsvd703[174];
- uint8_t rsvd2047[1344];
+ uint8_t nvscc;
+ uint8_t rsvd531;
+ uint16_t acwu;
+ uint16_t ocfs;
+ uint32_t sgls;
+ uint8_t rsvd540[228];
+ uint8_t subnqn[256];
+ uint8_t rsvd1024[1024];
NvmePSD psd[32];
uint8_t vs[1024];
} NvmeIdCtrl;
+typedef struct NvmeIdCtrlZoned {
+ uint8_t zasl;
+ uint8_t rsvd1[4095];
+} NvmeIdCtrlZoned;
+
+typedef struct NvmeIdCtrlNvm {
+ uint8_t vsl;
+ uint8_t wzsl;
+ uint8_t wusl;
+ uint8_t dmrl;
+ uint32_t dmrsl;
+ uint64_t dmsl;
+ uint8_t rsvd16[4080];
+} NvmeIdCtrlNvm;
+
+enum NvmeIdCtrlOaes {
+ NVME_OAES_NS_ATTR = 1 << 8,
+};
+
+enum NvmeIdCtrlCtratt {
+ NVME_CTRATT_ENDGRPS = 1 << 4,
+ NVME_CTRATT_ELBAS = 1 << 15,
+ NVME_CTRATT_FDPS = 1 << 19,
+};
+
enum NvmeIdCtrlOacs {
- NVME_OACS_SECURITY = 1 << 0,
- NVME_OACS_FORMAT = 1 << 1,
- NVME_OACS_FW = 1 << 2,
+ NVME_OACS_SECURITY = 1 << 0,
+ NVME_OACS_FORMAT = 1 << 1,
+ NVME_OACS_FW = 1 << 2,
+ NVME_OACS_NS_MGMT = 1 << 3,
+ NVME_OACS_DIRECTIVES = 1 << 5,
+ NVME_OACS_DBBUF = 1 << 8,
};
enum NvmeIdCtrlOncs {
NVME_ONCS_COMPARE = 1 << 0,
NVME_ONCS_WRITE_UNCORR = 1 << 1,
NVME_ONCS_DSM = 1 << 2,
- NVME_ONCS_WRITE_ZEROS = 1 << 3,
+ NVME_ONCS_WRITE_ZEROES = 1 << 3,
NVME_ONCS_FEATURES = 1 << 4,
NVME_ONCS_RESRVATIONS = 1 << 5,
NVME_ONCS_TIMESTAMP = 1 << 6,
+ NVME_ONCS_VERIFY = 1 << 7,
+ NVME_ONCS_COPY = 1 << 8,
+};
+
+enum NvmeIdCtrlOcfs {
+ NVME_OCFS_COPY_FORMAT_0 = 1 << NVME_COPY_FORMAT_0,
+ NVME_OCFS_COPY_FORMAT_1 = 1 << NVME_COPY_FORMAT_1,
+};
+
+enum NvmeIdctrlVwc {
+ NVME_VWC_PRESENT = 1 << 0,
+ NVME_VWC_NSID_BROADCAST_NO_SUPPORT = 0 << 1,
+ NVME_VWC_NSID_BROADCAST_RESERVED = 1 << 1,
+ NVME_VWC_NSID_BROADCAST_CTRL_SPEC = 2 << 1,
+ NVME_VWC_NSID_BROADCAST_SUPPORT = 3 << 1,
+};
+
+enum NvmeIdCtrlFrmw {
+ NVME_FRMW_SLOT1_RO = 1 << 0,
+};
+
+enum NvmeIdCtrlLpa {
+ NVME_LPA_NS_SMART = 1 << 0,
+ NVME_LPA_CSE = 1 << 1,
+ NVME_LPA_EXTENDED = 1 << 2,
+};
+
+enum NvmeIdCtrlCmic {
+ NVME_CMIC_MULTI_CTRL = 1 << 1,
+};
+
+enum NvmeNsAttachmentOperation {
+ NVME_NS_ATTACHMENT_ATTACH = 0x0,
+ NVME_NS_ATTACHMENT_DETACH = 0x1,
};
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
@@ -769,21 +1233,18 @@ enum NvmeIdCtrlOncs {
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)
-typedef struct NvmeFeatureVal {
- uint32_t arbitration;
- uint32_t power_mgmt;
- uint32_t temp_thresh;
- uint32_t err_rec;
- uint32_t volatile_wc;
- uint32_t num_queues;
- uint32_t int_coalescing;
- uint32_t *int_vector_config;
- uint32_t write_atomicity;
- uint32_t async_config;
- uint32_t sw_prog_marker;
-} NvmeFeatureVal;
+#define NVME_CTRL_SGLS_SUPPORT_MASK (0x3 << 0)
+#define NVME_CTRL_SGLS_SUPPORT_NO_ALIGN (0x1 << 0)
+#define NVME_CTRL_SGLS_SUPPORT_DWORD_ALIGN (0x1 << 1)
+#define NVME_CTRL_SGLS_KEYED (0x1 << 2)
+#define NVME_CTRL_SGLS_BITBUCKET (0x1 << 16)
+#define NVME_CTRL_SGLS_MPTR_CONTIGUOUS (0x1 << 17)
+#define NVME_CTRL_SGLS_EXCESS_LENGTH (0x1 << 18)
+#define NVME_CTRL_SGLS_MPTR_SGL (0x1 << 19)
+#define NVME_CTRL_SGLS_ADDR_OFFSET (0x1 << 20)
#define NVME_ARB_AB(arb) (arb & 0x7)
+#define NVME_ARB_AB_NOLIMIT 0x7
#define NVME_ARB_LPW(arb) ((arb >> 8) & 0xff)
#define NVME_ARB_MPW(arb) ((arb >> 16) & 0xff)
#define NVME_ARB_HPW(arb) ((arb >> 24) & 0xff)
@@ -791,6 +1252,25 @@ typedef struct NvmeFeatureVal {
#define NVME_INTC_THR(intc) (intc & 0xff)
#define NVME_INTC_TIME(intc) ((intc >> 8) & 0xff)
+#define NVME_INTVC_NOCOALESCING (0x1 << 16)
+
+#define NVME_TEMP_THSEL(temp) ((temp >> 20) & 0x3)
+#define NVME_TEMP_THSEL_OVER 0x0
+#define NVME_TEMP_THSEL_UNDER 0x1
+
+#define NVME_TEMP_TMPSEL(temp) ((temp >> 16) & 0xf)
+#define NVME_TEMP_TMPSEL_COMPOSITE 0x0
+
+#define NVME_TEMP_TMPTH(temp) (temp & 0xffff)
+
+#define NVME_AEC_SMART(aec) (aec & 0xff)
+#define NVME_AEC_NS_ATTR(aec) ((aec >> 8) & 0x1)
+#define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1)
+#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1)
+
+#define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff)
+#define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
+
enum NvmeFeatureIds {
NVME_ARBITRATION = 0x1,
NVME_POWER_MANAGEMENT = 0x2,
@@ -804,10 +1284,41 @@ enum NvmeFeatureIds {
NVME_WRITE_ATOMICITY = 0xa,
NVME_ASYNCHRONOUS_EVENT_CONF = 0xb,
NVME_TIMESTAMP = 0xe,
- NVME_SOFTWARE_PROGRESS_MARKER = 0x80
+ NVME_HOST_BEHAVIOR_SUPPORT = 0x16,
+ NVME_COMMAND_SET_PROFILE = 0x19,
+ NVME_FDP_MODE = 0x1d,
+ NVME_FDP_EVENTS = 0x1e,
+ NVME_SOFTWARE_PROGRESS_MARKER = 0x80,
+ NVME_FID_MAX = 0x100,
};
-typedef struct NvmeRangeType {
+typedef enum NvmeFeatureCap {
+ NVME_FEAT_CAP_SAVE = 1 << 0,
+ NVME_FEAT_CAP_NS = 1 << 1,
+ NVME_FEAT_CAP_CHANGE = 1 << 2,
+} NvmeFeatureCap;
+
+typedef enum NvmeGetFeatureSelect {
+ NVME_GETFEAT_SELECT_CURRENT = 0x0,
+ NVME_GETFEAT_SELECT_DEFAULT = 0x1,
+ NVME_GETFEAT_SELECT_SAVED = 0x2,
+ NVME_GETFEAT_SELECT_CAP = 0x3,
+} NvmeGetFeatureSelect;
+
+#define NVME_GETSETFEAT_FID_MASK 0xff
+#define NVME_GETSETFEAT_FID(dw10) (dw10 & NVME_GETSETFEAT_FID_MASK)
+
+#define NVME_GETFEAT_SELECT_SHIFT 8
+#define NVME_GETFEAT_SELECT_MASK 0x7
+#define NVME_GETFEAT_SELECT(dw10) \
+ ((dw10 >> NVME_GETFEAT_SELECT_SHIFT) & NVME_GETFEAT_SELECT_MASK)
+
+#define NVME_SETFEAT_SAVE_SHIFT 31
+#define NVME_SETFEAT_SAVE_MASK 0x1
+#define NVME_SETFEAT_SAVE(dw10) \
+ ((dw10 >> NVME_SETFEAT_SAVE_SHIFT) & NVME_SETFEAT_SAVE_MASK)
+
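Editorial illustration (not part of the patch): a minimal sketch of how a consumer might split command dword 10 of a Get Features command with the macros above. The helper name is hypothetical and dw10 is assumed to already be in host byte order.

static inline bool example_is_current_timestamp_query(uint32_t dw10)
{
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);   /* feature identifier, bits 7:0 */
    uint8_t sel = NVME_GETFEAT_SELECT(dw10);   /* select field, bits 10:8 */

    return fid == NVME_TIMESTAMP && sel == NVME_GETFEAT_SELECT_CURRENT;
}
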
+typedef struct QEMU_PACKED NvmeRangeType {
uint8_t type;
uint8_t attributes;
uint8_t rsvd2[14];
@@ -817,13 +1328,29 @@ typedef struct NvmeRangeType {
uint8_t rsvd48[16];
} NvmeRangeType;
-typedef struct NvmeLBAF {
+typedef struct NvmeHostBehaviorSupport {
+ uint8_t acre;
+ uint8_t etdas;
+ uint8_t lbafee;
+ uint8_t rsvd3[509];
+} NvmeHostBehaviorSupport;
+
+typedef struct QEMU_PACKED NvmeLBAF {
uint16_t ms;
uint8_t ds;
uint8_t rp;
} NvmeLBAF;
-typedef struct NvmeIdNs {
+typedef struct QEMU_PACKED NvmeLBAFE {
+ uint64_t zsze;
+ uint8_t zdes;
+ uint8_t rsvd9[7];
+} NvmeLBAFE;
+
+#define NVME_NSID_BROADCAST 0xffffffff
+#define NVME_MAX_NLBAF 64
+
+typedef struct QEMU_PACKED NvmeIdNs {
uint64_t nsze;
uint64_t ncap;
uint64_t nuse;
@@ -833,18 +1360,103 @@ typedef struct NvmeIdNs {
uint8_t mc;
uint8_t dpc;
uint8_t dps;
-
uint8_t nmic;
uint8_t rescap;
uint8_t fpi;
uint8_t dlfeat;
-
- uint8_t res34[94];
- NvmeLBAF lbaf[16];
- uint8_t res192[192];
+ uint16_t nawun;
+ uint16_t nawupf;
+ uint16_t nacwu;
+ uint16_t nabsn;
+ uint16_t nabo;
+ uint16_t nabspf;
+ uint16_t noiob;
+ uint8_t nvmcap[16];
+ uint16_t npwg;
+ uint16_t npwa;
+ uint16_t npdg;
+ uint16_t npda;
+ uint16_t nows;
+ uint16_t mssrl;
+ uint32_t mcl;
+ uint8_t msrc;
+ uint8_t rsvd81[18];
+ uint8_t nsattr;
+ uint16_t nvmsetid;
+ uint16_t endgid;
+ uint8_t nguid[16];
+ uint64_t eui64;
+ NvmeLBAF lbaf[NVME_MAX_NLBAF];
uint8_t vs[3712];
} NvmeIdNs;
+#define NVME_ID_NS_NVM_ELBAF_PIF(elbaf) (((elbaf) >> 7) & 0x3)
+
+typedef struct QEMU_PACKED NvmeIdNsNvm {
+ uint64_t lbstm;
+ uint8_t pic;
+ uint8_t rsvd9[3];
+ uint32_t elbaf[NVME_MAX_NLBAF];
+ uint8_t rsvd268[3828];
+} NvmeIdNsNvm;
+
+typedef struct QEMU_PACKED NvmeIdNsDescr {
+ uint8_t nidt;
+ uint8_t nidl;
+ uint8_t rsvd2[2];
+} NvmeIdNsDescr;
+
+enum NvmeNsIdentifierLength {
+ NVME_NIDL_EUI64 = 8,
+ NVME_NIDL_NGUID = 16,
+ NVME_NIDL_UUID = 16,
+ NVME_NIDL_CSI = 1,
+};
+
+enum NvmeNsIdentifierType {
+ NVME_NIDT_EUI64 = 0x01,
+ NVME_NIDT_NGUID = 0x02,
+ NVME_NIDT_UUID = 0x03,
+ NVME_NIDT_CSI = 0x04,
+};
+
+enum NvmeIdNsNmic {
+ NVME_NMIC_NS_SHARED = 1 << 0,
+};
+
+enum NvmeCsi {
+ NVME_CSI_NVM = 0x00,
+ NVME_CSI_ZONED = 0x02,
+};
+
+#define NVME_SET_CSI(vec, csi) (vec |= (uint8_t)(1 << (csi)))
+
+typedef struct QEMU_PACKED NvmeIdNsZoned {
+ uint16_t zoc;
+ uint16_t ozcs;
+ uint32_t mar;
+ uint32_t mor;
+ uint32_t rrl;
+ uint32_t frl;
+ uint8_t rsvd12[24];
+ uint32_t numzrwa;
+ uint16_t zrwafg;
+ uint16_t zrwas;
+ uint8_t zrwacap;
+ uint8_t rsvd53[2763];
+ NvmeLBAFE lbafe[16];
+ uint8_t rsvd3072[768];
+ uint8_t vs[256];
+} NvmeIdNsZoned;
+
+enum NvmeIdNsZonedOzcs {
+ NVME_ID_NS_ZONED_OZCS_RAZB = 1 << 0,
+ NVME_ID_NS_ZONED_OZCS_ZRWASUP = 1 << 1,
+};
+
+enum NvmeIdNsZonedZrwacap {
+ NVME_ID_NS_ZONED_ZRWACAP_EXPFLUSHSUP = 1 << 0,
+};
/*Deallocate Logical Block Features*/
#define NVME_ID_NS_DLFEAT_GUARD_CRC(dlfeat) ((dlfeat) & 0x10)
@@ -857,6 +1469,7 @@ typedef struct NvmeIdNs {
#define NVME_ID_NS_NSFEAT_THIN(nsfeat) ((nsfeat & 0x1))
+#define NVME_ID_NS_NSFEAT_DULBE(nsfeat) ((nsfeat >> 2) & 0x1)
#define NVME_ID_NS_FLBAS_EXTENDED(flbas) ((flbas >> 4) & 0x1)
#define NVME_ID_NS_FLBAS_INDEX(flbas) ((flbas & 0xf))
#define NVME_ID_NS_MC_SEPARATE(mc) ((mc >> 1) & 0x1)
@@ -869,19 +1482,358 @@ typedef struct NvmeIdNs {
#define NVME_ID_NS_DPC_TYPE_MASK 0x7
enum NvmeIdNsDps {
- DPS_TYPE_NONE = 0,
- DPS_TYPE_1 = 1,
- DPS_TYPE_2 = 2,
- DPS_TYPE_3 = 3,
- DPS_TYPE_MASK = 0x7,
- DPS_FIRST_EIGHT = 8,
+ NVME_ID_NS_DPS_TYPE_NONE = 0,
+ NVME_ID_NS_DPS_TYPE_1 = 1,
+ NVME_ID_NS_DPS_TYPE_2 = 2,
+ NVME_ID_NS_DPS_TYPE_3 = 3,
+ NVME_ID_NS_DPS_TYPE_MASK = 0x7,
+ NVME_ID_NS_DPS_FIRST_EIGHT = 8,
+};
+
+enum NvmeIdNsFlbas {
+ NVME_ID_NS_FLBAS_EXTENDED = 1 << 4,
+};
+
+enum NvmeIdNsMc {
+ NVME_ID_NS_MC_EXTENDED = 1 << 0,
+ NVME_ID_NS_MC_SEPARATE = 1 << 1,
+};
+
+#define NVME_ID_NS_DPS_TYPE(dps) (dps & NVME_ID_NS_DPS_TYPE_MASK)
+
+enum NvmePIFormat {
+ NVME_PI_GUARD_16 = 0,
+ NVME_PI_GUARD_64 = 2,
+};
+
+typedef union NvmeDifTuple {
+ struct {
+ uint16_t guard;
+ uint16_t apptag;
+ uint32_t reftag;
+ } g16;
+
+ struct {
+ uint64_t guard;
+ uint16_t apptag;
+ uint8_t sr[6];
+ } g64;
+} NvmeDifTuple;
+
+enum NvmeZoneAttr {
+ NVME_ZA_FINISHED_BY_CTLR = 1 << 0,
+ NVME_ZA_FINISH_RECOMMENDED = 1 << 1,
+ NVME_ZA_RESET_RECOMMENDED = 1 << 2,
+ NVME_ZA_ZRWA_VALID = 1 << 3,
+ NVME_ZA_ZD_EXT_VALID = 1 << 7,
+};
+
+typedef struct QEMU_PACKED NvmeZoneReportHeader {
+ uint64_t nr_zones;
+ uint8_t rsvd[56];
+} NvmeZoneReportHeader;
+
+enum NvmeZoneReceiveAction {
+ NVME_ZONE_REPORT = 0,
+ NVME_ZONE_REPORT_EXTENDED = 1,
+};
+
+enum NvmeZoneReportType {
+ NVME_ZONE_REPORT_ALL = 0,
+ NVME_ZONE_REPORT_EMPTY = 1,
+ NVME_ZONE_REPORT_IMPLICITLY_OPEN = 2,
+ NVME_ZONE_REPORT_EXPLICITLY_OPEN = 3,
+ NVME_ZONE_REPORT_CLOSED = 4,
+ NVME_ZONE_REPORT_FULL = 5,
+ NVME_ZONE_REPORT_READ_ONLY = 6,
+ NVME_ZONE_REPORT_OFFLINE = 7,
+};
+
+enum NvmeZoneType {
+ NVME_ZONE_TYPE_RESERVED = 0x00,
+ NVME_ZONE_TYPE_SEQ_WRITE = 0x02,
+};
+
+typedef struct QEMU_PACKED NvmeZoneSendCmd {
+ uint8_t opcode;
+ uint8_t flags;
+ uint16_t cid;
+ uint32_t nsid;
+ uint32_t rsvd8[4];
+ NvmeCmdDptr dptr;
+ uint64_t slba;
+ uint32_t rsvd48;
+ uint8_t zsa;
+ uint8_t zsflags;
+ uint8_t rsvd54[2];
+ uint32_t rsvd56[2];
+} NvmeZoneSendCmd;
+
+enum NvmeZoneSendAction {
+ NVME_ZONE_ACTION_RSD = 0x00,
+ NVME_ZONE_ACTION_CLOSE = 0x01,
+ NVME_ZONE_ACTION_FINISH = 0x02,
+ NVME_ZONE_ACTION_OPEN = 0x03,
+ NVME_ZONE_ACTION_RESET = 0x04,
+ NVME_ZONE_ACTION_OFFLINE = 0x05,
+ NVME_ZONE_ACTION_SET_ZD_EXT = 0x10,
+ NVME_ZONE_ACTION_ZRWA_FLUSH = 0x11,
+};
+
+enum {
+ NVME_ZSFLAG_SELECT_ALL = 1 << 0,
+ NVME_ZSFLAG_ZRWA_ALLOC = 1 << 1,
+};
+
+typedef struct QEMU_PACKED NvmeZoneDescr {
+ uint8_t zt;
+ uint8_t zs;
+ uint8_t za;
+ uint8_t rsvd3[5];
+ uint64_t zcap;
+ uint64_t zslba;
+ uint64_t wp;
+ uint8_t rsvd32[32];
+} NvmeZoneDescr;
+
+typedef enum NvmeZoneState {
+ NVME_ZONE_STATE_RESERVED = 0x00,
+ NVME_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZONE_STATE_IMPLICITLY_OPEN = 0x02,
+ NVME_ZONE_STATE_EXPLICITLY_OPEN = 0x03,
+ NVME_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZONE_STATE_READ_ONLY = 0x0d,
+ NVME_ZONE_STATE_FULL = 0x0e,
+ NVME_ZONE_STATE_OFFLINE = 0x0f,
+} NvmeZoneState;
+
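Editorial illustration (not part of the patch): a sketch of walking a Zone Management Receive report buffer built from NvmeZoneReportHeader and NvmeZoneDescr above. It assumes le64_to_cpu() from qemu/bswap.h, that the zone state sits in the upper nibble of zd->zs as in the ZNS specification, and a hypothetical example_ function name.

static inline uint64_t example_count_full_zones(const uint8_t *buf, size_t len)
{
    const NvmeZoneReportHeader *hdr = (const NvmeZoneReportHeader *)buf;
    const NvmeZoneDescr *zd = (const NvmeZoneDescr *)(hdr + 1);
    uint64_t nr_zones = le64_to_cpu(hdr->nr_zones);
    uint64_t full = 0;

    for (uint64_t i = 0; i < nr_zones; i++, zd++) {
        if ((const uint8_t *)(zd + 1) > buf + len) {
            break;                      /* truncated report */
        }
        if ((zd->zs >> 4) == NVME_ZONE_STATE_FULL) {
            full++;                     /* zone state assumed in bits 7:4 of zs */
        }
    }
    return full;
}
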
+typedef struct QEMU_PACKED NvmePriCtrlCap {
+ uint16_t cntlid;
+ uint16_t portid;
+ uint8_t crt;
+ uint8_t rsvd5[27];
+ uint32_t vqfrt;
+ uint32_t vqrfa;
+ uint16_t vqrfap;
+ uint16_t vqprt;
+ uint16_t vqfrsm;
+ uint16_t vqgran;
+ uint8_t rsvd48[16];
+ uint32_t vifrt;
+ uint32_t virfa;
+ uint16_t virfap;
+ uint16_t viprt;
+ uint16_t vifrsm;
+ uint16_t vigran;
+ uint8_t rsvd80[4016];
+} NvmePriCtrlCap;
+
+typedef enum NvmePriCtrlCapCrt {
+ NVME_CRT_VQ = 1 << 0,
+ NVME_CRT_VI = 1 << 1,
+} NvmePriCtrlCapCrt;
+
+typedef struct QEMU_PACKED NvmeSecCtrlEntry {
+ uint16_t scid;
+ uint16_t pcid;
+ uint8_t scs;
+ uint8_t rsvd5[3];
+ uint16_t vfn;
+ uint16_t nvq;
+ uint16_t nvi;
+ uint8_t rsvd14[18];
+} NvmeSecCtrlEntry;
+
+typedef struct QEMU_PACKED NvmeSecCtrlList {
+ uint8_t numcntl;
+ uint8_t rsvd1[31];
+ NvmeSecCtrlEntry sec[127];
+} NvmeSecCtrlList;
+
+typedef enum NvmeVirtMngmtAction {
+ NVME_VIRT_MNGMT_ACTION_PRM_ALLOC = 0x01,
+ NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE = 0x07,
+ NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN = 0x08,
+ NVME_VIRT_MNGMT_ACTION_SEC_ONLINE = 0x09,
+} NvmeVirtMngmtAction;
+
+typedef enum NvmeVirtualResourceType {
+ NVME_VIRT_RES_QUEUE = 0x00,
+ NVME_VIRT_RES_INTERRUPT = 0x01,
+} NvmeVirtualResourceType;
+
+typedef struct NvmeDirectiveIdentify {
+ uint8_t supported;
+ uint8_t unused1[31];
+ uint8_t enabled;
+ uint8_t unused33[31];
+ uint8_t persistent;
+ uint8_t unused65[31];
+ uint8_t rsvd64[4000];
+} NvmeDirectiveIdentify;
+
+enum NvmeDirectiveTypes {
+ NVME_DIRECTIVE_IDENTIFY = 0x0,
+ NVME_DIRECTIVE_DATA_PLACEMENT = 0x2,
+};
+
+enum NvmeDirectiveOperations {
+ NVME_DIRECTIVE_RETURN_PARAMS = 0x1,
+};
+
+typedef struct QEMU_PACKED NvmeFdpConfsHdr {
+ uint16_t num_confs;
+ uint8_t version;
+ uint8_t rsvd3;
+ uint32_t size;
+ uint8_t rsvd8[8];
+} NvmeFdpConfsHdr;
+
+REG8(FDPA, 0x0)
+ FIELD(FDPA, RGIF, 0, 4)
+ FIELD(FDPA, VWC, 4, 1)
+ FIELD(FDPA, VALID, 7, 1);
+
+typedef struct QEMU_PACKED NvmeFdpDescrHdr {
+ uint16_t descr_size;
+ uint8_t fdpa;
+ uint8_t vss;
+ uint32_t nrg;
+ uint16_t nruh;
+ uint16_t maxpids;
+ uint32_t nnss;
+ uint64_t runs;
+ uint32_t erutl;
+ uint8_t rsvd28[36];
+} NvmeFdpDescrHdr;
+
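Editorial illustration (not part of the patch): the REG8/FIELD declarations above are meant to pair with the FIELD_EX8() helper from hw/registerfields.h (an assumption about that header); the function below is a hypothetical sketch.

static inline bool example_fdp_descr_is_valid(const NvmeFdpDescrHdr *hdr)
{
    /* bit 7 of the FDP attributes byte flags a valid descriptor */
    return FIELD_EX8(hdr->fdpa, FDPA, VALID);
}
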
+enum NvmeRuhType {
+ NVME_RUHT_INITIALLY_ISOLATED = 1,
+ NVME_RUHT_PERSISTENTLY_ISOLATED = 2,
+};
+
+typedef struct QEMU_PACKED NvmeRuhDescr {
+ uint8_t ruht;
+ uint8_t rsvd1[3];
+} NvmeRuhDescr;
+
+typedef struct QEMU_PACKED NvmeRuhuLog {
+ uint16_t nruh;
+ uint8_t rsvd2[6];
+} NvmeRuhuLog;
+
+enum NvmeRuhAttributes {
+ NVME_RUHA_UNUSED = 0,
+ NVME_RUHA_HOST = 1,
+ NVME_RUHA_CTRL = 2,
+};
+
+typedef struct QEMU_PACKED NvmeRuhuDescr {
+ uint8_t ruha;
+ uint8_t rsvd1[7];
+} NvmeRuhuDescr;
+
+typedef struct QEMU_PACKED NvmeFdpStatsLog {
+ uint64_t hbmw[2];
+ uint64_t mbmw[2];
+ uint64_t mbe[2];
+ uint8_t rsvd48[16];
+} NvmeFdpStatsLog;
+
+typedef struct QEMU_PACKED NvmeFdpEventsLog {
+ uint32_t num_events;
+ uint8_t rsvd4[60];
+} NvmeFdpEventsLog;
+
+enum NvmeFdpEventType {
+ FDP_EVT_RU_NOT_FULLY_WRITTEN = 0x0,
+ FDP_EVT_RU_ATL_EXCEEDED = 0x1,
+ FDP_EVT_CTRL_RESET_RUH = 0x2,
+ FDP_EVT_INVALID_PID = 0x3,
+ FDP_EVT_MEDIA_REALLOC = 0x80,
+ FDP_EVT_RUH_IMPLICIT_RU_CHANGE = 0x81,
+};
+
+enum NvmeFdpEventFlags {
+ FDPEF_PIV = 1 << 0,
+ FDPEF_NSIDV = 1 << 1,
+ FDPEF_LV = 1 << 2,
+};
+
+typedef struct QEMU_PACKED NvmeFdpEvent {
+ uint8_t type;
+ uint8_t flags;
+ uint16_t pid;
+ uint64_t timestamp;
+ uint32_t nsid;
+ uint64_t type_specific[2];
+ uint16_t rgid;
+ uint8_t ruhid;
+ uint8_t rsvd35[5];
+ uint64_t vendor[3];
+} NvmeFdpEvent;
+
+typedef struct QEMU_PACKED NvmePhidList {
+ uint16_t nnruhd;
+ uint8_t rsvd2[6];
+} NvmePhidList;
+
+typedef struct QEMU_PACKED NvmePhidDescr {
+ uint8_t ruht;
+ uint8_t rsvd1;
+ uint16_t ruhid;
+} NvmePhidDescr;
+
+REG32(FEAT_FDP, 0x0)
+ FIELD(FEAT_FDP, FDPE, 0, 1)
+ FIELD(FEAT_FDP, CONF_NDX, 8, 8);
+
+typedef struct QEMU_PACKED NvmeFdpEventDescr {
+ uint8_t evt;
+ uint8_t evta;
+} NvmeFdpEventDescr;
+
+REG32(NVME_IOMR, 0x0)
+ FIELD(NVME_IOMR, MO, 0, 8)
+ FIELD(NVME_IOMR, MOS, 16, 16);
+
+enum NvmeIomr2Mo {
+ NVME_IOMR_MO_NOP = 0x0,
+ NVME_IOMR_MO_RUH_STATUS = 0x1,
+ NVME_IOMR_MO_VENDOR_SPECIFIC = 0x255,
+};
+
+typedef struct QEMU_PACKED NvmeRuhStatus {
+ uint8_t rsvd0[14];
+ uint16_t nruhsd;
+} NvmeRuhStatus;
+
+typedef struct QEMU_PACKED NvmeRuhStatusDescr {
+ uint16_t pid;
+ uint16_t ruhid;
+ uint32_t earutr;
+ uint64_t ruamw;
+ uint8_t rsvd16[16];
+} NvmeRuhStatusDescr;
+
+REG32(NVME_IOMS, 0x0)
+ FIELD(NVME_IOMS, MO, 0, 8)
+ FIELD(NVME_IOMS, MOS, 16, 16);
+
+enum NvmeIoms2Mo {
+ NVME_IOMS_MO_NOP = 0x0,
+ NVME_IOMS_MO_RUH_UPDATE = 0x1,
};
static inline void _nvme_check_size(void)
{
+ QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult) != 4);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeZonedResult) != 8);
QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1) != 40);
QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);
@@ -889,11 +1841,29 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeCopyCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeHostBehaviorSupport) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlNvm) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZoned) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsDescr) != 4);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeZoneDescr) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeDifTuple) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeDirectiveIdentify) != 4096);
}
#endif
diff --git a/include/block/qapi.h b/include/block/qapi.h
index 22c7807c89..54c48de26a 100644
--- a/include/block/qapi.h
+++ b/include/block/qapi.h
@@ -25,21 +25,28 @@
#ifndef BLOCK_QAPI_H
#define BLOCK_QAPI_H
-#include "block/block.h"
+#include "block/graph-lock.h"
#include "block/snapshot.h"
+#include "qapi/qapi-types-block-core.h"
-BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
- BlockDriverState *bs,
- bool flat,
- Error **errp);
-int bdrv_query_snapshot_info_list(BlockDriverState *bs,
- SnapshotInfoList **p_list,
- Error **errp);
-void bdrv_query_image_info(BlockDriverState *bs,
- ImageInfo **p_info,
- Error **errp);
+BlockDeviceInfo * GRAPH_RDLOCK
+bdrv_block_device_info(BlockBackend *blk, BlockDriverState *bs,
+ bool flat, Error **errp);
+
+int GRAPH_RDLOCK
+bdrv_query_snapshot_info_list(BlockDriverState *bs,
+ SnapshotInfoList **p_list,
+ Error **errp);
+void GRAPH_RDLOCK
+bdrv_query_image_info(BlockDriverState *bs, ImageInfo **p_info, bool flat,
+ bool skip_implicit_filters, Error **errp);
+void GRAPH_RDLOCK
+bdrv_query_block_graph_info(BlockDriverState *bs, BlockGraphInfo **p_info,
+ Error **errp);
void bdrv_snapshot_dump(QEMUSnapshotInfo *sn);
-void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec);
-void bdrv_image_info_dump(ImageInfo *info);
+void bdrv_image_info_specific_dump(ImageInfoSpecific *info_spec,
+ const char *prefix,
+ int indentation);
+void bdrv_node_info_dump(BlockNodeInfo *info, int indentation, bool protocol);
#endif
diff --git a/include/block/qdict.h b/include/block/qdict.h
index d8cb502d7d..b4c28d96a9 100644
--- a/include/block/qdict.h
+++ b/include/block/qdict.h
@@ -12,6 +12,9 @@
#include "qapi/qmp/qdict.h"
+QObject *qdict_crumple(const QDict *src, Error **errp);
+void qdict_flatten(QDict *qdict);
+
void qdict_copy_default(QDict *dst, QDict *src, const char *key);
void qdict_set_default_str(QDict *dst, const char *key, const char *val);
@@ -20,8 +23,6 @@ void qdict_join(QDict *dest, QDict *src, bool overwrite);
void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start);
void qdict_array_split(QDict *src, QList **dst);
int qdict_array_entries(QDict *src, const char *subqdict);
-QObject *qdict_crumple(const QDict *src, Error **errp);
-void qdict_flatten(QDict *qdict);
typedef struct QDictRenames {
const char *from;
diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
index 251b10d273..20e000b8ef 100644
--- a/include/block/raw-aio.h
+++ b/include/block/raw-aio.h
@@ -17,7 +17,6 @@
#define QEMU_RAW_AIO_H
#include "block/aio.h"
-#include "qemu/coroutine.h"
#include "qemu/iov.h"
/* AIO request types */
@@ -29,6 +28,9 @@
#define QEMU_AIO_WRITE_ZEROES 0x0020
#define QEMU_AIO_COPY_RANGE 0x0040
#define QEMU_AIO_TRUNCATE 0x0080
+#define QEMU_AIO_ZONE_REPORT 0x0100
+#define QEMU_AIO_ZONE_MGMT 0x0200
+#define QEMU_AIO_ZONE_APPEND 0x0400
#define QEMU_AIO_TYPE_MASK \
(QEMU_AIO_READ | \
QEMU_AIO_WRITE | \
@@ -37,7 +39,10 @@
QEMU_AIO_DISCARD | \
QEMU_AIO_WRITE_ZEROES | \
QEMU_AIO_COPY_RANGE | \
- QEMU_AIO_TRUNCATE)
+ QEMU_AIO_TRUNCATE | \
+ QEMU_AIO_ZONE_REPORT | \
+ QEMU_AIO_ZONE_MGMT | \
+ QEMU_AIO_ZONE_APPEND)
/* AIO flags */
#define QEMU_AIO_MISALIGNED 0x1000
@@ -50,24 +55,24 @@
typedef struct LinuxAioState LinuxAioState;
LinuxAioState *laio_init(Error **errp);
void laio_cleanup(LinuxAioState *s);
-int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
- uint64_t offset, QEMUIOVector *qiov, int type);
+
+/* laio_co_submit: submit I/O requests in the thread's current AioContext. */
+int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
+ int type, uint64_t dev_max_batch);
+
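Editorial illustration (not part of the patch): a hypothetical caller of the new pool-less signature from coroutine context; fd/offset/qiov come from the caller and a dev_max_batch of 0 is assumed to mean "no device batch limit".

static int coroutine_fn example_laio_preadv(int fd, uint64_t offset,
                                            QEMUIOVector *qiov)
{
    /* submits in the calling thread's current AioContext */
    return laio_co_submit(fd, offset, qiov, QEMU_AIO_READ, 0);
}
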
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
-void laio_io_plug(BlockDriverState *bs, LinuxAioState *s);
-void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s);
#endif
/* io_uring.c - Linux io_uring implementation */
#ifdef CONFIG_LINUX_IO_URING
-typedef struct LuringState LuringState;
LuringState *luring_init(Error **errp);
void luring_cleanup(LuringState *s);
-int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
- uint64_t offset, QEMUIOVector *qiov, int type);
+
+/* luring_co_submit: submit I/O requests in the thread's current AioContext. */
+int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
+ QEMUIOVector *qiov, int type);
void luring_detach_aio_context(LuringState *s, AioContext *old_context);
void luring_attach_aio_context(LuringState *s, AioContext *new_context);
-void luring_io_plug(BlockDriverState *bs, LuringState *s);
-void luring_io_unplug(BlockDriverState *bs, LuringState *s);
#endif
#ifdef _WIN32
diff --git a/include/block/replication.h b/include/block/replication.h
new file mode 100644
index 0000000000..21931b4f0c
--- /dev/null
+++ b/include/block/replication.h
@@ -0,0 +1,175 @@
+/*
+ * Replication filter
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ * Copyright (c) 2016 Intel Corporation
+ * Copyright (c) 2016 FUJITSU LIMITED
+ *
+ * Author:
+ * Changlong Xie <xiecl.fnst@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef REPLICATION_H
+#define REPLICATION_H
+
+#include "qapi/qapi-types-block-core.h"
+#include "qemu/module.h"
+#include "qemu/queue.h"
+
+typedef struct ReplicationOps ReplicationOps;
+typedef struct ReplicationState ReplicationState;
+
+/**
+ * SECTION:block/replication.h
+ * @title:Base Replication System
+ * @short_description: interfaces for handling replication
+ *
+ * The Replication Model provides a framework for handling Replication
+ *
+ * <example>
+ * <title>How to use replication interfaces</title>
+ * <programlisting>
+ * #include "block/replication.h"
+ *
+ * typedef struct BDRVReplicationState {
+ * ReplicationState *rs;
+ * } BDRVReplicationState;
+ *
+ * static void replication_start(ReplicationState *rs, ReplicationMode mode,
+ * Error **errp);
+ * static void replication_do_checkpoint(ReplicationState *rs, Error **errp);
+ * static void replication_get_error(ReplicationState *rs, Error **errp);
+ * static void replication_stop(ReplicationState *rs, bool failover,
+ * Error **errp);
+ *
+ * static ReplicationOps replication_ops = {
+ * .start = replication_start,
+ * .checkpoint = replication_do_checkpoint,
+ * .get_error = replication_get_error,
+ * .stop = replication_stop,
+ * }
+ *
+ * static int replication_open(BlockDriverState *bs, QDict *options,
+ * int flags, Error **errp)
+ * {
+ * BDRVReplicationState *s = bs->opaque;
+ * s->rs = replication_new(bs, &replication_ops);
+ * return 0;
+ * }
+ *
+ * static void replication_close(BlockDriverState *bs)
+ * {
+ * BDRVReplicationState *s = bs->opaque;
+ * replication_remove(s->rs);
+ * }
+ *
+ * BlockDriver bdrv_replication = {
+ * .format_name = "replication",
+ * .instance_size = sizeof(BDRVReplicationState),
+ *
+ * .bdrv_open = replication_open,
+ * .bdrv_close = replication_close,
+ * };
+ *
+ * static void bdrv_replication_init(void)
+ * {
+ * bdrv_register(&bdrv_replication);
+ * }
+ *
+ * block_init(bdrv_replication_init);
+ * </programlisting>
+ * </example>
+ *
+ * The example above shows how to use the replication interfaces. During
+ * migration, replication_(start/stop/do_checkpoint/get_error)_all can then
+ * be used to handle all replication operations.
+ */
+
+/**
+ * ReplicationState:
+ * @opaque: opaque pointer value passed to this ReplicationState
+ * @ops: replication operation of this ReplicationState
+ * @node: node that we will insert into @replication_states QLIST
+ */
+struct ReplicationState {
+ void *opaque;
+ ReplicationOps *ops;
+ QLIST_ENTRY(ReplicationState) node;
+};
+
+/**
+ * ReplicationOps:
+ * @start: callback to start replication
+ * @stop: callback to stop replication
+ * @checkpoint: callback to do checkpoint
+ * @get_error: callback to check whether an error occurred during replication
+ */
+struct ReplicationOps {
+ void (*start)(ReplicationState *rs, ReplicationMode mode, Error **errp);
+ void (*stop)(ReplicationState *rs, bool failover, Error **errp);
+ void (*checkpoint)(ReplicationState *rs, Error **errp);
+ void (*get_error)(ReplicationState *rs, Error **errp);
+};
+
+/**
+ * replication_new:
+ * @opaque: opaque pointer value passed to ReplicationState
+ * @ops: replication operation of the new relevant ReplicationState
+ *
+ * Called to create a new ReplicationState instance, and then insert it
+ * into @replication_states QLIST
+ *
+ * Returns: the new ReplicationState instance
+ */
+ReplicationState *replication_new(void *opaque, ReplicationOps *ops);
+
+/**
+ * replication_remove:
+ * @rs: the ReplicationState instance to remove
+ *
+ * Called to remove a ReplicationState instance, and then delete it from
+ * @replication_states QLIST
+ */
+void replication_remove(ReplicationState *rs);
+
+/**
+ * replication_start_all:
+ * @mode: replication mode that could be "primary" or "secondary"
+ * @errp: returns an error if this function fails
+ *
+ * Start replication, called in migration/checkpoint thread
+ *
+ * Note: the caller of the function MUST make sure the VM is stopped
+ */
+void replication_start_all(ReplicationMode mode, Error **errp);
+
+/**
+ * replication_do_checkpoint_all:
+ * @errp: returns an error if this function fails
+ *
+ * This interface is called after all VM state is transferred to Secondary QEMU
+ */
+void replication_do_checkpoint_all(Error **errp);
+
+/**
+ * replication_get_error_all:
+ * @errp: returns an error if this function fails
+ *
+ * This interface is called to check whether an error occurred during replication
+ */
+void replication_get_error_all(Error **errp);
+
+/**
+ * replication_stop_all:
+ * @failover: boolean value that indicates whether failover is needed
+ * @errp: returns an error if this function fails
+ *
+ * It is called on failover. The VM must already be stopped if you use this
+ * API to shut down the guest or for anything other than failover.
+ */
+void replication_stop_all(bool failover, Error **errp);
+
+#endif /* REPLICATION_H */
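Editorial illustration (not part of the file): a minimal sketch of the migration-side call sequence that pairs with the driver-side example in the header comment. REPLICATION_MODE_SECONDARY is the QAPI-generated constant for the "secondary" mode, error_report_err() is assumed from qapi/error.h, and error handling is abbreviated.

static void example_secondary_checkpoint(void)
{
    Error *err = NULL;

    /* the VM must be stopped before starting replication */
    replication_start_all(REPLICATION_MODE_SECONDARY, &err);
    if (err) {
        error_report_err(err);
        return;
    }

    /* ... receive VM state from the primary, then checkpoint ... */
    replication_do_checkpoint_all(&err);
    if (err) {
        error_report_err(err);
    }
}
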
diff --git a/include/block/reqlist.h b/include/block/reqlist.h
new file mode 100644
index 0000000000..5253497bae
--- /dev/null
+++ b/include/block/reqlist.h
@@ -0,0 +1,75 @@
+/*
+ * reqlist API
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ * Copyright (c) 2021 Virtuozzo International GmbH.
+ *
+ * Authors:
+ * Dietmar Maurer (dietmar@proxmox.com)
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef REQLIST_H
+#define REQLIST_H
+
+#include "qemu/coroutine.h"
+
+/*
+ * The API is not thread-safe and is not meant to be. The struct is public so
+ * that it can be embedded in other structures and protected by third-party
+ * locks; see block/block-copy.c for an example.
+ */
+
+typedef struct BlockReq {
+ int64_t offset;
+ int64_t bytes;
+
+ CoQueue wait_queue; /* coroutines blocked on this req */
+ QLIST_ENTRY(BlockReq) list;
+} BlockReq;
+
+typedef QLIST_HEAD(, BlockReq) BlockReqList;
+
+/*
+ * Initialize new request and add it to the list. Caller must be sure that
+ * there are no conflicting requests in the list.
+ */
+void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset,
+ int64_t bytes);
+/* Search for request in the list intersecting with @offset/@bytes area. */
+BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
+ int64_t bytes);
+
+/*
+ * If there are no intersecting requests return false. Otherwise, wait for the
+ * first found intersecting request to finish and return true.
+ *
+ * @lock is passed to qemu_co_queue_wait()
+ * A false return value guarantees that the lock was never released.
+ */
+bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
+ int64_t bytes, CoMutex *lock);
+
+/*
+ * Wait for all intersecting requests. It just calls reqlist_wait_one() in a
+ * loop; the caller is responsible for not producing new requests in this
+ * region in parallel, otherwise reqlist_wait_all() may never return.
+ */
+void coroutine_fn reqlist_wait_all(BlockReqList *reqs, int64_t offset,
+ int64_t bytes, CoMutex *lock);
+
+/*
+ * Shrink request and wake all waiting coroutines (maybe some of them are not
+ * intersecting with shrunk request).
+ */
+void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes);
+
+/*
+ * Remove request and wake all waiting coroutines. Do not release any memory.
+ */
+void coroutine_fn reqlist_remove_req(BlockReq *req);
+
+#endif /* REQLIST_H */
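Editorial illustration (not part of the file): a condensed sketch of how a user such as block/block-copy.c serialises work on a byte range with this API. The ExampleState type and example_ function are hypothetical; s->lock and s->reqs are assumed to be initialised elsewhere.

typedef struct ExampleState {
    CoMutex lock;          /* protects reqs */
    BlockReqList reqs;
} ExampleState;

static void coroutine_fn example_process_region(ExampleState *s,
                                                int64_t offset, int64_t bytes)
{
    BlockReq req;

    qemu_co_mutex_lock(&s->lock);
    while (reqlist_wait_one(&s->reqs, offset, bytes, &s->lock)) {
        /* an intersecting request finished; re-check for further conflicts */
    }
    reqlist_init_req(&s->reqs, &req, offset, bytes);
    qemu_co_mutex_unlock(&s->lock);

    /* ... operate on [offset, offset + bytes) without conflicts ... */

    qemu_co_mutex_lock(&s->lock);
    reqlist_remove_req(&req);
    qemu_co_mutex_unlock(&s->lock);
}
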
diff --git a/include/block/snapshot.h b/include/block/snapshot.h
index 2bfcd57578..304cc6ea61 100644
--- a/include/block/snapshot.h
+++ b/include/block/snapshot.h
@@ -25,7 +25,8 @@
#ifndef SNAPSHOT_H
#define SNAPSHOT_H
-
+#include "block/graph-lock.h"
+#include "qapi/qapi-builtin-types.h"
#define SNAPSHOT_OPT_BASE "snapshot."
#define SNAPSHOT_OPT_ID "snapshot.id"
@@ -42,8 +43,16 @@ typedef struct QEMUSnapshotInfo {
uint32_t date_sec; /* UTC date of the snapshot */
uint32_t date_nsec;
uint64_t vm_clock_nsec; /* VM clock relative to boot */
+ uint64_t icount; /* record/replay step */
} QEMUSnapshotInfo;
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
int bdrv_snapshot_find(BlockDriverState *bs, QEMUSnapshotInfo *sn_info,
const char *name);
bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
@@ -51,16 +60,19 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
const char *name,
QEMUSnapshotInfo *sn_info,
Error **errp);
-int bdrv_can_snapshot(BlockDriverState *bs);
-int bdrv_snapshot_create(BlockDriverState *bs,
- QEMUSnapshotInfo *sn_info);
-int bdrv_snapshot_goto(BlockDriverState *bs,
- const char *snapshot_id,
- Error **errp);
-int bdrv_snapshot_delete(BlockDriverState *bs,
- const char *snapshot_id,
- const char *name,
- Error **errp);
+
+int GRAPH_RDLOCK bdrv_can_snapshot(BlockDriverState *bs);
+
+int GRAPH_RDLOCK
+bdrv_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
+
+int GRAPH_UNLOCKED
+bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id, Error **errp);
+
+int GRAPH_RDLOCK
+bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id,
+ const char *name, Error **errp);
+
int bdrv_snapshot_list(BlockDriverState *bs,
QEMUSnapshotInfo **psn_info);
int bdrv_snapshot_load_tmp(BlockDriverState *bs,
@@ -72,21 +84,30 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
Error **errp);
-/* Group operations. All block drivers are involved.
- * These functions will properly handle dataplane (take aio_context_acquire
- * when appropriate for appropriate block drivers */
+/*
+ * Group operations. All block drivers are involved.
+ */
-bool bdrv_all_can_snapshot(BlockDriverState **first_bad_bs);
-int bdrv_all_delete_snapshot(const char *name, BlockDriverState **first_bsd_bs,
+bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
+ Error **errp);
+int bdrv_all_delete_snapshot(const char *name,
+ bool has_devices, strList *devices,
Error **errp);
-int bdrv_all_goto_snapshot(const char *name, BlockDriverState **first_bad_bs,
+int bdrv_all_goto_snapshot(const char *name,
+ bool has_devices, strList *devices,
Error **errp);
-int bdrv_all_find_snapshot(const char *name, BlockDriverState **first_bad_bs);
+int bdrv_all_has_snapshot(const char *name,
+ bool has_devices, strList *devices,
+ Error **errp);
int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
BlockDriverState *vm_state_bs,
uint64_t vm_state_size,
- BlockDriverState **first_bad_bs);
+ bool has_devices,
+ strList *devices,
+ Error **errp);
-BlockDriverState *bdrv_all_find_vmstate_bs(void);
+BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
+ bool has_devices, strList *devices,
+ Error **errp);
#endif
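Editorial illustration (not part of the patch): with the reworked prototypes the group helpers take an optional device list; passing has_devices=false and devices=NULL is assumed to keep the previous "all devices" behaviour, as in the hypothetical wrapper below.

static bool example_all_devices_can_snapshot(Error **errp)
{
    return bdrv_all_can_snapshot(false, NULL, errp);
}
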
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 7dd7d730a0..948ff5f30c 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -18,7 +18,9 @@
#ifndef QEMU_THREAD_POOL_H
#define QEMU_THREAD_POOL_H
-#include "block/block.h"
+#include "block/aio.h"
+
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
typedef int ThreadPoolFunc(void *opaque);
@@ -27,11 +29,15 @@ typedef struct ThreadPool ThreadPool;
ThreadPool *thread_pool_new(struct AioContext *ctx);
void thread_pool_free(ThreadPool *pool);
-BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
- ThreadPoolFunc *func, void *arg,
- BlockCompletionFunc *cb, void *opaque);
-int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
- ThreadPoolFunc *func, void *arg);
-void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
+/*
+ * thread_pool_submit* API: submit I/O requests in the thread's
+ * current AioContext.
+ */
+BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
+ BlockCompletionFunc *cb, void *opaque);
+int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
+void thread_pool_submit(ThreadPoolFunc *func, void *arg);
+
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
#endif
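Editorial illustration (not part of the patch): offloading a blocking function with the new pool-less submission API, which uses the thread pool of the caller's current AioContext; the example_ names are hypothetical.

static int example_blocking_work(void *opaque)
{
    /* runs in a worker thread and may block */
    return 0;
}

static int coroutine_fn example_offload(void *opaque)
{
    /* yields until example_blocking_work() completes in the pool */
    return thread_pool_submit_co(example_blocking_work, opaque);
}
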
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index 712a8e64b4..2355e8d9de 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -25,8 +25,9 @@
#ifndef THROTTLE_GROUPS_H
#define THROTTLE_GROUPS_H
+#include "qemu/coroutine.h"
#include "qemu/throttle.h"
-#include "block/block_int.h"
+#include "qom/object.h"
/* The ThrottleGroupMember structure indicates membership in a ThrottleGroup
* and holds related data.
@@ -36,7 +37,7 @@ typedef struct ThrottleGroupMember {
AioContext *aio_context;
/* throttled_reqs_lock protects the CoQueues for throttled requests. */
CoMutex throttled_reqs_lock;
- CoQueue throttled_reqs[2];
+ CoQueue throttled_reqs[THROTTLE_MAX];
/* Nonzero if the I/O limits are currently being ignored; generally
* it is zero. Accessed with atomic operations.
@@ -53,13 +54,13 @@ typedef struct ThrottleGroupMember {
* throttle_state tells us if I/O limits are configured. */
ThrottleState *throttle_state;
ThrottleTimers throttle_timers;
- unsigned pending_reqs[2];
+ unsigned pending_reqs[THROTTLE_MAX];
QLIST_ENTRY(ThrottleGroupMember) round_robin;
} ThrottleGroupMember;
#define TYPE_THROTTLE_GROUP "throttle-group"
-#define THROTTLE_GROUP(obj) OBJECT_CHECK(ThrottleGroup, (obj), TYPE_THROTTLE_GROUP)
+OBJECT_DECLARE_SIMPLE_TYPE(ThrottleGroup, THROTTLE_GROUP)
const char *throttle_group_get_name(ThrottleGroupMember *tgm);
@@ -76,8 +77,8 @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm);
void throttle_group_restart_tgm(ThrottleGroupMember *tgm);
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
- unsigned int bytes,
- bool is_write);
+ int64_t bytes,
+ ThrottleDirection direction);
void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
AioContext *new_context);
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm);
diff --git a/include/block/ufs.h b/include/block/ufs.h
new file mode 100644
index 0000000000..d61598b8f3
--- /dev/null
+++ b/include/block/ufs.h
@@ -0,0 +1,1090 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef BLOCK_UFS_H
+#define BLOCK_UFS_H
+
+#include "hw/registerfields.h"
+
+typedef struct QEMU_PACKED UfsReg {
+ uint32_t cap;
+ uint32_t rsvd0;
+ uint32_t ver;
+ uint32_t rsvd1;
+ uint32_t hcpid;
+ uint32_t hcmid;
+ uint32_t ahit;
+ uint32_t rsvd2;
+ uint32_t is;
+ uint32_t ie;
+ uint32_t rsvd3[2];
+ uint32_t hcs;
+ uint32_t hce;
+ uint32_t uecpa;
+ uint32_t uecdl;
+ uint32_t uecn;
+ uint32_t uect;
+ uint32_t uecdme;
+ uint32_t utriacr;
+ uint32_t utrlba;
+ uint32_t utrlbau;
+ uint32_t utrldbr;
+ uint32_t utrlclr;
+ uint32_t utrlrsr;
+ uint32_t utrlcnr;
+ uint32_t rsvd4[2];
+ uint32_t utmrlba;
+ uint32_t utmrlbau;
+ uint32_t utmrldbr;
+ uint32_t utmrlclr;
+ uint32_t utmrlrsr;
+ uint32_t rsvd5[3];
+ uint32_t uiccmd;
+ uint32_t ucmdarg1;
+ uint32_t ucmdarg2;
+ uint32_t ucmdarg3;
+ uint32_t rsvd6[4];
+ uint32_t rsvd7[4];
+ uint32_t rsvd8[16];
+ uint32_t ccap;
+} UfsReg;
+
+REG32(CAP, offsetof(UfsReg, cap))
+ FIELD(CAP, NUTRS, 0, 5)
+ FIELD(CAP, RTT, 8, 8)
+ FIELD(CAP, NUTMRS, 16, 3)
+ FIELD(CAP, AUTOH8, 23, 1)
+ FIELD(CAP, 64AS, 24, 1)
+ FIELD(CAP, OODDS, 25, 1)
+ FIELD(CAP, UICDMETMS, 26, 1)
+ FIELD(CAP, CS, 28, 1)
+REG32(VER, offsetof(UfsReg, ver))
+REG32(HCPID, offsetof(UfsReg, hcpid))
+REG32(HCMID, offsetof(UfsReg, hcmid))
+REG32(AHIT, offsetof(UfsReg, ahit))
+REG32(IS, offsetof(UfsReg, is))
+ FIELD(IS, UTRCS, 0, 1)
+ FIELD(IS, UDEPRI, 1, 1)
+ FIELD(IS, UE, 2, 1)
+ FIELD(IS, UTMS, 3, 1)
+ FIELD(IS, UPMS, 4, 1)
+ FIELD(IS, UHXS, 5, 1)
+ FIELD(IS, UHES, 6, 1)
+ FIELD(IS, ULLS, 7, 1)
+ FIELD(IS, ULSS, 8, 1)
+ FIELD(IS, UTMRCS, 9, 1)
+ FIELD(IS, UCCS, 10, 1)
+ FIELD(IS, DFES, 11, 1)
+ FIELD(IS, UTPES, 12, 1)
+ FIELD(IS, HCFES, 16, 1)
+ FIELD(IS, SBFES, 17, 1)
+ FIELD(IS, CEFES, 18, 1)
+REG32(IE, offsetof(UfsReg, ie))
+ FIELD(IE, UTRCE, 0, 1)
+ FIELD(IE, UDEPRIE, 1, 1)
+ FIELD(IE, UEE, 2, 1)
+ FIELD(IE, UTMSE, 3, 1)
+ FIELD(IE, UPMSE, 4, 1)
+ FIELD(IE, UHXSE, 5, 1)
+ FIELD(IE, UHESE, 6, 1)
+ FIELD(IE, ULLSE, 7, 1)
+ FIELD(IE, ULSSE, 8, 1)
+ FIELD(IE, UTMRCE, 9, 1)
+ FIELD(IE, UCCE, 10, 1)
+ FIELD(IE, DFEE, 11, 1)
+ FIELD(IE, UTPEE, 12, 1)
+ FIELD(IE, HCFEE, 16, 1)
+ FIELD(IE, SBFEE, 17, 1)
+ FIELD(IE, CEFEE, 18, 1)
+REG32(HCS, offsetof(UfsReg, hcs))
+ FIELD(HCS, DP, 0, 1)
+ FIELD(HCS, UTRLRDY, 1, 1)
+ FIELD(HCS, UTMRLRDY, 2, 1)
+ FIELD(HCS, UCRDY, 3, 1)
+ FIELD(HCS, UPMCRS, 8, 3)
+REG32(HCE, offsetof(UfsReg, hce))
+ FIELD(HCE, HCE, 0, 1)
+ FIELD(HCE, CGE, 1, 1)
+REG32(UECPA, offsetof(UfsReg, uecpa))
+REG32(UECDL, offsetof(UfsReg, uecdl))
+REG32(UECN, offsetof(UfsReg, uecn))
+REG32(UECT, offsetof(UfsReg, uect))
+REG32(UECDME, offsetof(UfsReg, uecdme))
+REG32(UTRIACR, offsetof(UfsReg, utriacr))
+REG32(UTRLBA, offsetof(UfsReg, utrlba))
+ FIELD(UTRLBA, UTRLBA, 10, 22)
+REG32(UTRLBAU, offsetof(UfsReg, utrlbau))
+REG32(UTRLDBR, offsetof(UfsReg, utrldbr))
+REG32(UTRLCLR, offsetof(UfsReg, utrlclr))
+REG32(UTRLRSR, offsetof(UfsReg, utrlrsr))
+REG32(UTRLCNR, offsetof(UfsReg, utrlcnr))
+REG32(UTMRLBA, offsetof(UfsReg, utmrlba))
+ FIELD(UTMRLBA, UTMRLBA, 10, 22)
+REG32(UTMRLBAU, offsetof(UfsReg, utmrlbau))
+REG32(UTMRLDBR, offsetof(UfsReg, utmrldbr))
+REG32(UTMRLCLR, offsetof(UfsReg, utmrlclr))
+REG32(UTMRLRSR, offsetof(UfsReg, utmrlrsr))
+REG32(UICCMD, offsetof(UfsReg, uiccmd))
+REG32(UCMDARG1, offsetof(UfsReg, ucmdarg1))
+REG32(UCMDARG2, offsetof(UfsReg, ucmdarg2))
+REG32(UCMDARG3, offsetof(UfsReg, ucmdarg3))
+REG32(CCAP, offsetof(UfsReg, ccap))
+
+#define UFS_INTR_MASK \
+ ((1 << R_IS_CEFES_SHIFT) | (1 << R_IS_SBFES_SHIFT) | \
+ (1 << R_IS_HCFES_SHIFT) | (1 << R_IS_UTPES_SHIFT) | \
+ (1 << R_IS_DFES_SHIFT) | (1 << R_IS_UCCS_SHIFT) | \
+ (1 << R_IS_UTMRCS_SHIFT) | (1 << R_IS_ULSS_SHIFT) | \
+ (1 << R_IS_ULLS_SHIFT) | (1 << R_IS_UHES_SHIFT) | \
+ (1 << R_IS_UHXS_SHIFT) | (1 << R_IS_UPMS_SHIFT) | \
+ (1 << R_IS_UTMS_SHIFT) | (1 << R_IS_UE_SHIFT) | \
+ (1 << R_IS_UDEPRI_SHIFT) | (1 << R_IS_UTRCS_SHIFT))
+
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT 24
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK 0xff
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE(dword0) \
+ ((be32_to_cpu(dword0) >> UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT) & \
+ UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK)
+
+#define UFS_UPIU_HEADER_QUERY_FUNC_SHIFT 16
+#define UFS_UPIU_HEADER_QUERY_FUNC_MASK 0xff
+#define UFS_UPIU_HEADER_QUERY_FUNC(dword1) \
+ ((be32_to_cpu(dword1) >> UFS_UPIU_HEADER_QUERY_FUNC_SHIFT) & \
+ UFS_UPIU_HEADER_QUERY_FUNC_MASK)
+
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT 0
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK 0xffff
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH(dword2) \
+ ((be32_to_cpu(dword2) >> UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT) & \
+ UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK)
+
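Editorial illustration (not part of the patch): the macros above operate on the raw big-endian dwords of a UPIU header. The hypothetical helper below picks out the query function of a Query Request UPIU (UFS_UPIU_TRANSACTION_QUERY_REQ is defined later in this header).

static inline int example_upiu_query_func(const uint32_t *be_dwords)
{
    if (UFS_UPIU_HEADER_TRANSACTION_TYPE(be_dwords[0]) !=
        UFS_UPIU_TRANSACTION_QUERY_REQ) {
        return -1;
    }
    return UFS_UPIU_HEADER_QUERY_FUNC(be_dwords[1]);
}
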
+typedef struct QEMU_PACKED DeviceDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t device;
+ uint8_t device_class;
+ uint8_t device_sub_class;
+ uint8_t protocol;
+ uint8_t number_lu;
+ uint8_t number_wlu;
+ uint8_t boot_enable;
+ uint8_t descr_access_en;
+ uint8_t init_power_mode;
+ uint8_t high_priority_lun;
+ uint8_t secure_removal_type;
+ uint8_t security_lu;
+ uint8_t background_ops_term_lat;
+ uint8_t init_active_icc_level;
+ uint16_t spec_version;
+ uint16_t manufacture_date;
+ uint8_t manufacturer_name;
+ uint8_t product_name;
+ uint8_t serial_number;
+ uint8_t oem_id;
+ uint16_t manufacturer_id;
+ uint8_t ud_0_base_offset;
+ uint8_t ud_config_p_length;
+ uint8_t device_rtt_cap;
+ uint16_t periodic_rtc_update;
+ uint8_t ufs_features_support;
+ uint8_t ffu_timeout;
+ uint8_t queue_depth;
+ uint16_t device_version;
+ uint8_t num_secure_wp_area;
+ uint32_t psa_max_data_size;
+ uint8_t psa_state_timeout;
+ uint8_t product_revision_level;
+ uint8_t reserved[36];
+ uint32_t extended_ufs_features_support;
+ uint8_t write_booster_buffer_preserve_user_space_en;
+ uint8_t write_booster_buffer_type;
+ uint32_t num_shared_write_booster_buffer_alloc_units;
+} DeviceDescriptor;
+
+typedef struct QEMU_PACKED GeometryDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t media_technology;
+ uint8_t reserved;
+ uint64_t total_raw_device_capacity;
+ uint8_t max_number_lu;
+ uint32_t segment_size;
+ uint8_t allocation_unit_size;
+ uint8_t min_addr_block_size;
+ uint8_t optimal_read_block_size;
+ uint8_t optimal_write_block_size;
+ uint8_t max_in_buffer_size;
+ uint8_t max_out_buffer_size;
+ uint8_t rpmb_read_write_size;
+ uint8_t dynamic_capacity_resource_policy;
+ uint8_t data_ordering;
+ uint8_t max_context_id_number;
+ uint8_t sys_data_tag_unit_size;
+ uint8_t sys_data_tag_res_size;
+ uint8_t supported_sec_r_types;
+ uint16_t supported_memory_types;
+ uint32_t system_code_max_n_alloc_u;
+ uint16_t system_code_cap_adj_fac;
+ uint32_t non_persist_max_n_alloc_u;
+ uint16_t non_persist_cap_adj_fac;
+ uint32_t enhanced_1_max_n_alloc_u;
+ uint16_t enhanced_1_cap_adj_fac;
+ uint32_t enhanced_2_max_n_alloc_u;
+ uint16_t enhanced_2_cap_adj_fac;
+ uint32_t enhanced_3_max_n_alloc_u;
+ uint16_t enhanced_3_cap_adj_fac;
+ uint32_t enhanced_4_max_n_alloc_u;
+ uint16_t enhanced_4_cap_adj_fac;
+ uint32_t optimal_logical_block_size;
+ uint8_t reserved2[7];
+ uint32_t write_booster_buffer_max_n_alloc_units;
+ uint8_t device_max_write_booster_l_us;
+ uint8_t write_booster_buffer_cap_adj_fac;
+ uint8_t supported_write_booster_buffer_user_space_reduction_types;
+ uint8_t supported_write_booster_buffer_types;
+} GeometryDescriptor;
+
+#define UFS_GEOMETRY_CAPACITY_SHIFT 9
+
+typedef struct QEMU_PACKED UnitDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t unit_index;
+ uint8_t lu_enable;
+ uint8_t boot_lun_id;
+ uint8_t lu_write_protect;
+ uint8_t lu_queue_depth;
+ uint8_t psa_sensitive;
+ uint8_t memory_type;
+ uint8_t data_reliability;
+ uint8_t logical_block_size;
+ uint64_t logical_block_count;
+ uint32_t erase_block_size;
+ uint8_t provisioning_type;
+ uint64_t phy_mem_resource_count;
+ uint16_t context_capabilities;
+ uint8_t large_unit_granularity_m1;
+ uint8_t reserved[6];
+ uint32_t lu_num_write_booster_buffer_alloc_units;
+} UnitDescriptor;
+
+typedef struct QEMU_PACKED RpmbUnitDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t unit_index;
+ uint8_t lu_enable;
+ uint8_t boot_lun_id;
+ uint8_t lu_write_protect;
+ uint8_t lu_queue_depth;
+ uint8_t psa_sensitive;
+ uint8_t memory_type;
+ uint8_t reserved;
+ uint8_t logical_block_size;
+ uint64_t logical_block_count;
+ uint32_t erase_block_size;
+ uint8_t provisioning_type;
+ uint64_t phy_mem_resource_count;
+ uint8_t reserved2[3];
+} RpmbUnitDescriptor;
+
+typedef struct QEMU_PACKED PowerParametersDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint16_t active_icc_levels_vcc[16];
+ uint16_t active_icc_levels_vccq[16];
+ uint16_t active_icc_levels_vccq_2[16];
+} PowerParametersDescriptor;
+
+typedef struct QEMU_PACKED InterconnectDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint16_t bcd_unipro_version;
+ uint16_t bcd_mphy_version;
+} InterconnectDescriptor;
+
+typedef struct QEMU_PACKED StringDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint16_t UC[126];
+} StringDescriptor;
+
+typedef struct QEMU_PACKED DeviceHealthDescriptor {
+ uint8_t length;
+ uint8_t descriptor_idn;
+ uint8_t pre_eol_info;
+ uint8_t device_life_time_est_a;
+ uint8_t device_life_time_est_b;
+ uint8_t vendor_prop_info[32];
+ uint32_t refresh_total_count;
+ uint32_t refresh_progress;
+} DeviceHealthDescriptor;
+
+typedef struct QEMU_PACKED Flags {
+ uint8_t reserved;
+ uint8_t device_init;
+ uint8_t permanent_wp_en;
+ uint8_t power_on_wp_en;
+ uint8_t background_ops_en;
+ uint8_t device_life_span_mode_en;
+ uint8_t purge_enable;
+ uint8_t refresh_enable;
+ uint8_t phy_resource_removal;
+ uint8_t busy_rtc;
+ uint8_t reserved2;
+ uint8_t permanently_disable_fw_update;
+ uint8_t reserved3[2];
+ uint8_t wb_en;
+ uint8_t wb_buffer_flush_en;
+ uint8_t wb_buffer_flush_during_hibernate;
+ uint8_t reserved4[2];
+} Flags;
+
+typedef struct Attributes {
+ uint8_t boot_lun_en;
+ uint8_t reserved;
+ uint8_t current_power_mode;
+ uint8_t active_icc_level;
+ uint8_t out_of_order_data_en;
+ uint8_t background_op_status;
+ uint8_t purge_status;
+ uint8_t max_data_in_size;
+ uint8_t max_data_out_size;
+ uint32_t dyn_cap_needed;
+ uint8_t ref_clk_freq;
+ uint8_t config_descr_lock;
+ uint8_t max_num_of_rtt;
+ uint16_t exception_event_control;
+ uint16_t exception_event_status;
+ uint32_t seconds_passed;
+ uint16_t context_conf;
+ uint8_t device_ffu_status;
+ uint8_t psa_state;
+ uint32_t psa_data_size;
+ uint8_t ref_clk_gating_wait_time;
+ uint8_t device_case_rough_temperaure;
+ uint8_t device_too_high_temp_boundary;
+ uint8_t device_too_low_temp_boundary;
+ uint8_t throttling_status;
+ uint8_t wb_buffer_flush_status;
+ uint8_t available_wb_buffer_size;
+ uint8_t wb_buffer_life_time_est;
+ uint32_t current_wb_buffer_size;
+ uint8_t refresh_status;
+ uint8_t refresh_freq;
+ uint8_t refresh_unit;
+ uint8_t refresh_method;
+} Attributes;
+
+#define UFS_TRANSACTION_SPECIFIC_FIELD_SIZE 20
+#define UFS_MAX_QUERY_DATA_SIZE 256
+
+/* Command response result code */
+typedef enum CommandRespCode {
+ UFS_COMMAND_RESULT_SUCCESS = 0x00,
+ UFS_COMMAND_RESULT_FAIL = 0x01,
+} CommandRespCode;
+
+enum {
+ UFS_UPIU_FLAG_UNDERFLOW = 0x20,
+ UFS_UPIU_FLAG_OVERFLOW = 0x40,
+};
+
+typedef struct QEMU_PACKED UtpUpiuHeader {
+ uint8_t trans_type;
+ uint8_t flags;
+ uint8_t lun;
+ uint8_t task_tag;
+ uint8_t iid_cmd_set_type;
+ uint8_t query_func;
+ uint8_t response;
+ uint8_t scsi_status;
+ uint8_t ehs_len;
+ uint8_t device_inf;
+ uint16_t data_segment_length;
+} UtpUpiuHeader;
+
+/*
+ * The code below is copied from the linux kernel
+ * ("include/uapi/scsi/scsi_bsg_ufs.h") and modified to fit the qemu style.
+ */
+
+typedef struct QEMU_PACKED UtpUpiuQuery {
+ uint8_t opcode;
+ uint8_t idn;
+ uint8_t index;
+ uint8_t selector;
+ uint16_t reserved_osf;
+ uint16_t length;
+ uint32_t value;
+ uint32_t reserved[2];
+ /* EHS length should be 0. We don't have to worry about EHS area. */
+ uint8_t data[UFS_MAX_QUERY_DATA_SIZE];
+} UtpUpiuQuery;
+
+#define UFS_CDB_SIZE 16
+
+/*
+ * struct UtpUpiuCmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuCmd {
+ uint32_t exp_data_transfer_len;
+ uint8_t cdb[UFS_CDB_SIZE];
+} UtpUpiuCmd;
+
+/*
+ * struct UtpUpiuReq - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ * @uc: use utp_upiu_query to host the 4 dwords of uic command
+ */
+typedef struct QEMU_PACKED UtpUpiuReq {
+ UtpUpiuHeader header;
+ union {
+ UtpUpiuCmd sc;
+ UtpUpiuQuery qr;
+ };
+} UtpUpiuReq;
+
+/*
+ * The code below is copied from the linux kernel ("include/ufs/ufshci.h") and
+ * modified to fit the qemu style.
+ */
+
+enum {
+ UFS_PWR_OK = 0x0,
+ UFS_PWR_LOCAL = 0x01,
+ UFS_PWR_REMOTE = 0x02,
+ UFS_PWR_BUSY = 0x03,
+ UFS_PWR_ERROR_CAP = 0x04,
+ UFS_PWR_FATAL_ERROR = 0x05,
+};
+
+/* UIC Commands */
+enum uic_cmd_dme {
+ UFS_UIC_CMD_DME_GET = 0x01,
+ UFS_UIC_CMD_DME_SET = 0x02,
+ UFS_UIC_CMD_DME_PEER_GET = 0x03,
+ UFS_UIC_CMD_DME_PEER_SET = 0x04,
+ UFS_UIC_CMD_DME_POWERON = 0x10,
+ UFS_UIC_CMD_DME_POWEROFF = 0x11,
+ UFS_UIC_CMD_DME_ENABLE = 0x12,
+ UFS_UIC_CMD_DME_RESET = 0x14,
+ UFS_UIC_CMD_DME_END_PT_RST = 0x15,
+ UFS_UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UFS_UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UFS_UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UFS_UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UFS_UIC_CMD_RESULT_SUCCESS = 0x00,
+ UFS_UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UFS_UIC_CMD_RESULT_FAILURE = 0x01,
+ UFS_UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UFS_UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UFS_UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UFS_UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UFS_UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UFS_UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UFS_UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UFS_UIC_CMD_RESULT_BUSY = 0x09,
+ UFS_UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define UFS_MASK_UIC_COMMAND_RESULT 0xFF
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UFS_UTP_CMD_TYPE_SCSI = 0x0,
+ UFS_UTP_CMD_TYPE_UFS = 0x1,
+ UFS_UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+/* To accommodate UFS2.0 required Command type */
+enum {
+ UFS_UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
+enum {
+ UFS_UTP_SCSI_COMMAND = 0x00000000,
+ UFS_UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UFS_UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+ UFS_UTP_REQ_DESC_INT_CMD = 0x01000000,
+ UFS_UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+ UFS_UTP_NO_DATA_TRANSFER = 0x00000000,
+ UFS_UTP_HOST_TO_DEVICE = 0x02000000,
+ UFS_UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum UtpOcsCodes {
+ UFS_OCS_SUCCESS = 0x0,
+ UFS_OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ UFS_OCS_INVALID_PRDT_ATTR = 0x2,
+ UFS_OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ UFS_OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ UFS_OCS_PEER_COMM_FAILURE = 0x5,
+ UFS_OCS_ABORTED = 0x6,
+ UFS_OCS_FATAL_ERROR = 0x7,
+ UFS_OCS_DEVICE_FATAL_ERROR = 0x8,
+ UFS_OCS_INVALID_CRYPTO_CONFIG = 0x9,
+ UFS_OCS_GENERAL_CRYPTO_ERROR = 0xa,
+ UFS_OCS_INVALID_COMMAND_STATUS = 0xf,
+};
+
+enum {
+ UFS_MASK_OCS = 0x0F,
+};
+
+/*
+ * struct UfshcdSgEntry - UFSHCI PRD Entry
+ * @addr: Physical address; DW-0 and DW-1.
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+typedef struct QEMU_PACKED UfshcdSgEntry {
+ uint64_t addr;
+ uint32_t reserved;
+ uint32_t size;
+ /*
+ * followed by variant-specific fields if
+ * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
+ */
+} UfshcdSgEntry;
+
+/*
+ * struct RequestDescHeader - Descriptor Header common to both UTRD and UTMRD
+ * @dword0: Descriptor Header DW0
+ * @dword1: Descriptor Header DW1
+ * @dword2: Descriptor Header DW2
+ * @dword3: Descriptor Header DW3
+ */
+typedef struct QEMU_PACKED RequestDescHeader {
+ uint32_t dword_0;
+ uint32_t dword_1;
+ uint32_t dword_2;
+ uint32_t dword_3;
+} RequestDescHeader;
+
+/*
+ * struct UtpTransferReqDesc - UTP Transfer Request Descriptor (UTRD)
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+typedef struct QEMU_PACKED UtpTransferReqDesc {
+ /* DW 0-3 */
+ RequestDescHeader header;
+
+ /* DW 4-5 */
+ uint32_t command_desc_base_addr_lo;
+ uint32_t command_desc_base_addr_hi;
+
+ /* DW 6 */
+ uint16_t response_upiu_length;
+ uint16_t response_upiu_offset;
+
+ /* DW 7 */
+ uint16_t prd_table_length;
+ uint16_t prd_table_offset;
+} UtpTransferReqDesc;
+
+/*
+ * UTMRD structure.
+ */
+typedef struct QEMU_PACKED UtpTaskReqDesc {
+ /* DW 0-3 */
+ RequestDescHeader header;
+
+ /* DW 4-11 - Task request UPIU structure */
+ struct {
+ UtpUpiuHeader req_header;
+ uint32_t input_param1;
+ uint32_t input_param2;
+ uint32_t input_param3;
+ uint32_t reserved1[2];
+ } upiu_req;
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct {
+ UtpUpiuHeader rsp_header;
+ uint32_t output_param1;
+ uint32_t output_param2;
+ uint32_t reserved2[3];
+ } upiu_rsp;
+} UtpTaskReqDesc;
+
+/*
+ * The code below is copied from the linux kernel ("include/ufs/ufs.h") and
+ * modified to fit the qemu style.
+ */
+
+#define UFS_GENERAL_UPIU_REQUEST_SIZE (sizeof(UtpUpiuReq))
+#define UFS_QUERY_DESC_MAX_SIZE 255
+#define UFS_QUERY_DESC_MIN_SIZE 2
+#define UFS_QUERY_DESC_HDR_SIZE 2
+#define UFS_QUERY_OSF_SIZE (UFS_GENERAL_UPIU_REQUEST_SIZE - (sizeof(UtpUpiuHeader)))
+#define UFS_SENSE_SIZE 18
+
+/*
+ * A UFS device may have standard LUs, whose LUN ids range from 0x00 to
+ * 0x7F. Standard LUs use the "Peripheral Device Addressing Format".
+ * A UFS device may also have Well Known LUs (also referred to as W-LUs),
+ * which again range from 0x00 to 0x7F. For W-LUs, the device only uses
+ * the "Extended Addressing Format", which means the W-LUNs start
+ * from 0xc100 (SCSI_W_LUN_BASE) onwards.
+ * This means the maximum LUN number reported by a UFS device could be 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
+#define UFS_UPIU_WLUN_ID (1 << 7)
+
+/* The WriteBooster buffer is available only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
+/*
+ * The WriteBooster buffer lifetime has a limit set by the vendor.
+ * If it exceeds the limit, the WriteBooster feature will be disabled.
+ */
+#define UFS_WB_EXCEED_LIFETIME 0x0B
+
+/*
+ * In UFS Spec, the Extra Header Segment (EHS) starts from byte 32 in UPIU
+ * request/response packet
+ */
+#define UFS_EHS_OFFSET_IN_RESPONSE 32
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+ UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
+ UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
+ UFS_UPIU_BOOT_WLUN = 0xB0,
+ UFS_UPIU_RPMB_WLUN = 0xC4,
+};
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+ UFS_UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UFS_UPIU_TRANSACTION_COMMAND = 0x01,
+ UFS_UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UFS_UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UFS_UPIU_TRANSACTION_QUERY_REQ = 0x16,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+ UFS_UPIU_TRANSACTION_NOP_IN = 0x20,
+ UFS_UPIU_TRANSACTION_RESPONSE = 0x21,
+ UFS_UPIU_TRANSACTION_DATA_IN = 0x22,
+ UFS_UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UFS_UPIU_TRANSACTION_READY_XFER = 0x31,
+ UFS_UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UFS_UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
+};
+
+/* UPIU Read/Write flags */
+enum {
+ UFS_UPIU_CMD_FLAGS_NONE = 0x00,
+ UFS_UPIU_CMD_FLAGS_WRITE = 0x20,
+ UFS_UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UFS_UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UFS_UPIU_TASK_ATTR_ORDERED = 0x01,
+ UFS_UPIU_TASK_ATTR_HEADQ = 0x02,
+ UFS_UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UPIU Query request function */
+enum {
+ UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+ UFS_QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ UFS_QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
+ UFS_QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
+ UFS_QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+ UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
+ UFS_QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
+ UFS_QUERY_FLAG_IDN_REFRESH_ENABLE = 0x07,
+ UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
+ UFS_QUERY_FLAG_IDN_BUSY_RTC = 0x09,
+ UFS_QUERY_FLAG_IDN_RESERVED3 = 0x0A,
+ UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ UFS_QUERY_FLAG_IDN_WB_EN = 0x0E,
+ UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+ UFS_QUERY_FLAG_IDN_HPB_RESET = 0x11,
+ UFS_QUERY_FLAG_IDN_HPB_EN = 0x12,
+ UFS_QUERY_FLAG_IDN_COUNT,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ UFS_QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
+ UFS_QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
+ UFS_QUERY_ATTR_IDN_POWER_MODE = 0x02,
+ UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
+ UFS_QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
+ UFS_QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ UFS_QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
+ UFS_QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
+ UFS_QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
+ UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
+ UFS_QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
+ UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
+ UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
+ UFS_QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ UFS_QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+ UFS_QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
+ UFS_QUERY_ATTR_IDN_CNTX_CONF = 0x10,
+ UFS_QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
+ UFS_QUERY_ATTR_IDN_RESERVED2 = 0x12,
+ UFS_QUERY_ATTR_IDN_RESERVED3 = 0x13,
+ UFS_QUERY_ATTR_IDN_FFU_STATUS = 0x14,
+ UFS_QUERY_ATTR_IDN_PSA_STATE = 0x15,
+ UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+ UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+ UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+ UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
+ UFS_QUERY_ATTR_IDN_THROTTLING_STATUS = 0x1B,
+ UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
+ UFS_QUERY_ATTR_IDN_REFRESH_STATUS = 0x2C,
+ UFS_QUERY_ATTR_IDN_REFRESH_FREQ = 0x2D,
+ UFS_QUERY_ATTR_IDN_REFRESH_UNIT = 0x2E,
+ UFS_QUERY_ATTR_IDN_COUNT,
+};
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+ UFS_QUERY_DESC_IDN_DEVICE = 0x0,
+ UFS_QUERY_DESC_IDN_CONFIGURATION = 0x1,
+ UFS_QUERY_DESC_IDN_UNIT = 0x2,
+ UFS_QUERY_DESC_IDN_RFU_0 = 0x3,
+ UFS_QUERY_DESC_IDN_INTERCONNECT = 0x4,
+ UFS_QUERY_DESC_IDN_STRING = 0x5,
+ UFS_QUERY_DESC_IDN_RFU_1 = 0x6,
+ UFS_QUERY_DESC_IDN_GEOMETRY = 0x7,
+ UFS_QUERY_DESC_IDN_POWER = 0x8,
+ UFS_QUERY_DESC_IDN_HEALTH = 0x9,
+ UFS_QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+ UFS_QUERY_DESC_LENGTH_OFFSET = 0x00,
+ UFS_QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
+/* Unit descriptor parameters offsets in bytes */
+enum unit_desc_param {
+ UFS_UNIT_DESC_PARAM_LEN = 0x0,
+ UFS_UNIT_DESC_PARAM_TYPE = 0x1,
+ UFS_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UFS_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UFS_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UFS_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UFS_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UFS_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ UFS_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UFS_UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
+ UFS_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UFS_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UFS_UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
+ UFS_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UFS_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+ UFS_UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
+ UFS_UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UFS_UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+ UFS_UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+ UFS_UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
+ UFS_UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
+};
+
+/* RPMB Unit descriptor parameters offsets in bytes */
+enum rpmb_unit_desc_param {
+ UFS_RPMB_UNIT_DESC_PARAM_LEN = 0x0,
+ UFS_RPMB_UNIT_DESC_PARAM_TYPE = 0x1,
+ UFS_RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UFS_RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UFS_RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UFS_RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UFS_RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UFS_RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+ UFS_RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9,
+ UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15,
+ UFS_RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16,
+ UFS_RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UFS_RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+};
+
+/* Device descriptor parameters offsets in bytes */
+enum device_desc_param {
+ UFS_DEVICE_DESC_PARAM_LEN = 0x0,
+ UFS_DEVICE_DESC_PARAM_TYPE = 0x1,
+ UFS_DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+ UFS_DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+ UFS_DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+ UFS_DEVICE_DESC_PARAM_PRTCL = 0x5,
+ UFS_DEVICE_DESC_PARAM_NUM_LU = 0x6,
+ UFS_DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+ UFS_DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+ UFS_DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+ UFS_DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+ UFS_DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+ UFS_DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+ UFS_DEVICE_DESC_PARAM_SEC_LU = 0xD,
+ UFS_DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+ UFS_DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+ UFS_DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+ UFS_DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+ UFS_DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+ UFS_DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+ UFS_DEVICE_DESC_PARAM_SN = 0x16,
+ UFS_DEVICE_DESC_PARAM_OEM_ID = 0x17,
+ UFS_DEVICE_DESC_PARAM_MANF_ID = 0x18,
+ UFS_DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+ UFS_DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+ UFS_DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+ UFS_DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+ UFS_DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
+ UFS_DEVICE_DESC_PARAM_FFU_TMT = 0x20,
+ UFS_DEVICE_DESC_PARAM_Q_DPTH = 0x21,
+ UFS_DEVICE_DESC_PARAM_DEV_VER = 0x22,
+ UFS_DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
+ UFS_DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
+ UFS_DEVICE_DESC_PARAM_PSA_TMT = 0x29,
+ UFS_DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ UFS_DEVICE_DESC_PARAM_HPB_VER = 0x40,
+ UFS_DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
+ UFS_DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ UFS_DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ UFS_DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ UFS_DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
+};
+
+/* Interconnect descriptor parameters offsets in bytes */
+enum interconnect_desc_param {
+ UFS_INTERCONNECT_DESC_PARAM_LEN = 0x0,
+ UFS_INTERCONNECT_DESC_PARAM_TYPE = 0x1,
+ UFS_INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
+ UFS_INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
+};
+
+/* Geometry descriptor parameters offsets in bytes */
+enum geometry_desc_param {
+ UFS_GEOMETRY_DESC_PARAM_LEN = 0x0,
+ UFS_GEOMETRY_DESC_PARAM_TYPE = 0x1,
+ UFS_GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
+ UFS_GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
+ UFS_GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
+ UFS_GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
+ UFS_GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
+ UFS_GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
+ UFS_GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
+ UFS_GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
+ UFS_GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
+ UFS_GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
+ UFS_GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
+ UFS_GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
+ UFS_GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
+ UFS_GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
+ UFS_GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
+ UFS_GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
+ UFS_GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
+ UFS_GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
+ UFS_GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
+ UFS_GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
+ UFS_GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
+ UFS_GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
+ UFS_GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
+ UFS_GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
+ UFS_GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
+ UFS_GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
+ UFS_GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
+ UFS_GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
+ UFS_GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
+ UFS_GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ UFS_GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+ UFS_GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+ UFS_GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+ UFS_GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
+ UFS_GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ UFS_GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ UFS_GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ UFS_GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ UFS_GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
+};
+
+/* Health descriptor parameters offsets in bytes */
+enum health_desc_param {
+ UFS_HEALTH_DESC_PARAM_LEN = 0x0,
+ UFS_HEALTH_DESC_PARAM_TYPE = 0x1,
+ UFS_HEALTH_DESC_PARAM_EOL_INFO = 0x2,
+ UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
+ UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
+};
+
+/* WriteBooster buffer mode */
+enum {
+ UFS_WB_BUF_MODE_LU_DEDICATED = 0x0,
+ UFS_WB_BUF_MODE_SHARED = 0x1,
+};
+
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn =1
+ * 02h: LU permanently write protected when fPermanentWPEn =1
+ */
+enum ufs_lu_wp_type {
+ UFS_LU_NO_WP = 0x00,
+ UFS_LU_POWER_ON_WP = 0x01,
+ UFS_LU_PERM_WP = 0x02,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+ UFS_UPIU_QUERY_OPCODE_NOP = 0x0,
+ UFS_UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UFS_UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UFS_UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UFS_UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UFS_UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UFS_UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* Query response result code */
+typedef enum QueryRespCode {
+ UFS_QUERY_RESULT_SUCCESS = 0x00,
+ UFS_QUERY_RESULT_NOT_READABLE = 0xF6,
+ UFS_QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ UFS_QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ UFS_QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ UFS_QUERY_RESULT_INVALID_VALUE = 0xFA,
+ UFS_QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ UFS_QUERY_RESULT_INVALID_INDEX = 0xFC,
+ UFS_QUERY_RESULT_INVALID_IDN = 0xFD,
+ UFS_QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ UFS_QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+} QueryRespCode;
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+ UFS_UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UFS_UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UFS_UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+/* Task management service response */
+enum {
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+ UFS_UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+ UFS_UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+ UFS_ACTIVE_PWR_MODE = 1,
+ UFS_SLEEP_PWR_MODE = 2,
+ UFS_POWERDOWN_PWR_MODE = 3,
+ UFS_DEEPSLEEP_PWR_MODE = 4,
+};
+
+/*
+ * struct UtpCmdRsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+typedef struct QEMU_PACKED UtpCmdRsp {
+ uint32_t residual_transfer_count;
+ uint32_t reserved[4];
+ uint16_t sense_data_len;
+ uint8_t sense_data[UFS_SENSE_SIZE];
+} UtpCmdRsp;
+
+/*
+ * struct UtpUpiuRsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuRsp {
+ UtpUpiuHeader header;
+ union {
+ UtpCmdRsp sr;
+ UtpUpiuQuery qr;
+ };
+} UtpUpiuRsp;
+
+static inline void _ufs_check_size(void)
+{
+ QEMU_BUILD_BUG_ON(sizeof(UfsReg) != 0x104);
+ QEMU_BUILD_BUG_ON(sizeof(DeviceDescriptor) != 89);
+ QEMU_BUILD_BUG_ON(sizeof(GeometryDescriptor) != 87);
+ QEMU_BUILD_BUG_ON(sizeof(UnitDescriptor) != 45);
+ QEMU_BUILD_BUG_ON(sizeof(RpmbUnitDescriptor) != 35);
+ QEMU_BUILD_BUG_ON(sizeof(PowerParametersDescriptor) != 98);
+ QEMU_BUILD_BUG_ON(sizeof(InterconnectDescriptor) != 6);
+ QEMU_BUILD_BUG_ON(sizeof(StringDescriptor) != 254);
+ QEMU_BUILD_BUG_ON(sizeof(DeviceHealthDescriptor) != 45);
+ QEMU_BUILD_BUG_ON(sizeof(Flags) != 0x13);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuHeader) != 12);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuQuery) != 276);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuCmd) != 20);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuReq) != 288);
+ QEMU_BUILD_BUG_ON(sizeof(UfshcdSgEntry) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(RequestDescHeader) != 16);
+ QEMU_BUILD_BUG_ON(sizeof(UtpTransferReqDesc) != 32);
+ QEMU_BUILD_BUG_ON(sizeof(UtpTaskReqDesc) != 80);
+ QEMU_BUILD_BUG_ON(sizeof(UtpCmdRsp) != 40);
+ QEMU_BUILD_BUG_ON(sizeof(UtpUpiuRsp) != 288);
+}
+#endif
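For orientation, here is a minimal sketch of how the UTRD fields above fit together when a host-style caller builds a transfer request. The helper name is hypothetical, and the dword units assumed for the response-UPIU and PRDT positions follow the usual UFSHCI convention rather than anything stated in this header.

/* Hypothetical helper: populate a UTRD for a device-to-host transfer. */
static void example_fill_utrd(UtpTransferReqDesc *utrd, uint64_t ucd_addr,
                              uint16_t resp_off_dw, uint16_t resp_len_dw,
                              uint16_t prdt_off_dw, uint16_t prdt_len)
{
    utrd->header.dword_0 = cpu_to_le32(UFS_UTP_DEVICE_TO_HOST |
                                       UFS_UTP_REQ_DESC_INT_CMD);
    /* OCS starts as "invalid"; the device overwrites it on completion */
    utrd->header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_COMMAND_STATUS);

    utrd->command_desc_base_addr_lo = cpu_to_le32(ucd_addr & 0xffffffff);
    utrd->command_desc_base_addr_hi = cpu_to_le32(ucd_addr >> 32);

    utrd->response_upiu_offset = cpu_to_le16(resp_off_dw);
    utrd->response_upiu_length = cpu_to_le16(resp_len_dw);
    utrd->prd_table_offset = cpu_to_le16(prdt_off_dw);
    utrd->prd_table_length = cpu_to_le16(prdt_len);
}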
diff --git a/include/block/write-threshold.h b/include/block/write-threshold.h
index c646f267a4..63d1583887 100644
--- a/include/block/write-threshold.h
+++ b/include/block/write-threshold.h
@@ -13,8 +13,6 @@
#ifndef BLOCK_WRITE_THRESHOLD_H
#define BLOCK_WRITE_THRESHOLD_H
-#include "block/block_int.h"
-
/*
* bdrv_write_threshold_set:
*
@@ -36,27 +34,12 @@ void bdrv_write_threshold_set(BlockDriverState *bs, uint64_t threshold_bytes);
uint64_t bdrv_write_threshold_get(const BlockDriverState *bs);
/*
- * bdrv_write_threshold_is_set
- *
- * Tell if a write threshold is set for a given BDS.
- */
-bool bdrv_write_threshold_is_set(const BlockDriverState *bs);
-
-/*
- * bdrv_write_threshold_exceeded
- *
- * Return the extent of a write request that exceeded the threshold,
- * or zero if the request is below the threshold.
- * Return zero also if the threshold was not set.
- *
- * NOTE: here we assume the following holds for each request this code
- * deals with:
- *
- * assert((req->offset + req->bytes) <= UINT64_MAX)
+ * bdrv_write_threshold_check_write
*
- * Please not there is *not* an actual C assert().
+ * Check whether the specified request exceeds the write threshold.
+ * If so, send a corresponding event and disable write threshold checking.
*/
-uint64_t bdrv_write_threshold_exceeded(const BlockDriverState *bs,
- const BdrvTrackedRequest *req);
+void bdrv_write_threshold_check_write(BlockDriverState *bs, int64_t offset,
+ int64_t bytes);
#endif
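The replacement API folds the old "compute the excess, emit the event, clear the threshold" sequence into a single call. A hypothetical call site (the surrounding function is illustrative, not from the tree) now reduces to:

/* Illustrative caller: report a completed write request to the threshold
 * machinery; the event and any threshold reset happen inside
 * bdrv_write_threshold_check_write(). */
static void example_note_write(BlockDriverState *bs,
                               int64_t offset, int64_t bytes)
{
    if (bdrv_write_threshold_get(bs) > 0) {
        bdrv_write_threshold_check_write(bs, offset, bytes);
    }
}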
diff --git a/include/chardev/char-fd.h b/include/chardev/char-fd.h
index e7c2b176f9..9de0e440de 100644
--- a/include/chardev/char-fd.h
+++ b/include/chardev/char-fd.h
@@ -26,17 +26,20 @@
#include "io/channel.h"
#include "chardev/char.h"
+#include "qom/object.h"
-typedef struct FDChardev {
+struct FDChardev {
Chardev parent;
QIOChannel *ioc_in, *ioc_out;
int max_size;
-} FDChardev;
+};
+typedef struct FDChardev FDChardev;
#define TYPE_CHARDEV_FD "chardev-fd"
-#define FD_CHARDEV(obj) OBJECT_CHECK(FDChardev, (obj), TYPE_CHARDEV_FD)
+DECLARE_INSTANCE_CHECKER(FDChardev, FD_CHARDEV,
+ TYPE_CHARDEV_FD)
void qemu_chr_open_fd(Chardev *chr, int fd_in, int fd_out);
int qmp_chardev_open_file_source(char *src, int flags, Error **errp);
diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h
index a553843364..ecef182835 100644
--- a/include/chardev/char-fe.h
+++ b/include/chardev/char-fe.h
@@ -7,8 +7,12 @@
typedef void IOEventHandler(void *opaque, QEMUChrEvent event);
typedef int BackendChangeHandler(void *opaque);
-/* This is the backend as seen by frontend, the actual backend is
- * Chardev */
+/**
+ * struct CharBackend - back end as seen by front end
+ * @fe_is_open: the front end is ready for IO
+ *
+ * The actual backend is Chardev
+ */
struct CharBackend {
Chardev *chr;
IOEventHandler *chr_event;
@@ -17,7 +21,7 @@ struct CharBackend {
BackendChangeHandler *chr_be_change;
void *opaque;
int tag;
- int fe_open;
+ bool fe_is_open;
};
/**
@@ -78,7 +82,7 @@ bool qemu_chr_fe_backend_open(CharBackend *be);
* is not supported and will not be attempted
* @opaque: an opaque pointer for the callbacks
* @context: a main loop context or NULL for the default
- * @set_open: whether to call qemu_chr_fe_set_open() implicitely when
+ * @set_open: whether to call qemu_chr_fe_set_open() implicitly when
* any of the handler is non-NULL
* @sync_state: whether to issue event callback with updated state
*
@@ -138,7 +142,7 @@ void qemu_chr_fe_disconnect(CharBackend *be);
/**
* qemu_chr_fe_wait_connected:
*
- * Wait for characted backend to be connected, return < 0 on error or
+ * Wait for character backend to be connected, return < 0 on error or
* if no associated Chardev.
*/
int qemu_chr_fe_wait_connected(CharBackend *be, Error **errp);
@@ -156,12 +160,13 @@ void qemu_chr_fe_set_echo(CharBackend *be, bool echo);
/**
* qemu_chr_fe_set_open:
+ * @be: a CharBackend
+ * @is_open: the front end open status
*
- * Set character frontend open status. This is an indication that the
- * front end is ready (or not) to begin doing I/O.
- * Without associated Chardev, do nothing.
+ * This is an indication that the front end is ready (or not) to begin
+ * doing I/O. Without associated Chardev, do nothing.
*/
-void qemu_chr_fe_set_open(CharBackend *be, int fe_open);
+void qemu_chr_fe_set_open(CharBackend *be, bool is_open);
/**
* qemu_chr_fe_printf:
@@ -172,7 +177,24 @@ void qemu_chr_fe_set_open(CharBackend *be, int fe_open);
* Chardev.
*/
void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
+
+
+/**
+ * FEWatchFunc: a #GSourceFunc called when any of the conditions requested
+ * by qemu_chr_fe_add_watch() is satisfied.
+ * @do_not_use: depending on the underlying chardev, a GIOChannel or a
+ * QIOChannel. DO NOT USE!
+ * @cond: bitwise combination of conditions watched and satisfied
+ * before calling this callback.
+ * @data: user data passed at creation to qemu_chr_fe_add_watch(). Can
+ * be NULL.
+ *
+ * Returns: G_SOURCE_REMOVE if the GSource should be removed from the
+ * main loop, or G_SOURCE_CONTINUE to leave the GSource in
+ * the main loop.
+ */
+typedef gboolean (*FEWatchFunc)(void *do_not_use, GIOCondition condition, void *data);
/**
* qemu_chr_fe_add_watch:
@@ -188,10 +210,13 @@ void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...)
* Note that you are responsible to update the front-end sources if
* you are switching the main context with qemu_chr_fe_set_handlers().
*
+ * Warning: DO NOT use the first callback argument (it may be either
+ * a GIOChannel or a QIOChannel, depending on the underlying chardev)
+ *
* Returns: the source tag
*/
guint qemu_chr_fe_add_watch(CharBackend *be, GIOCondition cond,
- GIOFunc func, void *user_data);
+ FEWatchFunc func, void *user_data);
/**
* qemu_chr_fe_write:
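A hedged sketch of the new callback type in use: the callback and the message it sends are made up, but the signature and the G_SOURCE_* return convention follow the FEWatchFunc documentation above.

/* Hypothetical front end: send one message when the chardev becomes
 * writable again, then drop the watch. */
static gboolean example_chr_writable(void *do_not_use, GIOCondition cond,
                                     void *opaque)
{
    CharBackend *be = opaque;
    static const uint8_t msg[] = "ping\n";

    qemu_chr_fe_write(be, msg, sizeof(msg) - 1);
    return G_SOURCE_REMOVE;
}

static void example_queue_ping(CharBackend *be)
{
    qemu_chr_fe_add_watch(be, G_IO_OUT | G_IO_HUP,
                          example_chr_writable, be);
}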
diff --git a/include/chardev/char-socket.h b/include/chardev/char-socket.h
new file mode 100644
index 0000000000..0708ca6fa9
--- /dev/null
+++ b/include/chardev/char-socket.h
@@ -0,0 +1,87 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef CHAR_SOCKET_H
+#define CHAR_SOCKET_H
+
+#include "io/channel-socket.h"
+#include "io/channel-tls.h"
+#include "io/net-listener.h"
+#include "chardev/char.h"
+#include "qom/object.h"
+
+#define TCP_MAX_FDS 16
+
+typedef struct {
+ char buf[21];
+ size_t buflen;
+} TCPChardevTelnetInit;
+
+typedef enum {
+ TCP_CHARDEV_STATE_DISCONNECTED,
+ TCP_CHARDEV_STATE_CONNECTING,
+ TCP_CHARDEV_STATE_CONNECTED,
+} TCPChardevState;
+
+typedef ChardevClass SocketChardevClass;
+
+struct SocketChardev {
+ Chardev parent;
+ QIOChannel *ioc; /* Client I/O channel */
+ QIOChannelSocket *sioc; /* Client master channel */
+ QIONetListener *listener;
+ GSource *hup_source;
+ QCryptoTLSCreds *tls_creds;
+ char *tls_authz;
+ TCPChardevState state;
+ int max_size;
+ int do_telnetopt;
+ int do_nodelay;
+ int *read_msgfds;
+ size_t read_msgfds_num;
+ int *write_msgfds;
+ size_t write_msgfds_num;
+ bool registered_yank;
+
+ SocketAddress *addr;
+ bool is_listen;
+ bool is_telnet;
+ bool is_tn3270;
+ GSource *telnet_source;
+ TCPChardevTelnetInit *telnet_init;
+
+ bool is_websock;
+
+ GSource *reconnect_timer;
+ int64_t reconnect_time;
+ bool connect_err_reported;
+
+ QIOTask *connect_task;
+};
+typedef struct SocketChardev SocketChardev;
+
+DECLARE_INSTANCE_CHECKER(SocketChardev, SOCKET_CHARDEV,
+ TYPE_CHARDEV_SOCKET)
+
+#endif /* CHAR_SOCKET_H */
diff --git a/include/chardev/char-win.h b/include/chardev/char-win.h
index fa59e9e423..485521469c 100644
--- a/include/chardev/char-win.h
+++ b/include/chardev/char-win.h
@@ -25,8 +25,9 @@
#define CHAR_WIN_H
#include "chardev/char.h"
+#include "qom/object.h"
-typedef struct {
+struct WinChardev {
Chardev parent;
bool keep_open; /* console do not close file */
@@ -36,13 +37,15 @@ typedef struct {
/* Protected by the Chardev chr_write_lock. */
OVERLAPPED osend;
-} WinChardev;
+};
+typedef struct WinChardev WinChardev;
#define NSENDBUF 2048
#define NRECVBUF 2048
#define TYPE_CHARDEV_WIN "chardev-win"
-#define WIN_CHARDEV(obj) OBJECT_CHECK(WinChardev, (obj), TYPE_CHARDEV_WIN)
+DECLARE_INSTANCE_CHECKER(WinChardev, WIN_CHARDEV,
+ TYPE_CHARDEV_WIN)
void win_chr_set_file(Chardev *chr, HANDLE file, bool keep_open);
int win_chr_serial_init(Chardev *chr, const char *filename, Error **errp);
diff --git a/include/chardev/char.h b/include/chardev/char.h
index 00589a6025..01df55f9e8 100644
--- a/include/chardev/char.h
+++ b/include/chardev/char.h
@@ -65,6 +65,8 @@ struct Chardev {
char *filename;
int logfd;
int be_open;
+ /* used to coordinate the chardev-change special-case: */
+ bool handover_yank_instance;
GSource *gsource;
GMainContext *gcontext;
DECLARE_BITMAP(features, QEMU_CHAR_FEATURE_LAST);
@@ -184,7 +186,7 @@ int qemu_chr_be_can_write(Chardev *s);
* the caller should call @qemu_chr_be_can_write to determine how much data
* the front end can currently accept.
*/
-void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len);
+void qemu_chr_be_write(Chardev *s, const uint8_t *buf, int len);
/**
* qemu_chr_be_write_impl:
@@ -193,7 +195,7 @@ void qemu_chr_be_write(Chardev *s, uint8_t *buf, int len);
*
* Implementation of back end writing. Used by replay module.
*/
-void qemu_chr_be_write_impl(Chardev *s, uint8_t *buf, int len);
+void qemu_chr_be_write_impl(Chardev *s, const uint8_t *buf, int len);
/**
* qemu_chr_be_update_read_handlers:
@@ -226,11 +228,7 @@ int qemu_chr_write(Chardev *s, const uint8_t *buf, int len, bool write_all);
int qemu_chr_wait_connected(Chardev *chr, Error **errp);
#define TYPE_CHARDEV "chardev"
-#define CHARDEV(obj) OBJECT_CHECK(Chardev, (obj), TYPE_CHARDEV)
-#define CHARDEV_CLASS(klass) \
- OBJECT_CLASS_CHECK(ChardevClass, (klass), TYPE_CHARDEV)
-#define CHARDEV_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ChardevClass, (obj), TYPE_CHARDEV)
+OBJECT_DECLARE_TYPE(Chardev, ChardevClass, CHARDEV)
#define TYPE_CHARDEV_NULL "chardev-null"
#define TYPE_CHARDEV_MUX "chardev-mux"
@@ -251,32 +249,64 @@ int qemu_chr_wait_connected(Chardev *chr, Error **errp);
#define CHARDEV_IS_PTY(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_PTY)
-typedef struct ChardevClass {
+struct ChardevClass {
ObjectClass parent_class;
bool internal; /* TODO: eventually use TYPE_USER_CREATABLE */
+ bool supports_yank;
+
+ /* parse command line options and populate QAPI @backend */
void (*parse)(QemuOpts *opts, ChardevBackend *backend, Error **errp);
+ /* called after construction, open/starts the backend */
void (*open)(Chardev *chr, ChardevBackend *backend,
bool *be_opened, Error **errp);
+ /* write buf to the backend */
int (*chr_write)(Chardev *s, const uint8_t *buf, int len);
+
+ /*
+ * Read from the backend (blocking). A typical front-end will instead rely
+ * on chr_can_read/chr_read being called when polling/looping.
+ */
int (*chr_sync_read)(Chardev *s, const uint8_t *buf, int len);
+
+ /* create a watch on the backend */
GSource *(*chr_add_watch)(Chardev *s, GIOCondition cond);
+
+ /* update the backend internal sources */
void (*chr_update_read_handler)(Chardev *s);
+
+ /* send an ioctl to the backend */
int (*chr_ioctl)(Chardev *s, int cmd, void *arg);
+
+ /* get ancillary-received fds during last read */
int (*get_msgfds)(Chardev *s, int* fds, int num);
+
+ /* set ancillary fds to be sent with next write */
int (*set_msgfds)(Chardev *s, int *fds, int num);
+
+ /* accept the given fd */
int (*chr_add_client)(Chardev *chr, int fd);
+
+ /* wait for a connection */
int (*chr_wait_connected)(Chardev *chr, Error **errp);
+
+ /* disconnect a connection */
void (*chr_disconnect)(Chardev *chr);
+
+ /* called by frontend when it can read */
void (*chr_accept_input)(Chardev *chr);
+
+ /* set terminal echo */
void (*chr_set_echo)(Chardev *chr, bool echo);
+
+ /* notify the backend of frontend open state */
void (*chr_set_fe_open)(Chardev *chr, int fe_open);
+
+ /* handle various events */
void (*chr_be_event)(Chardev *s, QEMUChrEvent event);
- /* Return 0 if succeeded, 1 if failed */
- int (*chr_machine_done)(Chardev *chr);
-} ChardevClass;
+};
Chardev *qemu_chardev_new(const char *id, const char *typename,
ChardevBackend *backend, GMainContext *context,
@@ -287,7 +317,7 @@ extern int term_escape_char;
GSource *qemu_chr_timeout_add_ms(Chardev *chr, guint ms,
GSourceFunc func, void *private);
-/* console.c */
-void qemu_chr_parse_vc(QemuOpts *opts, ChardevBackend *backend, Error **errp);
+void suspend_mux_open(void);
+void resume_mux_open(void);
#endif
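With ChardevClass now an externally visible struct, a backend's class_init remains the place to wire up the hooks documented above. A purely illustrative backend (all names are hypothetical) only needs chr_write:

/* Hypothetical sink backend: accept and discard all front-end output. */
static int example_chr_write(Chardev *chr, const uint8_t *buf, int len)
{
    return len;                    /* pretend every byte was consumed */
}

static void example_chardev_class_init(ObjectClass *oc, void *data)
{
    ChardevClass *cc = CHARDEV_CLASS(oc);

    cc->chr_write = example_chr_write;
    /* the remaining hooks may stay NULL for such a trivial backend */
}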
diff --git a/include/chardev/spice.h b/include/chardev/spice.h
index 1f7339b649..58e5b727e9 100644
--- a/include/chardev/spice.h
+++ b/include/chardev/spice.h
@@ -3,8 +3,9 @@
#include <spice.h>
#include "chardev/char-fe.h"
+#include "qom/object.h"
-typedef struct SpiceChardev {
+struct SpiceChardev {
Chardev parent;
SpiceCharDeviceInstance sin;
@@ -12,16 +13,14 @@ typedef struct SpiceChardev {
bool blocked;
const uint8_t *datapos;
int datalen;
- QLIST_ENTRY(SpiceChardev) next;
-} SpiceChardev;
+};
+typedef struct SpiceChardev SpiceChardev;
#define TYPE_CHARDEV_SPICE "chardev-spice"
#define TYPE_CHARDEV_SPICEVMC "chardev-spicevmc"
#define TYPE_CHARDEV_SPICEPORT "chardev-spiceport"
-#define SPICE_CHARDEV(obj) OBJECT_CHECK(SpiceChardev, (obj), TYPE_CHARDEV_SPICE)
-
-void qemu_chr_open_spice_port(Chardev *chr, ChardevBackend *backend,
- bool *be_opened, Error **errp);
+DECLARE_INSTANCE_CHECKER(SpiceChardev, SPICE_CHARDEV,
+ TYPE_CHARDEV_SPICE)
#endif
diff --git a/include/crypto/aes-round.h b/include/crypto/aes-round.h
new file mode 100644
index 0000000000..854fb0966a
--- /dev/null
+++ b/include/crypto/aes-round.h
@@ -0,0 +1,164 @@
+/*
+ * AES round fragments, generic version
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_AES_ROUND_H
+#define CRYPTO_AES_ROUND_H
+
+/* Hosts with acceleration will usually need a 16-byte vector type. */
+typedef uint8_t AESStateVec __attribute__((vector_size(16)));
+
+typedef union {
+ uint8_t b[16];
+ uint32_t w[4];
+ uint64_t d[2];
+ AESStateVec v;
+} AESState;
+
+#include "host/crypto/aes-round.h"
+
+/*
+ * Perform MixColumns.
+ */
+
+void aesenc_MC_gen(AESState *ret, const AESState *st);
+void aesenc_MC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesenc_MC_accel(r, st, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesenc_MC_gen(r, st);
+ } else {
+ aesenc_MC_genrev(r, st);
+ }
+}
+
+/*
+ * Perform SubBytes + ShiftRows + AddRoundKey.
+ */
+
+void aesenc_SB_SR_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesenc_SB_SR_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesenc_SB_SR_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesenc_SB_SR_AK_gen(r, st, rk);
+ } else {
+ aesenc_SB_SR_AK_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform SubBytes + ShiftRows + MixColumns + AddRoundKey.
+ */
+
+void aesenc_SB_SR_MC_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesenc_SB_SR_MC_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesenc_SB_SR_MC_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesenc_SB_SR_MC_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesenc_SB_SR_MC_AK_gen(r, st, rk);
+ } else {
+ aesenc_SB_SR_MC_AK_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform InvMixColumns.
+ */
+
+void aesdec_IMC_gen(AESState *ret, const AESState *st);
+void aesdec_IMC_genrev(AESState *ret, const AESState *st);
+
+static inline void aesdec_IMC(AESState *r, const AESState *st, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_IMC_accel(r, st, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_IMC_gen(r, st);
+ } else {
+ aesdec_IMC_genrev(r, st);
+ }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_AK_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_AK_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey + InvMixColumns.
+ */
+
+void aesdec_ISB_ISR_AK_IMC_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_AK_IMC_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK_IMC(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_AK_IMC_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_AK_IMC_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_AK_IMC_genrev(r, st, rk);
+ }
+}
+
+/*
+ * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_IMC_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_IMC_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_IMC_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_IMC_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_IMC_AK_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_IMC_AK_genrev(r, st, rk);
+ }
+}
+
+#endif /* CRYPTO_AES_ROUND_H */
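As a sanity check on how the fragments compose, here is a hedged sketch of a full AES-128 block encryption built from them; the round-key array layout (11 pre-expanded round keys) is an assumption of the example, not something this header mandates.

/* Illustrative only: AES-128 = AddRoundKey, nine full rounds, and a
 * final round without MixColumns. */
static void example_aes128_encrypt(AESState *out, const AESState *in,
                                   const AESState rk[11], bool be)
{
    AESState st;
    int i;

    st.v = in->v ^ rk[0].v;                     /* initial AddRoundKey */

    for (i = 1; i < 10; i++) {
        aesenc_SB_SR_MC_AK(&st, &st, &rk[i], be);
    }

    aesenc_SB_SR_AK(out, &st, &rk[10], be);     /* last round omits MC */
}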
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 12fb321b89..381f24c902 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -16,52 +16,25 @@ typedef struct aes_key_st AES_KEY;
#define AES_set_decrypt_key QEMU_AES_set_decrypt_key
#define AES_encrypt QEMU_AES_encrypt
#define AES_decrypt QEMU_AES_decrypt
-#define AES_cbc_encrypt QEMU_AES_cbc_encrypt
int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY *key);
+ AES_KEY *key);
int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY *key);
+ AES_KEY *key);
void AES_encrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY *key);
+ const AES_KEY *key);
void AES_decrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY *key);
-void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
- const unsigned long length, const AES_KEY *key,
- unsigned char *ivec, const int enc);
+ const AES_KEY *key);
extern const uint8_t AES_sbox[256];
extern const uint8_t AES_isbox[256];
-/* AES ShiftRows and InvShiftRows */
-extern const uint8_t AES_shifts[16];
-extern const uint8_t AES_ishifts[16];
-
-/* AES InvMixColumns */
-/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */
-/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */
-/* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */
-/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */
-extern const uint32_t AES_imc[256][4];
-
/*
AES_Te0[x] = S [x].[02, 01, 01, 03];
-AES_Te1[x] = S [x].[03, 02, 01, 01];
-AES_Te2[x] = S [x].[01, 03, 02, 01];
-AES_Te3[x] = S [x].[01, 01, 03, 02];
-AES_Te4[x] = S [x].[01, 01, 01, 01];
-
AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
-AES_Td1[x] = Si[x].[0b, 0e, 09, 0d];
-AES_Td2[x] = Si[x].[0d, 0b, 0e, 09];
-AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
-AES_Td4[x] = Si[x].[01, 01, 01, 01];
*/
-extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256],
- AES_Te3[256], AES_Te4[256];
-extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256],
- AES_Td3[256], AES_Td4[256];
+extern const uint32_t AES_Te0[256], AES_Td0[256];
#endif
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
new file mode 100644
index 0000000000..8756105f22
--- /dev/null
+++ b/include/crypto/akcipher.h
@@ -0,0 +1,179 @@
+/*
+ * QEMU Crypto asymmetric algorithms
+ *
+ * Copyright (c) 2022 Bytedance
+ * Author: zhenwei pi <pizhenwei@bytedance.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QCRYPTO_AKCIPHER_H
+#define QCRYPTO_AKCIPHER_H
+
+#include "qapi/qapi-types-crypto.h"
+
+typedef struct QCryptoAkCipher QCryptoAkCipher;
+
+/**
+ * qcrypto_akcipher_supports:
+ * @opts: the asymmetric key algorithm and related options
+ *
+ * Determine if asymmetric key cipher described with @opts is
+ * supported by the current configured build
+ *
+ * Returns: true if it is supported, false otherwise.
+ */
+bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts);
+
+/**
+ * qcrypto_akcipher_new:
+ * @opts: specify the algorithm and the related arguments
+ * @type: private or public key type
+ * @key: buffer to store the key
+ * @key_len: the length of key buffer
+ * @errp: error pointer
+ *
+ * Create akcipher context
+ *
+ * Returns: On success, a new QCryptoAkCipher initialized with @opts
+ * is created and returned, otherwise NULL is returned.
+ */
+
+QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts,
+ QCryptoAkCipherKeyType type,
+ const uint8_t *key, size_t key_len,
+ Error **errp);
+
+/**
+ * qcrypto_akcipher_encrypt:
+ * @akcipher: akcipher context
+ * @in: plaintext pending to be encrypted
+ * @in_len: length of the plaintext, less than or equal to the size reported
+ * by a call to qcrypto_akcipher_max_plaintext_len()
+ * @out: buffer to store the ciphertext
+ * @out_len: length of the ciphertext buffer, less than or equal to the size
+ * reported by a call to qcrypto_akcipher_max_ciphertext_len()
+ * @errp: error pointer
+ *
+ * Encrypt @in and write the ciphertext into @out
+ *
+ * Returns: the length of the ciphertext if encryption succeeds,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_encrypt(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ void *out, size_t out_len, Error **errp);
+
+/**
+ * qcrypto_akcipher_decrypt:
+ * @akcipher: akcipher context
+ * @in: ciphertext to be decrypted
+ * @in_len: the length of the ciphertext, less than or equal to the size
+ * reported by a call to qcrypto_akcipher_max_ciphertext_len()
+ * @out: buffer to store the plaintext
+ * @out_len: length of the plaintext buffer, less than or equal to the size
+ * reported by a call to qcrypto_akcipher_max_plaintext_len()
+ * @errp: error pointer
+ *
+ * Decrypt @in and write the plaintext into @out
+ *
+ * Returns: the length of the plaintext if decryption succeeds,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_decrypt(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ void *out, size_t out_len, Error **errp);
+
+/**
+ * qcrypto_akcipher_sign:
+ * @akcipher: akcipher context
+ * @in: data to be signed
+ * @in_len: the length of the data, less than or equal to the size reported
+ * by a call to qcrypto_akcipher_max_dgst_len()
+ * @out: buffer to store the signature
+ * @out_len: length of the signature buffer, less than or equal to the size
+ * reported by a call to qcrypto_akcipher_max_signature_len()
+ * @errp: error pointer
+ *
+ * Generate a signature for @in and write it into @out
+ *
+ * Returns: the length of the signature on success,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_sign(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ void *out, size_t out_len, Error **errp);
+
+/**
+ * qcrypto_akcipher_verify:
+ * @akcipher: akcipher context
+ * @in: pointer to the signature
+ * @in_len: length of the signature, less than or equal to the size reported
+ * by a call to qcrypto_akcipher_max_signature_len()
+ * @in2: pointer to the original data
+ * @in2_len: the length of the original data, less than or equal to the size
+ * reported by a call to qcrypto_akcipher_max_dgst_len()
+ * @errp: error pointer
+ *
+ * Verify whether the signature @in matches the original data @in2
+ *
+ * Returns: 0 on success,
+ * otherwise -1 is returned
+ */
+int qcrypto_akcipher_verify(QCryptoAkCipher *akcipher,
+ const void *in, size_t in_len,
+ const void *in2, size_t in2_len, Error **errp);
+
+int qcrypto_akcipher_max_plaintext_len(QCryptoAkCipher *akcipher);
+
+int qcrypto_akcipher_max_ciphertext_len(QCryptoAkCipher *akcipher);
+
+int qcrypto_akcipher_max_signature_len(QCryptoAkCipher *akcipher);
+
+int qcrypto_akcipher_max_dgst_len(QCryptoAkCipher *akcipher);
+
+/**
+ * qcrypto_akcipher_free:
+ * @akcipher: akcipher context
+ *
+ * Free the akcipher context
+ *
+ */
+void qcrypto_akcipher_free(QCryptoAkCipher *akcipher);
+
+/**
+ * qcrypto_akcipher_export_p8info:
+ * @opts: the options of the akcipher to be exported.
+ * @key: the original key of the akcipher to be exported.
+ * @keylen: length of the 'key'
+ * @dst: output parameter, if the export succeeds, *dst is set to the
+ * PKCS#8 encoded private key; the caller MUST free this key with
+ * g_free after use.
+ * @dst_len: output parameter, indicates the length of the PKCS#8 encoded
+ * key.
+ *
+ * Export the key into DER-encoded PKCS#8 private key info; |key| is
+ * expected to hold a valid asymmetric PRIVATE key.
+ *
+ * Returns: 0 on success, otherwise -1 is returned.
+ */
+int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts,
+ uint8_t *key, size_t keylen,
+ uint8_t **dst, size_t *dst_len,
+ Error **errp);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipher, qcrypto_akcipher_free)
+
+#endif /* QCRYPTO_AKCIPHER_H */
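A hedged end-to-end sketch of the API above: create an RSA signing context, sign a digest, free the context. The QCryptoAkCipherOptions field and enum names are taken from the QAPI crypto schema as understood here and should be treated as assumptions; error handling is trimmed.

static int example_sign_digest(const uint8_t *der_key, size_t der_key_len,
                               const uint8_t *digest, size_t digest_len,
                               uint8_t *sig, size_t sig_len, Error **errp)
{
    /* option layout assumed from the QAPI schema */
    QCryptoAkCipherOptions opts = {
        .alg = QCRYPTO_AKCIPHER_ALG_RSA,
        .u.rsa = {
            .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1,
            .hash_alg = QCRYPTO_HASH_ALG_SHA256,
        },
    };
    QCryptoAkCipher *ak;
    int ret;

    ak = qcrypto_akcipher_new(&opts, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE,
                              der_key, der_key_len, errp);
    if (!ak) {
        return -1;
    }

    ret = qcrypto_akcipher_sign(ak, digest, digest_len, sig, sig_len, errp);
    qcrypto_akcipher_free(ak);
    return ret;
}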
diff --git a/include/crypto/block.h b/include/crypto/block.h
index d274819791..92e823c9f2 100644
--- a/include/crypto/block.h
+++ b/include/crypto/block.h
@@ -29,24 +29,24 @@ typedef struct QCryptoBlock QCryptoBlock;
/* See also QCryptoBlockFormat, QCryptoBlockCreateOptions
* and QCryptoBlockOpenOptions in qapi/crypto.json */
-typedef ssize_t (*QCryptoBlockReadFunc)(QCryptoBlock *block,
- size_t offset,
- uint8_t *buf,
- size_t buflen,
- void *opaque,
- Error **errp);
+typedef int (*QCryptoBlockReadFunc)(QCryptoBlock *block,
+ size_t offset,
+ uint8_t *buf,
+ size_t buflen,
+ void *opaque,
+ Error **errp);
-typedef ssize_t (*QCryptoBlockInitFunc)(QCryptoBlock *block,
- size_t headerlen,
- void *opaque,
- Error **errp);
+typedef int (*QCryptoBlockInitFunc)(QCryptoBlock *block,
+ size_t headerlen,
+ void *opaque,
+ Error **errp);
-typedef ssize_t (*QCryptoBlockWriteFunc)(QCryptoBlock *block,
- size_t offset,
- const uint8_t *buf,
- size_t buflen,
- void *opaque,
- Error **errp);
+typedef int (*QCryptoBlockWriteFunc)(QCryptoBlock *block,
+ size_t offset,
+ const uint8_t *buf,
+ size_t buflen,
+ void *opaque,
+ Error **errp);
/**
* qcrypto_block_has_format:
@@ -66,6 +66,7 @@ bool qcrypto_block_has_format(QCryptoBlockFormat format,
typedef enum {
QCRYPTO_BLOCK_OPEN_NO_IO = (1 << 0),
+ QCRYPTO_BLOCK_OPEN_DETACHED = (1 << 1),
} QCryptoBlockOpenFlags;
/**
@@ -95,6 +96,10 @@ typedef enum {
* metadata such as the payload offset. There will be
* no cipher or ivgen objects available.
*
+ * If @flags contains QCRYPTO_BLOCK_OPEN_DETACHED then
+ * the open process will be optimized to skip the LUKS
+ * payload overlap check.
+ *
* If any part of initializing the encryption context
* fails an error will be returned. This could be due
* to the volume being in the wrong format, a cipher
@@ -111,6 +116,10 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options,
size_t n_threads,
Error **errp);
+typedef enum {
+ QCRYPTO_BLOCK_CREATE_DETACHED = (1 << 0),
+} QCryptoBlockCreateFlags;
+
/**
* qcrypto_block_create:
* @options: the encryption options
@@ -118,6 +127,7 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options,
* @initfunc: callback for initializing volume header
* @writefunc: callback for writing data to the volume header
* @opaque: data to pass to @initfunc and @writefunc
+ * @flags: bitmask of QCryptoBlockCreateFlags values
* @errp: pointer to a NULL-initialized error object
*
* Create a new block encryption object for initializing
@@ -129,6 +139,11 @@ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options,
* generating new master keys, etc as required. Any existing
* data present on the volume will be irrevocably destroyed.
*
+ * If @flags contains QCRYPTO_BLOCK_CREATE_DETACHED then
+ * the creation process will set the payload_offset_sector to 0
+ * to specify the starting point for the read/write of a
+ * detached LUKS header image.
+ *
* If any part of initializing the encryption context
* fails an error will be returned. This could be due
* to the volume being in the wrong format, a cipher
@@ -142,6 +157,7 @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options,
QCryptoBlockInitFunc initfunc,
QCryptoBlockWriteFunc writefunc,
void *opaque,
+ unsigned int flags,
Error **errp);
/**
@@ -311,7 +327,5 @@ uint64_t qcrypto_block_get_sector_size(QCryptoBlock *block);
void qcrypto_block_free(QCryptoBlock *block);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlock, qcrypto_block_free)
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlockCreateOptions,
- qapi_free_QCryptoBlockCreateOptions)
#endif /* QCRYPTO_BLOCK_H */
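The callback typedefs now return a plain int instead of ssize_t. Below is a hedged sketch of a read callback under the new signature; the 0-on-success / negative-errno convention and the caller-state type are assumptions of the example.

typedef struct ExampleState {       /* hypothetical caller state */
    int fd;
} ExampleState;

static int example_block_read(QCryptoBlock *block, size_t offset,
                              uint8_t *buf, size_t buflen,
                              void *opaque, Error **errp)
{
    ExampleState *s = opaque;

    if (pread(s->fd, buf, buflen, offset) != (ssize_t)buflen) {
        error_setg(errp, "header read failed at offset %zu", offset);
        return -EIO;                /* assumed error convention */
    }
    return 0;                       /* assumed success value */
}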
diff --git a/include/crypto/cipher.h b/include/crypto/cipher.h
index 5928e5ecc7..083e12a7d9 100644
--- a/include/crypto/cipher.h
+++ b/include/crypto/cipher.h
@@ -24,6 +24,7 @@
#include "qapi/qapi-types-crypto.h"
typedef struct QCryptoCipher QCryptoCipher;
+typedef struct QCryptoCipherDriver QCryptoCipherDriver;
/* See also "QCryptoCipherAlgorithm" and "QCryptoCipherMode"
* enums defined in qapi/crypto.json */
@@ -79,8 +80,7 @@ typedef struct QCryptoCipher QCryptoCipher;
struct QCryptoCipher {
QCryptoCipherAlgorithm alg;
QCryptoCipherMode mode;
- void *opaque;
- void *driver;
+ const QCryptoCipherDriver *driver;
};
/**
diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
new file mode 100644
index 0000000000..446931fe05
--- /dev/null
+++ b/include/crypto/clmul.h
@@ -0,0 +1,83 @@
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_CLMUL_H
+#define CRYPTO_CLMUL_H
+
+#include "qemu/int128.h"
+#include "host/crypto/clmul.h"
+
+/**
+ * clmul_8x8_low:
+ *
+ * Perform eight 8x8->8 carry-less multiplies.
+ */
+uint64_t clmul_8x8_low(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_even:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The odd bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_even(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_odd:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The even bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_packed:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ */
+uint64_t clmul_8x4_packed(uint32_t, uint32_t);
+
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd(uint64_t, uint64_t);
+
+/**
+ * clmul_32:
+ *
+ * Perform a 32x32->64 carry-less multiply.
+ */
+uint64_t clmul_32(uint32_t, uint32_t);
+
+/**
+ * clmul_64:
+ *
+ * Perform a 64x64->128 carry-less multiply.
+ */
+Int128 clmul_64_gen(uint64_t, uint64_t);
+
+static inline Int128 clmul_64(uint64_t a, uint64_t b)
+{
+ if (HAVE_CLMUL_ACCEL) {
+ return clmul_64_accel(a, b);
+ } else {
+ return clmul_64_gen(a, b);
+ }
+}
+
+#endif /* CRYPTO_CLMUL_H */
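A small usage sketch: clmul_64() transparently picks the accelerated or generic implementation, and splitting the 128-bit product is left to the caller (the helper name below is made up).

/* Carry-less multiply two 64-bit polynomials over GF(2). */
static void example_clmul64(uint64_t a, uint64_t b,
                            uint64_t *lo, uint64_t *hi)
{
    Int128 prod = clmul_64(a, b);   /* accel or clmul_64_gen() fallback */

    *lo = int128_getlo(prod);
    *hi = int128_gethi(prod);
}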
diff --git a/include/crypto/desrfb.h b/include/crypto/desrfb.h
index 7ca596c387..af8d12234d 100644
--- a/include/crypto/desrfb.h
+++ b/include/crypto/desrfb.h
@@ -15,30 +15,30 @@
/* d3des.h -
*
- * Headers and defines for d3des.c
- * Graven Imagery, 1992.
+ * Headers and defines for d3des.c
+ * Graven Imagery, 1992.
*
* Copyright (c) 1988,1989,1990,1991,1992 by Richard Outerbridge
- * (GEnie : OUTER; CIS : [71755,204])
+ * (GEnie : OUTER; CIS : [71755,204])
*/
-#define EN0 0 /* MODE == encrypt */
-#define DE1 1 /* MODE == decrypt */
+#define EN0 0 /* MODE == encrypt */
+#define DE1 1 /* MODE == decrypt */
void deskey(unsigned char *, int);
-/* hexkey[8] MODE
+/* hexkey[8] MODE
* Sets the internal key register according to the hexadecimal
* key contained in the 8 bytes of hexkey, according to the DES,
* for encryption or decryption according to MODE.
*/
void usekey(unsigned long *);
-/* cookedkey[32]
+/* cookedkey[32]
* Loads the internal key register with the data in cookedkey.
*/
void des(unsigned char *, unsigned char *);
-/* from[8] to[8]
+/* from[8] to[8]
* Encrypts/Decrypts (according to the key currently loaded in the
* internal key register) one block of eight bytes at address 'from'
* into the block at address 'to'. They can be the same.
diff --git a/include/crypto/ivgen.h b/include/crypto/ivgen.h
index e41521519c..a09d5732da 100644
--- a/include/crypto/ivgen.h
+++ b/include/crypto/ivgen.h
@@ -32,7 +32,7 @@
* sector.
*
* <example>
- * <title>Encrypting block data with initialiation vectors</title>
+ * <title>Encrypting block data with initialization vectors</title>
* <programlisting>
* uint8_t *data = ....data to encrypt...
* size_t ndata = XXX;
@@ -147,7 +147,7 @@ QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg,
* @niv: the number of bytes in @iv
* @errp: pointer to a NULL-initialized error object
*
- * Calculate a new initialiation vector for the data
+ * Calculate a new initialization vector for the data
* to be stored in sector @sector. The IV will be
* written into the buffer @iv of size @niv.
*
diff --git a/include/crypto/secret.h b/include/crypto/secret.h
index 2deb461d2f..5d20ae6d2f 100644
--- a/include/crypto/secret.h
+++ b/include/crypto/secret.h
@@ -26,10 +26,10 @@
#include "crypto/secret_common.h"
#define TYPE_QCRYPTO_SECRET "secret"
-#define QCRYPTO_SECRET(obj) \
- OBJECT_CHECK(QCryptoSecret, (obj), TYPE_QCRYPTO_SECRET)
-
typedef struct QCryptoSecret QCryptoSecret;
+DECLARE_INSTANCE_CHECKER(QCryptoSecret, QCRYPTO_SECRET,
+ TYPE_QCRYPTO_SECRET)
+
typedef struct QCryptoSecretClass QCryptoSecretClass;
/**
diff --git a/include/crypto/secret_common.h b/include/crypto/secret_common.h
index 980c02ab71..a0a22e1abd 100644
--- a/include/crypto/secret_common.h
+++ b/include/crypto/secret_common.h
@@ -25,17 +25,9 @@
#include "qom/object.h"
#define TYPE_QCRYPTO_SECRET_COMMON "secret_common"
-#define QCRYPTO_SECRET_COMMON(obj) \
- OBJECT_CHECK(QCryptoSecretCommon, (obj), TYPE_QCRYPTO_SECRET_COMMON)
-#define QCRYPTO_SECRET_COMMON_CLASS(class) \
- OBJECT_CLASS_CHECK(QCryptoSecretCommonClass, \
- (class), TYPE_QCRYPTO_SECRET_COMMON)
-#define QCRYPTO_SECRET_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QCryptoSecretCommonClass, \
- (obj), TYPE_QCRYPTO_SECRET_COMMON)
+OBJECT_DECLARE_TYPE(QCryptoSecretCommon, QCryptoSecretCommonClass,
+ QCRYPTO_SECRET_COMMON)
-typedef struct QCryptoSecretCommon QCryptoSecretCommon;
-typedef struct QCryptoSecretCommonClass QCryptoSecretCommonClass;
struct QCryptoSecretCommon {
Object parent_obj;
@@ -56,13 +48,11 @@ struct QCryptoSecretCommonClass {
};
-extern int qcrypto_secret_lookup(const char *secretid,
- uint8_t **data,
- size_t *datalen,
- Error **errp);
-extern char *qcrypto_secret_lookup_as_utf8(const char *secretid,
- Error **errp);
-extern char *qcrypto_secret_lookup_as_base64(const char *secretid,
- Error **errp);
+int qcrypto_secret_lookup(const char *secretid,
+ uint8_t **data,
+ size_t *datalen,
+ Error **errp);
+char *qcrypto_secret_lookup_as_utf8(const char *secretid, Error **errp);
+char *qcrypto_secret_lookup_as_base64(const char *secretid, Error **errp);
#endif /* QCRYPTO_SECRET_COMMON_H */
diff --git a/include/crypto/secret_keyring.h b/include/crypto/secret_keyring.h
index 9f371ad251..3758852cb8 100644
--- a/include/crypto/secret_keyring.h
+++ b/include/crypto/secret_keyring.h
@@ -26,27 +26,15 @@
#include "crypto/secret_common.h"
#define TYPE_QCRYPTO_SECRET_KEYRING "secret_keyring"
-#define QCRYPTO_SECRET_KEYRING(obj) \
- OBJECT_CHECK(QCryptoSecretKeyring, (obj), \
- TYPE_QCRYPTO_SECRET_KEYRING)
-#define QCRYPTO_SECRET_KEYRING_CLASS(class) \
- OBJECT_CLASS_CHECK(QCryptoSecretKeyringClass, \
- (class), TYPE_QCRYPTO_SECRET_KEYRING)
-#define QCRYPTO_SECRET_KEYRING_GET_CLASS(class) \
- OBJECT_GET_CLASS(QCryptoSecretKeyringClass, \
- (class), TYPE_QCRYPTO_SECRET_KEYRING)
-
-typedef struct QCryptoSecretKeyring QCryptoSecretKeyring;
-typedef struct QCryptoSecretKeyringClass QCryptoSecretKeyringClass;
-
-typedef struct QCryptoSecretKeyring {
+OBJECT_DECLARE_SIMPLE_TYPE(QCryptoSecretKeyring,
+ QCRYPTO_SECRET_KEYRING)
+
+
+struct QCryptoSecretKeyring {
QCryptoSecretCommon parent;
int32_t serial;
-} QCryptoSecretKeyring;
+};
-typedef struct QCryptoSecretKeyringClass {
- QCryptoSecretCommonClass parent;
-} QCryptoSecretKeyringClass;
#endif /* QCRYPTO_SECRET_KEYRING_H */
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
new file mode 100644
index 0000000000..382b26d922
--- /dev/null
+++ b/include/crypto/sm4.h
@@ -0,0 +1,15 @@
+#ifndef QEMU_SM4_H
+#define QEMU_SM4_H
+
+extern const uint8_t sm4_sbox[256];
+extern const uint32_t sm4_ck[32];
+
+static inline uint32_t sm4_subword(uint32_t word)
+{
+ return sm4_sbox[word & 0xff] |
+ sm4_sbox[(word >> 8) & 0xff] << 8 |
+ sm4_sbox[(word >> 16) & 0xff] << 16 |
+ sm4_sbox[(word >> 24) & 0xff] << 24;
+}
+
+#endif
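For context, one SM4 key-schedule round expressed with the helpers above; the L' rotation amounts (13 and 23) come from the SM4 specification and are reproduced here as assumptions of this sketch, not from the header.

#include "qemu/bitops.h"    /* rol32() */

/* Illustrative: derive round key rk[i] = K[i+4] from the previous four
 * key words and the round constant sm4_ck[i]. */
static inline uint32_t example_sm4_key_round(uint32_t k0, uint32_t k1,
                                             uint32_t k2, uint32_t k3,
                                             uint32_t ck)
{
    uint32_t t = sm4_subword(k1 ^ k2 ^ k3 ^ ck);

    /* L' linear transform, then mix with K[i] */
    return k0 ^ t ^ rol32(t, 13) ^ rol32(t, 23);
}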
diff --git a/include/crypto/tls-cipher-suites.h b/include/crypto/tls-cipher-suites.h
index 28b3a73ce1..3bd2003f32 100644
--- a/include/crypto/tls-cipher-suites.h
+++ b/include/crypto/tls-cipher-suites.h
@@ -8,21 +8,16 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef QCRYPTO_TLSCIPHERSUITES_H
-#define QCRYPTO_TLSCIPHERSUITES_H
+#ifndef QCRYPTO_TLS_CIPHER_SUITES_H
+#define QCRYPTO_TLS_CIPHER_SUITES_H
#include "qom/object.h"
#include "crypto/tlscreds.h"
#define TYPE_QCRYPTO_TLS_CIPHER_SUITES "tls-cipher-suites"
-#define QCRYPTO_TLS_CIPHER_SUITES(obj) \
- OBJECT_CHECK(QCryptoTLSCipherSuites, (obj), TYPE_QCRYPTO_TLS_CIPHER_SUITES)
-
-typedef struct QCryptoTLSCipherSuites {
- /* <private> */
- QCryptoTLSCreds parent_obj;
- /* <public> */
-} QCryptoTLSCipherSuites;
+typedef struct QCryptoTLSCipherSuites QCryptoTLSCipherSuites;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCipherSuites, QCRYPTO_TLS_CIPHER_SUITES,
+ TYPE_QCRYPTO_TLS_CIPHER_SUITES)
/**
* qcrypto_tls_cipher_suites_get_data:
@@ -36,4 +31,4 @@ typedef struct QCryptoTLSCipherSuites {
GByteArray *qcrypto_tls_cipher_suites_get_data(QCryptoTLSCipherSuites *obj,
Error **errp);
-#endif /* QCRYPTO_TLSCIPHERSUITES_H */
+#endif /* QCRYPTO_TLS_CIPHER_SUITES_H */
diff --git a/include/crypto/tlscreds.h b/include/crypto/tlscreds.h
index fd7a284aa2..2a8a857010 100644
--- a/include/crypto/tlscreds.h
+++ b/include/crypto/tlscreds.h
@@ -24,20 +24,17 @@
#include "qapi/qapi-types-crypto.h"
#include "qom/object.h"
-#ifdef CONFIG_GNUTLS
-#include <gnutls/gnutls.h>
-#endif
-
#define TYPE_QCRYPTO_TLS_CREDS "tls-creds"
-#define QCRYPTO_TLS_CREDS(obj) \
- OBJECT_CHECK(QCryptoTLSCreds, (obj), TYPE_QCRYPTO_TLS_CREDS)
-
typedef struct QCryptoTLSCreds QCryptoTLSCreds;
typedef struct QCryptoTLSCredsClass QCryptoTLSCredsClass;
+DECLARE_OBJ_CHECKERS(QCryptoTLSCreds, QCryptoTLSCredsClass, QCRYPTO_TLS_CREDS,
+ TYPE_QCRYPTO_TLS_CREDS)
+
#define QCRYPTO_TLS_CREDS_DH_PARAMS "dh-params.pem"
+typedef bool (*CryptoTLSCredsReload)(QCryptoTLSCreds *, Error **);
/**
* QCryptoTLSCreds:
*
@@ -47,21 +44,24 @@ typedef struct QCryptoTLSCredsClass QCryptoTLSCredsClass;
* certificate credentials.
*/
-struct QCryptoTLSCreds {
- Object parent_obj;
- char *dir;
- QCryptoTLSCredsEndpoint endpoint;
-#ifdef CONFIG_GNUTLS
- gnutls_dh_params_t dh_params;
-#endif
- bool verifyPeer;
- char *priority;
-};
-
-
struct QCryptoTLSCredsClass {
ObjectClass parent_class;
+ CryptoTLSCredsReload reload;
};
+/**
+ * qcrypto_tls_creds_check_endpoint:
+ * @creds: pointer to a TLS credentials object
+ * @endpoint: type of network endpoint that will be using the credentials
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Check whether the credentials are set up for the type of network
+ * endpoint given by the @endpoint argument.
+ *
+ * Returns true if the credentials are set up for the endpoint, false otherwise
+ */
+bool qcrypto_tls_creds_check_endpoint(QCryptoTLSCreds *creds,
+ QCryptoTLSCredsEndpoint endpoint,
+ Error **errp);
#endif /* QCRYPTO_TLSCREDS_H */
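The new reload hook and qcrypto_tls_creds_check_endpoint() let callers verify a credentials object before wiring it into a session. An illustrative check, assuming the QAPI-generated QCRYPTO_TLS_CREDS_ENDPOINT_SERVER value:

/* Illustrative helper, not part of this patch. */
static bool example_validate_server_creds(QCryptoTLSCreds *creds, Error **errp)
{
    return qcrypto_tls_creds_check_endpoint(creds,
                                            QCRYPTO_TLS_CREDS_ENDPOINT_SERVER,
                                            errp);
}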
diff --git a/include/crypto/tlscredsanon.h b/include/crypto/tlscredsanon.h
index 9e9a5ce1a8..bd3023f9ea 100644
--- a/include/crypto/tlscredsanon.h
+++ b/include/crypto/tlscredsanon.h
@@ -22,13 +22,14 @@
#define QCRYPTO_TLSCREDSANON_H
#include "crypto/tlscreds.h"
+#include "qom/object.h"
#define TYPE_QCRYPTO_TLS_CREDS_ANON "tls-creds-anon"
-#define QCRYPTO_TLS_CREDS_ANON(obj) \
- OBJECT_CHECK(QCryptoTLSCredsAnon, (obj), TYPE_QCRYPTO_TLS_CREDS_ANON)
+typedef struct QCryptoTLSCredsAnon QCryptoTLSCredsAnon;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCredsAnon, QCRYPTO_TLS_CREDS_ANON,
+ TYPE_QCRYPTO_TLS_CREDS_ANON)
-typedef struct QCryptoTLSCredsAnon QCryptoTLSCredsAnon;
typedef struct QCryptoTLSCredsAnonClass QCryptoTLSCredsAnonClass;
/**
@@ -91,18 +92,6 @@ typedef struct QCryptoTLSCredsAnonClass QCryptoTLSCredsAnonClass;
*
*/
-
-struct QCryptoTLSCredsAnon {
- QCryptoTLSCreds parent_obj;
-#ifdef CONFIG_GNUTLS
- union {
- gnutls_anon_server_credentials_t server;
- gnutls_anon_client_credentials_t client;
- } data;
-#endif
-};
-
-
struct QCryptoTLSCredsAnonClass {
QCryptoTLSCredsClass parent_class;
};
diff --git a/include/crypto/tlscredspsk.h b/include/crypto/tlscredspsk.h
index 907035a29b..bcd07dc4f6 100644
--- a/include/crypto/tlscredspsk.h
+++ b/include/crypto/tlscredspsk.h
@@ -22,12 +22,13 @@
#define QCRYPTO_TLSCREDSPSK_H
#include "crypto/tlscreds.h"
+#include "qom/object.h"
#define TYPE_QCRYPTO_TLS_CREDS_PSK "tls-creds-psk"
-#define QCRYPTO_TLS_CREDS_PSK(obj) \
- OBJECT_CHECK(QCryptoTLSCredsPSK, (obj), TYPE_QCRYPTO_TLS_CREDS_PSK)
-
typedef struct QCryptoTLSCredsPSK QCryptoTLSCredsPSK;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCredsPSK, QCRYPTO_TLS_CREDS_PSK,
+ TYPE_QCRYPTO_TLS_CREDS_PSK)
+
typedef struct QCryptoTLSCredsPSKClass QCryptoTLSCredsPSKClass;
#define QCRYPTO_TLS_CREDS_PSKFILE "keys.psk"
@@ -86,18 +87,6 @@ typedef struct QCryptoTLSCredsPSKClass QCryptoTLSCredsPSKClass;
* The PSK file can be created and managed using psktool.
*/
-struct QCryptoTLSCredsPSK {
- QCryptoTLSCreds parent_obj;
- char *username;
-#ifdef CONFIG_GNUTLS
- union {
- gnutls_psk_server_credentials_t server;
- gnutls_psk_client_credentials_t client;
- } data;
-#endif
-};
-
-
struct QCryptoTLSCredsPSKClass {
QCryptoTLSCredsClass parent_class;
};
diff --git a/include/crypto/tlscredsx509.h b/include/crypto/tlscredsx509.h
index e1542e5c8c..c4daba21a6 100644
--- a/include/crypto/tlscredsx509.h
+++ b/include/crypto/tlscredsx509.h
@@ -22,12 +22,13 @@
#define QCRYPTO_TLSCREDSX509_H
#include "crypto/tlscreds.h"
+#include "qom/object.h"
#define TYPE_QCRYPTO_TLS_CREDS_X509 "tls-creds-x509"
-#define QCRYPTO_TLS_CREDS_X509(obj) \
- OBJECT_CHECK(QCryptoTLSCredsX509, (obj), TYPE_QCRYPTO_TLS_CREDS_X509)
-
typedef struct QCryptoTLSCredsX509 QCryptoTLSCredsX509;
+DECLARE_INSTANCE_CHECKER(QCryptoTLSCredsX509, QCRYPTO_TLS_CREDS_X509,
+ TYPE_QCRYPTO_TLS_CREDS_X509)
+
typedef struct QCryptoTLSCredsX509Class QCryptoTLSCredsX509Class;
#define QCRYPTO_TLS_CREDS_X509_CA_CERT "ca-cert.pem"
@@ -95,16 +96,6 @@ typedef struct QCryptoTLSCredsX509Class QCryptoTLSCredsX509Class;
*
*/
-struct QCryptoTLSCredsX509 {
- QCryptoTLSCreds parent_obj;
-#ifdef CONFIG_GNUTLS
- gnutls_certificate_credentials_t data;
-#endif
- bool sanityCheck;
- char *passwordid;
-};
-
-
struct QCryptoTLSCredsX509Class {
QCryptoTLSCredsClass parent_class;
};
diff --git a/include/crypto/tlssession.h b/include/crypto/tlssession.h
index 15b9cef086..571049bd0e 100644
--- a/include/crypto/tlssession.h
+++ b/include/crypto/tlssession.h
@@ -249,6 +249,17 @@ ssize_t qcrypto_tls_session_read(QCryptoTLSSession *sess,
size_t len);
/**
+ * qcrypto_tls_session_check_pending:
+ * @sess: the TLS session object
+ *
+ * Check whether the TLS layer is holding decrypted data that has already
+ * been read from the underlying data source but not yet consumed by the
+ * caller.
+ *
+ * Returns: the number of bytes available or zero
+ */
+size_t qcrypto_tls_session_check_pending(QCryptoTLSSession *sess);
+
+/**
* qcrypto_tls_session_handshake:
* @sess: the TLS session object
* @errp: pointer to a NULL-initialized error object
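A sketch of the intended usage pattern for the new helper, pairing it with qcrypto_tls_session_read() as declared earlier in this header; the buffer sizing via MIN() and the surrounding function are illustrative:

static ssize_t example_drain_pending(QCryptoTLSSession *sess,
                                     char *buf, size_t buflen)
{
    size_t pending = qcrypto_tls_session_check_pending(sess);

    if (pending == 0) {
        return 0;           /* nothing buffered inside the TLS layer */
    }
    /* Consume only data already decrypted; this cannot block on the socket. */
    return qcrypto_tls_session_read(sess, buf, MIN(pending, buflen));
}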
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
index 9856bf7921..a1d26ce903 100644
--- a/include/disas/dis-asm.h
+++ b/include/disas/dis-asm.h
@@ -9,6 +9,8 @@
#ifndef DISAS_DIS_ASM_H
#define DISAS_DIS_ASM_H
+#include "qemu/bswap.h"
+
typedef void *PTR;
typedef uint64_t bfd_vma;
typedef int64_t bfd_signed_vma;
@@ -186,20 +188,20 @@ enum bfd_architecture
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30
bfd_arch_arm, /* Advanced Risc Machines ARM */
-#define bfd_mach_arm_unknown 0
-#define bfd_mach_arm_2 1
-#define bfd_mach_arm_2a 2
-#define bfd_mach_arm_3 3
-#define bfd_mach_arm_3M 4
-#define bfd_mach_arm_4 5
-#define bfd_mach_arm_4T 6
-#define bfd_mach_arm_5 7
-#define bfd_mach_arm_5T 8
-#define bfd_mach_arm_5TE 9
-#define bfd_mach_arm_XScale 10
-#define bfd_mach_arm_ep9312 11
-#define bfd_mach_arm_iWMMXt 12
-#define bfd_mach_arm_iWMMXt2 13
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+#define bfd_mach_arm_iWMMXt2 13
bfd_arch_ns32k, /* National Semiconductors ns32000 */
bfd_arch_w65, /* WDC 65816 */
bfd_arch_tic30, /* Texas Instruments TMS320C30 */
@@ -239,16 +241,11 @@ enum bfd_architecture
bfd_arch_ia64, /* HP/Intel ia64 */
#define bfd_mach_ia64_elf64 64
#define bfd_mach_ia64_elf32 32
- bfd_arch_nios2, /* Nios II */
-#define bfd_mach_nios2 0
-#define bfd_mach_nios2r1 1
-#define bfd_mach_nios2r2 2
- bfd_arch_lm32, /* Lattice Mico32 */
-#define bfd_mach_lm32 1
bfd_arch_rx, /* Renesas RX */
#define bfd_mach_rx 0x75
#define bfd_mach_rx_v2 0x76
#define bfd_mach_rx_v3 0x77
+ bfd_arch_loongarch,
bfd_arch_last
};
#define bfd_mach_s390_31 31
@@ -265,17 +262,17 @@ typedef struct symbol_cache_entry
} asymbol;
typedef int (*fprintf_function)(FILE *f, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
enum dis_insn_type {
- dis_noninsn, /* Not a valid instruction */
- dis_nonbranch, /* Not a branch instruction */
- dis_branch, /* Unconditional branch */
- dis_condbranch, /* Conditional branch */
- dis_jsr, /* Jump to subroutine */
- dis_condjsr, /* Conditional jump to subroutine */
- dis_dref, /* Data reference instruction */
- dis_dref2 /* Two data references in instruction */
+ dis_noninsn, /* Not a valid instruction */
+ dis_nonbranch, /* Not a branch instruction */
+ dis_branch, /* Unconditional branch */
+ dis_condbranch, /* Conditional branch */
+ dis_jsr, /* Jump to subroutine */
+ dis_condjsr, /* Conditional jump to subroutine */
+ dis_dref, /* Data reference instruction */
+ dis_dref2 /* Two data references in instruction */
};
/* This struct is passed into the instruction decoding routine,
@@ -318,8 +315,8 @@ typedef struct disassemble_info {
The top 16 bits are reserved for public use (and are documented here).
The bottom 16 bits are for the internal use of the disassembler. */
unsigned long flags;
-#define INSN_HAS_RELOC 0x80000000
-#define INSN_ARM_BE32 0x00010000
+#define INSN_HAS_RELOC 0x80000000
+#define INSN_ARM_BE32 0x00010000
PTR private_data;
/* Function used to get bytes to disassemble. MEMADDR is the
@@ -329,7 +326,7 @@ typedef struct disassemble_info {
Returns an errno value or 0 for success. */
int (*read_memory_func)
(bfd_vma memaddr, bfd_byte *myaddr, int length,
- struct disassemble_info *info);
+ struct disassemble_info *info);
/* Function which should be called if we get an error that we can't
recover from. STATUS is the errno value from read_memory_func and
@@ -358,7 +355,7 @@ typedef struct disassemble_info {
(bfd_vma addr, struct disassemble_info * info);
/* These are for buffer_read_memory. */
- bfd_byte *buffer;
+ const bfd_byte *buffer;
bfd_vma buffer_vma;
int buffer_length;
@@ -383,20 +380,28 @@ typedef struct disassemble_info {
To determine whether this decoder supports this information, set
insn_info_valid to 0, decode an instruction, then check it. */
- char insn_info_valid; /* Branch info has been set. */
- char branch_delay_insns; /* How many sequential insn's will run before
- a branch takes effect. (0 = normal) */
- char data_size; /* Size of data reference in insn, in bytes */
- enum dis_insn_type insn_type; /* Type of instruction */
- bfd_vma target; /* Target address of branch or dref, if known;
- zero if unknown. */
- bfd_vma target2; /* Second target address for dref2 */
+ char insn_info_valid; /* Branch info has been set. */
+  char branch_delay_insns;  /* How many sequential insns will run before
+                               a branch takes effect. (0 = normal) */
+ char data_size; /* Size of data reference in insn, in bytes */
+ enum dis_insn_type insn_type; /* Type of instruction */
+ bfd_vma target; /* Target address of branch or dref, if known;
+ zero if unknown. */
+ bfd_vma target2; /* Second target address for dref2 */
/* Command line options specific to the target disassembler. */
char * disassembler_options;
+ /*
+ * When true, the disassembler may preface the disassembly with the raw
+ * opcode values. This exists mainly so the plugin interface, which does
+ * not want that prefix, can turn it off.
+ */
+ bool show_opcodes;
+
/* Field intended to be used by targets in any way they deem suitable. */
- int64_t target_info;
+ void *target_info;
/* Options for Capstone disassembly. */
int cap_arch;
@@ -406,7 +411,6 @@ typedef struct disassemble_info {
} disassemble_info;
-
/* Standard disassemblers. Disassemble one instruction at the given
target address. Return number of bytes processed. */
typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *);
@@ -415,7 +419,6 @@ int print_insn_tci(bfd_vma, disassemble_info*);
int print_insn_big_mips (bfd_vma, disassemble_info*);
int print_insn_little_mips (bfd_vma, disassemble_info*);
int print_insn_nanomips (bfd_vma, disassemble_info*);
-int print_insn_i386 (bfd_vma, disassemble_info*);
int print_insn_m68k (bfd_vma, disassemble_info*);
int print_insn_z8001 (bfd_vma, disassemble_info*);
int print_insn_z8002 (bfd_vma, disassemble_info*);
@@ -426,7 +429,6 @@ int print_insn_h8500 (bfd_vma, disassemble_info*);
int print_insn_arm_a64 (bfd_vma, disassemble_info*);
int print_insn_alpha (bfd_vma, disassemble_info*);
disassembler_ftype arc_get_disassembler (int, int);
-int print_insn_arm (bfd_vma, disassemble_info*);
int print_insn_sparc (bfd_vma, disassemble_info*);
int print_insn_big_a29k (bfd_vma, disassemble_info*);
int print_insn_little_a29k (bfd_vma, disassemble_info*);
@@ -438,7 +440,6 @@ int print_insn_m32r (bfd_vma, disassemble_info*);
int print_insn_m88k (bfd_vma, disassemble_info*);
int print_insn_mn10200 (bfd_vma, disassemble_info*);
int print_insn_mn10300 (bfd_vma, disassemble_info*);
-int print_insn_moxie (bfd_vma, disassemble_info*);
int print_insn_ns32k (bfd_vma, disassemble_info*);
int print_insn_big_powerpc (bfd_vma, disassemble_info*);
int print_insn_little_powerpc (bfd_vma, disassemble_info*);
@@ -447,80 +448,29 @@ int print_insn_w65 (bfd_vma, disassemble_info*);
int print_insn_d10v (bfd_vma, disassemble_info*);
int print_insn_v850 (bfd_vma, disassemble_info*);
int print_insn_tic30 (bfd_vma, disassemble_info*);
-int print_insn_ppc (bfd_vma, disassemble_info*);
-int print_insn_s390 (bfd_vma, disassemble_info*);
int print_insn_crisv32 (bfd_vma, disassemble_info*);
int print_insn_crisv10 (bfd_vma, disassemble_info*);
int print_insn_microblaze (bfd_vma, disassemble_info*);
int print_insn_ia64 (bfd_vma, disassemble_info*);
-int print_insn_lm32 (bfd_vma, disassemble_info*);
-int print_insn_big_nios2 (bfd_vma, disassemble_info*);
-int print_insn_little_nios2 (bfd_vma, disassemble_info*);
int print_insn_xtensa (bfd_vma, disassemble_info*);
int print_insn_riscv32 (bfd_vma, disassemble_info*);
int print_insn_riscv64 (bfd_vma, disassemble_info*);
+int print_insn_riscv128 (bfd_vma, disassemble_info*);
int print_insn_rx(bfd_vma, disassemble_info *);
-
-#if 0
-/* Fetch the disassembler for a given BFD, if that support is available. */
-disassembler_ftype disassembler(bfd *);
-#endif
-
-
-/* This block of definitions is for particular callers who read instructions
- into a buffer before calling the instruction decoder. */
-
-/* Here is a function which callers may wish to use for read_memory_func.
- It gets bytes from a buffer. */
-int buffer_read_memory(bfd_vma, bfd_byte *, int, struct disassemble_info *);
-
-/* This function goes with buffer_read_memory.
- It prints a message using info->fprintf_func and info->stream. */
-void perror_memory(int, bfd_vma, struct disassemble_info *);
-
-
-/* Just print the address in hex. This is included for completeness even
- though both GDB and objdump provide their own (to print symbolic
- addresses). */
-void generic_print_address(bfd_vma, struct disassemble_info *);
-
-/* Always true. */
-int generic_symbol_at_address(bfd_vma, struct disassemble_info *);
-
-/* Macro to initialize a disassemble_info struct. This should be called
- by all applications creating such a struct. */
-#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \
- (INFO).flavour = bfd_target_unknown_flavour, \
- (INFO).arch = bfd_arch_unknown, \
- (INFO).mach = 0, \
- (INFO).endian = BFD_ENDIAN_UNKNOWN, \
- INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC)
-
-/* Call this macro to initialize only the internal variables for the
- disassembler. Architecture dependent things such as byte order, or machine
- variant are not touched by this macro. This makes things much easier for
- GDB which must initialize these things separately. */
-
-#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \
- (INFO).fprintf_func = (FPRINTF_FUNC), \
- (INFO).stream = (STREAM), \
- (INFO).symbols = NULL, \
- (INFO).num_symbols = 0, \
- (INFO).private_data = NULL, \
- (INFO).buffer = NULL, \
- (INFO).buffer_vma = 0, \
- (INFO).buffer_length = 0, \
- (INFO).read_memory_func = buffer_read_memory, \
- (INFO).memory_error_func = perror_memory, \
- (INFO).print_address_func = generic_print_address, \
- (INFO).print_insn = NULL, \
- (INFO).symbol_at_address_func = generic_symbol_at_address, \
- (INFO).flags = 0, \
- (INFO).bytes_per_line = 0, \
- (INFO).bytes_per_chunk = 0, \
- (INFO).display_endian = BFD_ENDIAN_UNKNOWN, \
- (INFO).disassembler_options = NULL, \
- (INFO).insn_info_valid = 0
+int print_insn_hexagon(bfd_vma, disassemble_info *);
+int print_insn_loongarch(bfd_vma, disassemble_info *);
+
+#ifdef CONFIG_CAPSTONE
+bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size);
+bool cap_disas_host(disassemble_info *info, const void *code, size_t size);
+bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count);
+bool cap_disas_plugin(disassemble_info *info, uint64_t pc, size_t size);
+#else
+# define cap_disas_target(i, p, s) false
+# define cap_disas_host(i, p, s) false
+# define cap_disas_monitor(i, p, c) false
+# define cap_disas_plugin(i, p, c) false
+#endif /* CONFIG_CAPSTONE */
#ifndef ATTRIBUTE_UNUSED
#define ATTRIBUTE_UNUSED __attribute__((unused))
@@ -528,11 +478,31 @@ int generic_symbol_at_address(bfd_vma, struct disassemble_info *);
/* from libbfd */
-bfd_vma bfd_getl64 (const bfd_byte *addr);
-bfd_vma bfd_getl32 (const bfd_byte *addr);
-bfd_vma bfd_getb32 (const bfd_byte *addr);
-bfd_vma bfd_getl16 (const bfd_byte *addr);
-bfd_vma bfd_getb16 (const bfd_byte *addr);
+static inline bfd_vma bfd_getl64(const bfd_byte *addr)
+{
+ return ldq_le_p(addr);
+}
+
+static inline bfd_vma bfd_getl32(const bfd_byte *addr)
+{
+ return (uint32_t)ldl_le_p(addr);
+}
+
+static inline bfd_vma bfd_getl16(const bfd_byte *addr)
+{
+ return lduw_le_p(addr);
+}
+
+static inline bfd_vma bfd_getb32(const bfd_byte *addr)
+{
+ return (uint32_t)ldl_be_p(addr);
+}
+
+static inline bfd_vma bfd_getb16(const bfd_byte *addr)
+{
+ return lduw_be_p(addr);
+}
+
typedef bool bfd_boolean;
#endif /* DISAS_DIS_ASM_H */
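With the libbfd byte loaders now implemented as inline wrappers around the qemu/bswap.h helpers, a target disassembler can pull values straight out of the prefetched buffer without caring about host endianness. Illustrative only; example_fetch_le32() is not part of the header:

static uint32_t example_fetch_le32(const disassemble_info *info, size_t off)
{
    /* Assumes the caller already filled info->buffer via read_memory_func. */
    return bfd_getl32(info->buffer + off);
}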
diff --git a/include/disas/disas.h b/include/disas/disas.h
index 1b6e035e32..176775eff7 100644
--- a/include/disas/disas.h
+++ b/include/disas/disas.h
@@ -1,34 +1,23 @@
#ifndef QEMU_DISAS_H
#define QEMU_DISAS_H
-#include "exec/hwaddr.h"
-
-#ifdef NEED_CPU_H
-#include "cpu.h"
-
/* Disassemble this for me please... (debugging). */
-void disas(FILE *out, void *code, unsigned long size, const char *note);
-void target_disas(FILE *out, CPUState *cpu, target_ulong code,
- target_ulong size);
+void disas(FILE *out, const void *code, size_t size);
+void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size);
-void monitor_disas(Monitor *mon, CPUState *cpu,
- target_ulong pc, int nb_insn, int is_physical);
+void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc,
+ int nb_insn, bool is_physical);
char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size);
/* Look up symbol for debugging purpose. Returns "" if unknown. */
-const char *lookup_symbol(target_ulong orig_addr);
-#endif
+const char *lookup_symbol(uint64_t orig_addr);
struct syminfo;
struct elf32_sym;
struct elf64_sym;
-#if defined(CONFIG_USER_ONLY)
-typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr);
-#else
-typedef const char *(*lookup_symbol_t)(struct syminfo *s, hwaddr orig_addr);
-#endif
+typedef const char *(*lookup_symbol_t)(struct syminfo *s, uint64_t orig_addr);
struct syminfo {
lookup_symbol_t lookup_symbol;
diff --git a/include/elf.h b/include/elf.h
index 5b06b55f28..e7259ec366 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -11,23 +11,28 @@ typedef uint32_t Elf32_Word;
/* 64-bit ELF base types. */
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
-typedef int16_t Elf64_SHalf;
+typedef int16_t Elf64_SHalf;
typedef uint64_t Elf64_Off;
-typedef int32_t Elf64_Sword;
+typedef int32_t Elf64_Sword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Xword;
typedef int64_t Elf64_Sxword;
/* These constants are for the segment types stored in the image headers */
-#define PT_NULL 0
-#define PT_LOAD 1
-#define PT_DYNAMIC 2
-#define PT_INTERP 3
-#define PT_NOTE 4
-#define PT_SHLIB 5
-#define PT_PHDR 6
-#define PT_LOPROC 0x70000000
-#define PT_HIPROC 0x7fffffff
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+#define PT_LOOS 0x60000000
+#define PT_HIOS 0x6fffffff
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
#define PT_MIPS_REGINFO 0x70000000
#define PT_MIPS_RTPROC 0x70000001
@@ -36,34 +41,34 @@ typedef int64_t Elf64_Sxword;
/* Flags in the e_flags field of the header */
/* MIPS architecture level. */
-#define EF_MIPS_ARCH 0xf0000000
+#define EF_MIPS_ARCH 0xf0000000
/* Legal values for MIPS architecture level. */
-#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
-#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
-#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
-#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
-#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
-#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
-#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
-#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */
-#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */
-#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
-#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */
+#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */
+#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
+#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
/* The ABI of a file. */
-#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
-#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
-
-#define EF_MIPS_NOREORDER 0x00000001
-#define EF_MIPS_PIC 0x00000002
-#define EF_MIPS_CPIC 0x00000004
-#define EF_MIPS_ABI2 0x00000020
-#define EF_MIPS_OPTIONS_FIRST 0x00000080
-#define EF_MIPS_32BITMODE 0x00000100
-#define EF_MIPS_ABI 0x0000f000
-#define EF_MIPS_FP64 0x00000200
-#define EF_MIPS_NAN2008 0x00000400
+#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
+#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
+
+#define EF_MIPS_NOREORDER 0x00000001
+#define EF_MIPS_PIC 0x00000002
+#define EF_MIPS_CPIC 0x00000004
+#define EF_MIPS_ABI2 0x00000020
+#define EF_MIPS_OPTIONS_FIRST 0x00000080
+#define EF_MIPS_32BITMODE 0x00000100
+#define EF_MIPS_ABI 0x0000f000
+#define EF_MIPS_FP64 0x00000200
+#define EF_MIPS_NAN2008 0x00000400
/* MIPS machine variant */
#define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */
@@ -124,163 +129,162 @@ typedef struct mips_elf_abiflags_v0 {
#define ET_HIPROC 0xffff
/* These constants define the various ELF target machines */
-#define EM_NONE 0
-#define EM_M32 1
-#define EM_SPARC 2
-#define EM_386 3
-#define EM_68K 4
-#define EM_88K 5
-#define EM_486 6 /* Perhaps disused */
-#define EM_860 7
+#define EM_NONE 0
+#define EM_M32 1
+#define EM_SPARC 2
+#define EM_386 3
+#define EM_68K 4
+#define EM_88K 5
+#define EM_486 6 /* Perhaps disused */
+#define EM_860 7
-#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
+#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
-#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
-#define EM_PARISC 15 /* HPPA */
+#define EM_PARISC 15 /* HPPA */
-#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
+#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
-#define EM_PPC 20 /* PowerPC */
-#define EM_PPC64 21 /* PowerPC64 */
+#define EM_PPC 20 /* PowerPC */
+#define EM_PPC64 21 /* PowerPC64 */
-#define EM_ARM 40 /* ARM */
+#define EM_ARM 40 /* ARM */
-#define EM_SH 42 /* SuperH */
+#define EM_SH 42 /* SuperH */
-#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit */
-#define EM_TRICORE 44 /* Infineon TriCore */
+#define EM_TRICORE 44 /* Infineon TriCore */
-#define EM_IA_64 50 /* HP/Intel IA-64 */
+#define EM_IA_64 50 /* HP/Intel IA-64 */
-#define EM_X86_64 62 /* AMD x86-64 */
+#define EM_X86_64 62 /* AMD x86-64 */
-#define EM_S390 22 /* IBM S/390 */
+#define EM_S390 22 /* IBM S/390 */
-#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
-#define EM_AVR 83 /* AVR 8-bit microcontroller */
+#define EM_AVR 83 /* AVR 8-bit microcontroller */
-#define EM_V850 87 /* NEC v850 */
+#define EM_V850 87 /* NEC v850 */
-#define EM_H8_300H 47 /* Hitachi H8/300H */
-#define EM_H8S 48 /* Hitachi H8S */
-#define EM_LATTICEMICO32 138 /* LatticeMico32 */
+#define EM_H8_300H 47 /* Hitachi H8/300H */
+#define EM_H8S 48 /* Hitachi H8S */
+#define EM_LATTICEMICO32 138 /* LatticeMico32 */
-#define EM_OPENRISC 92 /* OpenCores OpenRISC */
+#define EM_OPENRISC 92 /* OpenCores OpenRISC */
-#define EM_UNICORE32 110 /* UniCore32 */
+#define EM_HEXAGON 164 /* Qualcomm Hexagon */
-#define EM_RISCV 243 /* RISC-V */
+#define EM_RX 173 /* Renesas RX family */
-#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
+#define EM_RISCV 243 /* RISC-V */
+
+#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
+
+#define EM_LOONGARCH 258 /* LoongArch */
/*
* This is an interim value that we will use until the committee comes
* up with a final number.
*/
-#define EM_ALPHA 0x9026
+#define EM_ALPHA 0x9026
/* Bogus old v850 magic number, used by old tools. */
-#define EM_CYGNUS_V850 0x9080
+#define EM_CYGNUS_V850 0x9080
/*
* This is the old interim value for S/390 architecture
*/
-#define EM_S390_OLD 0xA390
-
-#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
-
-#define EM_MICROBLAZE 189
-#define EM_MICROBLAZE_OLD 0xBAAB
+#define EM_S390_OLD 0xA390
-#define EM_XTENSA 94 /* Tensilica Xtensa */
+#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
-#define EM_AARCH64 183
+#define EM_MICROBLAZE 189
+#define EM_MICROBLAZE_OLD 0xBAAB
-#define EM_TILEGX 191 /* TILE-Gx */
+#define EM_XTENSA 94 /* Tensilica Xtensa */
-#define EM_MOXIE 223 /* Moxie processor family */
-#define EM_MOXIE_OLD 0xFEED
+#define EM_AARCH64 183
-#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */
+#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */
/* This is the info that is needed to parse the dynamic section of the file */
-#define DT_NULL 0
-#define DT_NEEDED 1
-#define DT_PLTRELSZ 2
-#define DT_PLTGOT 3
-#define DT_HASH 4
-#define DT_STRTAB 5
-#define DT_SYMTAB 6
-#define DT_RELA 7
-#define DT_RELASZ 8
-#define DT_RELAENT 9
-#define DT_STRSZ 10
-#define DT_SYMENT 11
-#define DT_INIT 12
-#define DT_FINI 13
-#define DT_SONAME 14
-#define DT_RPATH 15
-#define DT_SYMBOLIC 16
-#define DT_REL 17
-#define DT_RELSZ 18
-#define DT_RELENT 19
-#define DT_PLTREL 20
-#define DT_DEBUG 21
-#define DT_TEXTREL 22
-#define DT_JMPREL 23
-#define DT_BINDNOW 24
-#define DT_INIT_ARRAY 25
-#define DT_FINI_ARRAY 26
-#define DT_INIT_ARRAYSZ 27
-#define DT_FINI_ARRAYSZ 28
-#define DT_RUNPATH 29
-#define DT_FLAGS 30
-#define DT_LOOS 0x6000000d
-#define DT_HIOS 0x6ffff000
-#define DT_LOPROC 0x70000000
-#define DT_HIPROC 0x7fffffff
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+#define DT_BINDNOW 24
+#define DT_INIT_ARRAY 25
+#define DT_FINI_ARRAY 26
+#define DT_INIT_ARRAYSZ 27
+#define DT_FINI_ARRAYSZ 28
+#define DT_RUNPATH 29
+#define DT_FLAGS 30
+#define DT_LOOS 0x6000000d
+#define DT_HIOS 0x6ffff000
+#define DT_LOPROC 0x70000000
+#define DT_HIPROC 0x7fffffff
/* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use
the d_val field of the Elf*_Dyn structure. I.e. they contain scalars. */
-#define DT_VALRNGLO 0x6ffffd00
-#define DT_VALRNGHI 0x6ffffdff
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_VALRNGHI 0x6ffffdff
/* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use
the d_ptr field of the Elf*_Dyn structure. I.e. they contain pointers. */
-#define DT_ADDRRNGLO 0x6ffffe00
-#define DT_ADDRRNGHI 0x6ffffeff
-
-#define DT_VERSYM 0x6ffffff0
-#define DT_RELACOUNT 0x6ffffff9
-#define DT_RELCOUNT 0x6ffffffa
-#define DT_FLAGS_1 0x6ffffffb
-#define DT_VERDEF 0x6ffffffc
-#define DT_VERDEFNUM 0x6ffffffd
-#define DT_VERNEED 0x6ffffffe
-#define DT_VERNEEDNUM 0x6fffffff
-
-#define DT_MIPS_RLD_VERSION 0x70000001
-#define DT_MIPS_TIME_STAMP 0x70000002
-#define DT_MIPS_ICHECKSUM 0x70000003
-#define DT_MIPS_IVERSION 0x70000004
-#define DT_MIPS_FLAGS 0x70000005
- #define RHF_NONE 0
- #define RHF_HARDWAY 1
- #define RHF_NOTPOT 2
-#define DT_MIPS_BASE_ADDRESS 0x70000006
-#define DT_MIPS_CONFLICT 0x70000008
-#define DT_MIPS_LIBLIST 0x70000009
-#define DT_MIPS_LOCAL_GOTNO 0x7000000a
-#define DT_MIPS_CONFLICTNO 0x7000000b
-#define DT_MIPS_LIBLISTNO 0x70000010
-#define DT_MIPS_SYMTABNO 0x70000011
-#define DT_MIPS_UNREFEXTNO 0x70000012
-#define DT_MIPS_GOTSYM 0x70000013
-#define DT_MIPS_HIPAGENO 0x70000014
-#define DT_MIPS_RLD_MAP 0x70000016
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_ADDRRNGHI 0x6ffffeff
+
+#define DT_VERSYM 0x6ffffff0
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+#define DT_FLAGS_1 0x6ffffffb
+#define DT_VERDEF 0x6ffffffc
+#define DT_VERDEFNUM 0x6ffffffd
+#define DT_VERNEED 0x6ffffffe
+#define DT_VERNEEDNUM 0x6fffffff
+
+#define DT_MIPS_RLD_VERSION 0x70000001
+#define DT_MIPS_TIME_STAMP 0x70000002
+#define DT_MIPS_ICHECKSUM 0x70000003
+#define DT_MIPS_IVERSION 0x70000004
+#define DT_MIPS_FLAGS 0x70000005
+ #define RHF_NONE 0
+ #define RHF_HARDWAY 1
+ #define RHF_NOTPOT 2
+#define DT_MIPS_BASE_ADDRESS 0x70000006
+#define DT_MIPS_CONFLICT 0x70000008
+#define DT_MIPS_LIBLIST 0x70000009
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a
+#define DT_MIPS_CONFLICTNO 0x7000000b
+#define DT_MIPS_LIBLISTNO 0x70000010
+#define DT_MIPS_SYMTABNO 0x70000011
+#define DT_MIPS_UNREFEXTNO 0x70000012
+#define DT_MIPS_GOTSYM 0x70000013
+#define DT_MIPS_HIPAGENO 0x70000014
+#define DT_MIPS_RLD_MAP 0x70000016
/* This info is needed when parsing the symbol table */
#define STB_LOCAL 0
@@ -293,61 +297,61 @@ typedef struct mips_elf_abiflags_v0 {
#define STT_SECTION 3
#define STT_FILE 4
-#define ELF_ST_BIND(x) ((x) >> 4)
-#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_BIND(x) ((x) >> 4)
+#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
#define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf))
-#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
-#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
-#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
-#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
/* Symbolic values for the entries in the auxiliary table
put on the initial stack */
-#define AT_NULL 0 /* end of vector */
-#define AT_IGNORE 1 /* entry should be ignored */
-#define AT_EXECFD 2 /* file descriptor of program */
-#define AT_PHDR 3 /* program headers for program */
-#define AT_PHENT 4 /* size of program header entry */
-#define AT_PHNUM 5 /* number of program headers */
-#define AT_PAGESZ 6 /* system page size */
-#define AT_BASE 7 /* base address of interpreter */
-#define AT_FLAGS 8 /* flags */
-#define AT_ENTRY 9 /* entry point of program */
-#define AT_NOTELF 10 /* program is not ELF */
-#define AT_UID 11 /* real uid */
-#define AT_EUID 12 /* effective uid */
-#define AT_GID 13 /* real gid */
-#define AT_EGID 14 /* effective gid */
-#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
-#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
-#define AT_CLKTCK 17 /* frequency at which times() increments */
-#define AT_FPUCW 18 /* info about fpu initialization by kernel */
-#define AT_DCACHEBSIZE 19 /* data cache block size */
-#define AT_ICACHEBSIZE 20 /* instruction cache block size */
-#define AT_UCACHEBSIZE 21 /* unified cache block size */
-#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
-#define AT_SECURE 23 /* boolean, was exec suid-like? */
-#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
-#define AT_RANDOM 25 /* address of 16 random bytes */
-#define AT_HWCAP2 26 /* extension of AT_HWCAP */
-#define AT_EXECFN 31 /* filename of the executable */
-#define AT_SYSINFO 32 /* address of kernel entry point */
-#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
-#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
-#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
-#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
-#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
+#define AT_NULL 0 /* end of vector */
+#define AT_IGNORE 1 /* entry should be ignored */
+#define AT_EXECFD 2 /* file descriptor of program */
+#define AT_PHDR 3 /* program headers for program */
+#define AT_PHENT 4 /* size of program header entry */
+#define AT_PHNUM 5 /* number of program headers */
+#define AT_PAGESZ 6 /* system page size */
+#define AT_BASE 7 /* base address of interpreter */
+#define AT_FLAGS 8 /* flags */
+#define AT_ENTRY 9 /* entry point of program */
+#define AT_NOTELF 10 /* program is not ELF */
+#define AT_UID 11 /* real uid */
+#define AT_EUID 12 /* effective uid */
+#define AT_GID 13 /* real gid */
+#define AT_EGID 14 /* effective gid */
+#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
+#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
+#define AT_CLKTCK 17 /* frequency at which times() increments */
+#define AT_FPUCW 18 /* info about fpu initialization by kernel */
+#define AT_DCACHEBSIZE 19 /* data cache block size */
+#define AT_ICACHEBSIZE 20 /* instruction cache block size */
+#define AT_UCACHEBSIZE 21 /* unified cache block size */
+#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
+#define AT_SECURE 23 /* boolean, was exec suid-like? */
+#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
+#define AT_RANDOM 25 /* address of 16 random bytes */
+#define AT_HWCAP2 26 /* extension of AT_HWCAP */
+#define AT_EXECFN 31 /* filename of the executable */
+#define AT_SYSINFO 32 /* address of kernel entry point */
+#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
+#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
+#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
+#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
+#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
typedef struct dynamic{
Elf32_Sword d_tag;
union{
- Elf32_Sword d_val;
- Elf32_Addr d_ptr;
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
} d_un;
} Elf32_Dyn;
typedef struct {
- Elf64_Sxword d_tag; /* entry tag value */
+ Elf64_Sxword d_tag; /* entry tag value */
union {
Elf64_Xword d_val;
Elf64_Addr d_ptr;
@@ -358,72 +362,72 @@ typedef struct {
#define ELF32_R_SYM(x) ((x) >> 8)
#define ELF32_R_TYPE(x) ((x) & 0xff)
-#define ELF64_R_SYM(i) ((i) >> 32)
-#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
#define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000)
-#define R_386_NONE 0
-#define R_386_32 1
-#define R_386_PC32 2
-#define R_386_GOT32 3
-#define R_386_PLT32 4
-#define R_386_COPY 5
-#define R_386_GLOB_DAT 6
-#define R_386_JMP_SLOT 7
-#define R_386_RELATIVE 8
-#define R_386_GOTOFF 9
-#define R_386_GOTPC 10
-#define R_386_NUM 11
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
/* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */
-#define R_386_PC8 23
-
-#define R_MIPS_NONE 0
-#define R_MIPS_16 1
-#define R_MIPS_32 2
-#define R_MIPS_REL32 3
-#define R_MIPS_26 4
-#define R_MIPS_HI16 5
-#define R_MIPS_LO16 6
-#define R_MIPS_GPREL16 7
-#define R_MIPS_LITERAL 8
-#define R_MIPS_GOT16 9
-#define R_MIPS_PC16 10
-#define R_MIPS_CALL16 11
-#define R_MIPS_GPREL32 12
+#define R_386_PC8 23
+
+#define R_MIPS_NONE 0
+#define R_MIPS_16 1
+#define R_MIPS_32 2
+#define R_MIPS_REL32 3
+#define R_MIPS_26 4
+#define R_MIPS_HI16 5
+#define R_MIPS_LO16 6
+#define R_MIPS_GPREL16 7
+#define R_MIPS_LITERAL 8
+#define R_MIPS_GOT16 9
+#define R_MIPS_PC16 10
+#define R_MIPS_CALL16 11
+#define R_MIPS_GPREL32 12
/* The remaining relocs are defined on Irix, although they are not
in the MIPS ELF ABI. */
-#define R_MIPS_UNUSED1 13
-#define R_MIPS_UNUSED2 14
-#define R_MIPS_UNUSED3 15
-#define R_MIPS_SHIFT5 16
-#define R_MIPS_SHIFT6 17
-#define R_MIPS_64 18
-#define R_MIPS_GOT_DISP 19
-#define R_MIPS_GOT_PAGE 20
-#define R_MIPS_GOT_OFST 21
+#define R_MIPS_UNUSED1 13
+#define R_MIPS_UNUSED2 14
+#define R_MIPS_UNUSED3 15
+#define R_MIPS_SHIFT5 16
+#define R_MIPS_SHIFT6 17
+#define R_MIPS_64 18
+#define R_MIPS_GOT_DISP 19
+#define R_MIPS_GOT_PAGE 20
+#define R_MIPS_GOT_OFST 21
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
-#define R_MIPS_GOTHI16 22
-#define R_MIPS_GOTLO16 23
-#define R_MIPS_SUB 24
-#define R_MIPS_INSERT_A 25
-#define R_MIPS_INSERT_B 26
-#define R_MIPS_DELETE 27
-#define R_MIPS_HIGHER 28
-#define R_MIPS_HIGHEST 29
+#define R_MIPS_GOTHI16 22
+#define R_MIPS_GOTLO16 23
+#define R_MIPS_SUB 24
+#define R_MIPS_INSERT_A 25
+#define R_MIPS_INSERT_B 26
+#define R_MIPS_DELETE 27
+#define R_MIPS_HIGHER 28
+#define R_MIPS_HIGHEST 29
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
-#define R_MIPS_CALLHI16 30
-#define R_MIPS_CALLLO16 31
+#define R_MIPS_CALLHI16 30
+#define R_MIPS_CALLLO16 31
/*
* This range is reserved for vendor specific relocations.
*/
-#define R_MIPS_LOVENDOR 100
-#define R_MIPS_HIVENDOR 127
+#define R_MIPS_LOVENDOR 100
+#define R_MIPS_HIVENDOR 127
/* SUN SPARC specific definitions. */
@@ -444,48 +448,48 @@ typedef struct {
/*
* Sparc ELF relocation types
*/
-#define R_SPARC_NONE 0
-#define R_SPARC_8 1
-#define R_SPARC_16 2
-#define R_SPARC_32 3
-#define R_SPARC_DISP8 4
-#define R_SPARC_DISP16 5
-#define R_SPARC_DISP32 6
-#define R_SPARC_WDISP30 7
-#define R_SPARC_WDISP22 8
-#define R_SPARC_HI22 9
-#define R_SPARC_22 10
-#define R_SPARC_13 11
-#define R_SPARC_LO10 12
-#define R_SPARC_GOT10 13
-#define R_SPARC_GOT13 14
-#define R_SPARC_GOT22 15
-#define R_SPARC_PC10 16
-#define R_SPARC_PC22 17
-#define R_SPARC_WPLT30 18
-#define R_SPARC_COPY 19
-#define R_SPARC_GLOB_DAT 20
-#define R_SPARC_JMP_SLOT 21
-#define R_SPARC_RELATIVE 22
-#define R_SPARC_UA32 23
-#define R_SPARC_PLT32 24
-#define R_SPARC_HIPLT22 25
-#define R_SPARC_LOPLT10 26
-#define R_SPARC_PCPLT32 27
-#define R_SPARC_PCPLT22 28
-#define R_SPARC_PCPLT10 29
-#define R_SPARC_10 30
-#define R_SPARC_11 31
-#define R_SPARC_64 32
-#define R_SPARC_OLO10 33
-#define R_SPARC_HH22 34
-#define R_SPARC_HM10 35
-#define R_SPARC_LM22 36
-#define R_SPARC_WDISP16 40
-#define R_SPARC_WDISP19 41
-#define R_SPARC_7 43
-#define R_SPARC_5 44
-#define R_SPARC_6 45
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
/* Bits present in AT_HWCAP for ARM. */
@@ -558,6 +562,7 @@ typedef struct {
#define PPC_FEATURE2_HTM_NOSC 0x01000000
#define PPC_FEATURE2_ARCH_3_00 0x00800000
#define PPC_FEATURE2_HAS_IEEE128 0x00400000
+#define PPC_FEATURE2_ARCH_3_10 0x00040000
/* Bits present in AT_HWCAP for Sparc. */
@@ -591,18 +596,53 @@ typedef struct {
/* Bits present in AT_HWCAP for s390. */
-#define HWCAP_S390_ESAN3 1
-#define HWCAP_S390_ZARCH 2
-#define HWCAP_S390_STFLE 4
-#define HWCAP_S390_MSA 8
-#define HWCAP_S390_LDISP 16
-#define HWCAP_S390_EIMM 32
-#define HWCAP_S390_DFP 64
-#define HWCAP_S390_HPAGE 128
-#define HWCAP_S390_ETF3EH 256
-#define HWCAP_S390_HIGH_GPRS 512
-#define HWCAP_S390_TE 1024
-#define HWCAP_S390_VXRS 2048
+#define HWCAP_S390_NR_ESAN3 0
+#define HWCAP_S390_NR_ZARCH 1
+#define HWCAP_S390_NR_STFLE 2
+#define HWCAP_S390_NR_MSA 3
+#define HWCAP_S390_NR_LDISP 4
+#define HWCAP_S390_NR_EIMM 5
+#define HWCAP_S390_NR_DFP 6
+#define HWCAP_S390_NR_HPAGE 7
+#define HWCAP_S390_NR_ETF3EH 8
+#define HWCAP_S390_NR_HIGH_GPRS 9
+#define HWCAP_S390_NR_TE 10
+#define HWCAP_S390_NR_VXRS 11
+#define HWCAP_S390_NR_VXRS_BCD 12
+#define HWCAP_S390_NR_VXRS_EXT 13
+#define HWCAP_S390_NR_GS 14
+#define HWCAP_S390_NR_VXRS_EXT2 15
+#define HWCAP_S390_NR_VXRS_PDE 16
+#define HWCAP_S390_NR_SORT 17
+#define HWCAP_S390_NR_DFLT 18
+#define HWCAP_S390_NR_VXRS_PDE2 19
+#define HWCAP_S390_NR_NNPA 20
+#define HWCAP_S390_NR_PCI_MIO 21
+#define HWCAP_S390_NR_SIE 22
+
+#define HWCAP_S390_ESAN3 (1 << HWCAP_S390_NR_ESAN3)
+#define HWCAP_S390_ZARCH (1 << HWCAP_S390_NR_ZARCH)
+#define HWCAP_S390_STFLE (1 << HWCAP_S390_NR_STFLE)
+#define HWCAP_S390_MSA (1 << HWCAP_S390_NR_MSA)
+#define HWCAP_S390_LDISP (1 << HWCAP_S390_NR_LDISP)
+#define HWCAP_S390_EIMM (1 << HWCAP_S390_NR_EIMM)
+#define HWCAP_S390_DFP (1 << HWCAP_S390_NR_DFP)
+#define HWCAP_S390_HPAGE (1 << HWCAP_S390_NR_HPAGE)
+#define HWCAP_S390_ETF3EH (1 << HWCAP_S390_NR_ETF3EH)
+#define HWCAP_S390_HIGH_GPRS (1 << HWCAP_S390_NR_HIGH_GPRS)
+#define HWCAP_S390_TE (1 << HWCAP_S390_NR_TE)
+#define HWCAP_S390_VXRS (1 << HWCAP_S390_NR_VXRS)
+#define HWCAP_S390_VXRS_BCD (1 << HWCAP_S390_NR_VXRS_BCD)
+#define HWCAP_S390_VXRS_EXT (1 << HWCAP_S390_NR_VXRS_EXT)
+#define HWCAP_S390_GS (1 << HWCAP_S390_NR_GS)
+#define HWCAP_S390_VXRS_EXT2 (1 << HWCAP_S390_NR_VXRS_EXT2)
+#define HWCAP_S390_VXRS_PDE (1 << HWCAP_S390_NR_VXRS_PDE)
+#define HWCAP_S390_SORT (1 << HWCAP_S390_NR_SORT)
+#define HWCAP_S390_DFLT (1 << HWCAP_S390_NR_DFLT)
+#define HWCAP_S390_VXRS_PDE2 (1 << HWCAP_S390_NR_VXRS_PDE2)
+#define HWCAP_S390_NNPA (1 << HWCAP_S390_NR_NNPA)
+#define HWCAP_S390_PCI_MIO (1 << HWCAP_S390_NR_PCI_MIO)
+#define HWCAP_S390_SIE (1 << HWCAP_S390_NR_SIE)
/* M68K specific definitions. */
/* We use the top 24 bits to encode information about the
@@ -635,29 +675,29 @@ typedef struct {
/*
* 68k ELF relocation types
*/
-#define R_68K_NONE 0
-#define R_68K_32 1
-#define R_68K_16 2
-#define R_68K_8 3
-#define R_68K_PC32 4
-#define R_68K_PC16 5
-#define R_68K_PC8 6
-#define R_68K_GOT32 7
-#define R_68K_GOT16 8
-#define R_68K_GOT8 9
-#define R_68K_GOT32O 10
-#define R_68K_GOT16O 11
-#define R_68K_GOT8O 12
-#define R_68K_PLT32 13
-#define R_68K_PLT16 14
-#define R_68K_PLT8 15
-#define R_68K_PLT32O 16
-#define R_68K_PLT16O 17
-#define R_68K_PLT8O 18
-#define R_68K_COPY 19
-#define R_68K_GLOB_DAT 20
-#define R_68K_JMP_SLOT 21
-#define R_68K_RELATIVE 22
+#define R_68K_NONE 0
+#define R_68K_32 1
+#define R_68K_16 2
+#define R_68K_8 3
+#define R_68K_PC32 4
+#define R_68K_PC16 5
+#define R_68K_PC8 6
+#define R_68K_GOT32 7
+#define R_68K_GOT16 8
+#define R_68K_GOT8 9
+#define R_68K_GOT32O 10
+#define R_68K_GOT16O 11
+#define R_68K_GOT8O 12
+#define R_68K_PLT32 13
+#define R_68K_PLT16 14
+#define R_68K_PLT8 15
+#define R_68K_PLT32O 16
+#define R_68K_PLT16O 17
+#define R_68K_PLT8O 18
+#define R_68K_COPY 19
+#define R_68K_GLOB_DAT 20
+#define R_68K_JMP_SLOT 21
+#define R_68K_RELATIVE 22
/*
* Alpha ELF relocation types
@@ -681,7 +721,7 @@ typedef struct {
#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
-#define R_ALPHA_BRSGP 28
+#define R_ALPHA_BRSGP 28
#define R_ALPHA_TLSGD 29
#define R_ALPHA_TLS_LDM 30
#define R_ALPHA_DTPMOD64 31
@@ -696,78 +736,78 @@ typedef struct {
#define R_ALPHA_TPRELLO 40
#define R_ALPHA_TPREL16 41
-#define SHF_ALPHA_GPREL 0x10000000
+#define SHF_ALPHA_GPREL 0x10000000
/* PowerPC specific definitions. */
/* Processor specific flags for the ELF header e_flags field. */
-#define EF_PPC64_ABI 0x3
+#define EF_PPC64_ABI 0x3
/* PowerPC relocations defined by the ABIs */
-#define R_PPC_NONE 0
-#define R_PPC_ADDR32 1 /* 32bit absolute address */
-#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
-#define R_PPC_ADDR16 3 /* 16bit absolute address */
-#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
-#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
-#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
-#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
-#define R_PPC_ADDR14_BRTAKEN 8
-#define R_PPC_ADDR14_BRNTAKEN 9
-#define R_PPC_REL24 10 /* PC relative 26 bit */
-#define R_PPC_REL14 11 /* PC relative 16 bit */
-#define R_PPC_REL14_BRTAKEN 12
-#define R_PPC_REL14_BRNTAKEN 13
-#define R_PPC_GOT16 14
-#define R_PPC_GOT16_LO 15
-#define R_PPC_GOT16_HI 16
-#define R_PPC_GOT16_HA 17
-#define R_PPC_PLTREL24 18
-#define R_PPC_COPY 19
-#define R_PPC_GLOB_DAT 20
-#define R_PPC_JMP_SLOT 21
-#define R_PPC_RELATIVE 22
-#define R_PPC_LOCAL24PC 23
-#define R_PPC_UADDR32 24
-#define R_PPC_UADDR16 25
-#define R_PPC_REL32 26
-#define R_PPC_PLT32 27
-#define R_PPC_PLTREL32 28
-#define R_PPC_PLT16_LO 29
-#define R_PPC_PLT16_HI 30
-#define R_PPC_PLT16_HA 31
-#define R_PPC_SDAREL16 32
-#define R_PPC_SECTOFF 33
-#define R_PPC_SECTOFF_LO 34
-#define R_PPC_SECTOFF_HI 35
-#define R_PPC_SECTOFF_HA 36
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
/* Keep this the last entry. */
#ifndef R_PPC_NUM
-#define R_PPC_NUM 37
+#define R_PPC_NUM 37
#endif
/* ARM specific declarations */
/* Processor specific flags for the ELF header e_flags field. */
-#define EF_ARM_RELEXEC 0x01
-#define EF_ARM_HASENTRY 0x02
-#define EF_ARM_INTERWORK 0x04
-#define EF_ARM_APCS_26 0x08
-#define EF_ARM_APCS_FLOAT 0x10
-#define EF_ARM_PIC 0x20
-#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
-#define EF_NEW_ABI 0x80
-#define EF_OLD_ABI 0x100
-#define EF_ARM_SOFT_FLOAT 0x200
-#define EF_ARM_VFP_FLOAT 0x400
+#define EF_ARM_RELEXEC 0x01
+#define EF_ARM_HASENTRY 0x02
+#define EF_ARM_INTERWORK 0x04
+#define EF_ARM_APCS_26 0x08
+#define EF_ARM_APCS_FLOAT 0x10
+#define EF_ARM_PIC 0x20
+#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
+#define EF_NEW_ABI 0x80
+#define EF_OLD_ABI 0x100
+#define EF_ARM_SOFT_FLOAT 0x200
+#define EF_ARM_VFP_FLOAT 0x400
#define EF_ARM_MAVERICK_FLOAT 0x800
/* Other constants defined in the ARM ELF spec. version B-01. */
-#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
-#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
-#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
-#define EF_ARM_EABIMASK 0xFF000000
+#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
+#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
+#define EF_ARM_EABIMASK 0xFF000000
/* Constants defined in AAELF. */
#define EF_ARM_BE8 0x00800000
@@ -794,46 +834,46 @@ typedef struct {
addressed by the static base */
/* ARM relocs. */
-#define R_ARM_NONE 0 /* No reloc */
-#define R_ARM_PC24 1 /* PC relative 26 bit branch */
-#define R_ARM_ABS32 2 /* Direct 32 bit */
-#define R_ARM_REL32 3 /* PC relative 32 bit */
-#define R_ARM_PC13 4
-#define R_ARM_ABS16 5 /* Direct 16 bit */
-#define R_ARM_ABS12 6 /* Direct 12 bit */
-#define R_ARM_THM_ABS5 7
-#define R_ARM_ABS8 8 /* Direct 8 bit */
-#define R_ARM_SBREL32 9
-#define R_ARM_THM_PC22 10
-#define R_ARM_THM_PC8 11
-#define R_ARM_AMP_VCALL9 12
-#define R_ARM_SWI24 13
-#define R_ARM_THM_SWI8 14
-#define R_ARM_XPC25 15
-#define R_ARM_THM_XPC22 16
-#define R_ARM_COPY 20 /* Copy symbol at runtime */
-#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
-#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
-#define R_ARM_RELATIVE 23 /* Adjust by program base */
-#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
-#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
-#define R_ARM_GOT32 26 /* 32 bit GOT entry */
-#define R_ARM_PLT32 27 /* 32 bit PLT address */
+#define R_ARM_NONE 0 /* No reloc */
+#define R_ARM_PC24 1 /* PC relative 26 bit branch */
+#define R_ARM_ABS32 2 /* Direct 32 bit */
+#define R_ARM_REL32 3 /* PC relative 32 bit */
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5 /* Direct 16 bit */
+#define R_ARM_ABS12 6 /* Direct 12 bit */
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8 /* Direct 8 bit */
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+#define R_ARM_COPY 20 /* Copy symbol at runtime */
+#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
+#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
+#define R_ARM_RELATIVE 23 /* Adjust by program base */
+#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
+#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
+#define R_ARM_GOT32 26 /* 32 bit GOT entry */
+#define R_ARM_PLT32 27 /* 32 bit PLT address */
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
-#define R_ARM_GNU_VTENTRY 100
-#define R_ARM_GNU_VTINHERIT 101
-#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
-#define R_ARM_THM_PC9 103 /* thumb conditional branch */
-#define R_ARM_RXPC25 249
-#define R_ARM_RSBREL32 250
-#define R_ARM_THM_RPC22 251
-#define R_ARM_RREL32 252
-#define R_ARM_RABS22 253
-#define R_ARM_RPC24 254
-#define R_ARM_RBASE 255
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
+#define R_ARM_THM_PC9 103 /* thumb conditional branch */
+#define R_ARM_RXPC25 249
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS22 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
/* Keep this the last entry. */
-#define R_ARM_NUM 256
+#define R_ARM_NUM 256
/* ARM Aarch64 relocation types */
#define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */
@@ -963,385 +1003,385 @@ typedef struct {
#define R_AARCH64_TLS_TPREL32 1033
/* s390 relocations defined by the ABIs */
-#define R_390_NONE 0 /* No reloc. */
-#define R_390_8 1 /* Direct 8 bit. */
-#define R_390_12 2 /* Direct 12 bit. */
-#define R_390_16 3 /* Direct 16 bit. */
-#define R_390_32 4 /* Direct 32 bit. */
-#define R_390_PC32 5 /* PC relative 32 bit. */
-#define R_390_GOT12 6 /* 12 bit GOT offset. */
-#define R_390_GOT32 7 /* 32 bit GOT offset. */
-#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
-#define R_390_COPY 9 /* Copy symbol at runtime. */
-#define R_390_GLOB_DAT 10 /* Create GOT entry. */
-#define R_390_JMP_SLOT 11 /* Create PLT entry. */
-#define R_390_RELATIVE 12 /* Adjust by program base. */
-#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
-#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
-#define R_390_GOT16 15 /* 16 bit GOT offset. */
-#define R_390_PC16 16 /* PC relative 16 bit. */
-#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
-#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
-#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
-#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
-#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
-#define R_390_64 22 /* Direct 64 bit. */
-#define R_390_PC64 23 /* PC relative 64 bit. */
-#define R_390_GOT64 24 /* 64 bit GOT offset. */
-#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
-#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
-#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
-#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
-#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
-#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
-#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
-#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
-#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
-#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
-#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
-#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
-#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
-#define R_390_TLS_GDCALL 38 /* Tag for function call in general
- dynamic TLS code. */
-#define R_390_TLS_LDCALL 39 /* Tag for function call in local
- dynamic TLS code. */
-#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
- thread local data. */
-#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
- thread local data. */
-#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
- thread local data in LD code. */
-#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
- thread local data in LD code. */
-#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
- static TLS block. */
-#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
- static TLS block. */
-#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
- block. */
-#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
- block. */
-#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
-#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
-#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
+#define R_390_NONE 0 /* No reloc. */
+#define R_390_8 1 /* Direct 8 bit. */
+#define R_390_12 2 /* Direct 12 bit. */
+#define R_390_16 3 /* Direct 16 bit. */
+#define R_390_32 4 /* Direct 32 bit. */
+#define R_390_PC32 5 /* PC relative 32 bit. */
+#define R_390_GOT12 6 /* 12 bit GOT offset. */
+#define R_390_GOT32 7 /* 32 bit GOT offset. */
+#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
+#define R_390_COPY 9 /* Copy symbol at runtime. */
+#define R_390_GLOB_DAT 10 /* Create GOT entry. */
+#define R_390_JMP_SLOT 11 /* Create PLT entry. */
+#define R_390_RELATIVE 12 /* Adjust by program base. */
+#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
+#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
+#define R_390_GOT16 15 /* 16 bit GOT offset. */
+#define R_390_PC16 16 /* PC relative 16 bit. */
+#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
+#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
+#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
+#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
+#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
+#define R_390_64 22 /* Direct 64 bit. */
+#define R_390_PC64 23 /* PC relative 64 bit. */
+#define R_390_GOT64 24 /* 64 bit GOT offset. */
+#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
+#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
+#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
+#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
+#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
+#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
+#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
+#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
+#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general
+ dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local
+ dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
+ block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
+ block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS
block. */
#define R_390_20 57
/* Keep this the last entry. */
#define R_390_NUM 58
/* x86-64 relocation types */
-#define R_X86_64_NONE 0 /* No reloc */
-#define R_X86_64_64 1 /* Direct 64 bit */
-#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
-#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
-#define R_X86_64_PLT32 4 /* 32 bit PLT address */
-#define R_X86_64_COPY 5 /* Copy symbol at runtime */
-#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
-#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
-#define R_X86_64_RELATIVE 8 /* Adjust by program base */
-#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
-#define R_X86_64_32 10 /* Direct 32 bit zero extended */
-#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
-#define R_X86_64_16 12 /* Direct 16 bit zero extended */
-#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
-#define R_X86_64_8 14 /* Direct 8 bit sign extended */
-#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-#define R_X86_64_NUM 16
+#define R_X86_64_NUM 16
/* Legal values for e_flags field of Elf64_Ehdr. */
-#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
+#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
/* HPPA specific definitions. */
/* Legal values for e_flags field of Elf32_Ehdr. */
-#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
-#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
-#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
-#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
-#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
+#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
+#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
+#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
prediction. */
-#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
-#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
+#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
+#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
-#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
-#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
-#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
+#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
+#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
+#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
-/* Additional section indeces. */
+/* Additional section indices. */
-#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared
+#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
symbols in ANSI C. */
-#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
+#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
/* Legal values for sh_type field of Elf32_Shdr. */
-#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
-#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
-#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
+#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
+#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
/* Legal values for sh_flags field of Elf32_Shdr. */
-#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
-#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
-#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
+#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
+#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
/* Legal values for ST_TYPE subfield of st_info (symbol type). */
-#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
+#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
-#define STT_HP_OPAQUE (STT_LOOS + 0x1)
-#define STT_HP_STUB (STT_LOOS + 0x2)
+#define STT_HP_OPAQUE (STT_LOOS + 0x1)
+#define STT_HP_STUB (STT_LOOS + 0x2)
/* HPPA relocs. */
-#define R_PARISC_NONE 0 /* No reloc. */
-#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
-#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
-#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
-#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
-#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
-#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
-#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
-#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
-#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
-#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
-#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
-#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
-#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
-#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
-#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
-#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
-#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
-#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
-#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
-#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
-#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
-#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
-#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
-#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
-#define R_PARISC_FPTR64 64 /* 64 bits function address. */
-#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
-#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
-#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
-#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
-#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
-#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
-#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
-#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
-#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
-#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
-#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
-#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
-#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
-#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
-#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
-#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
-#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
-#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
-#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
-#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
-#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
-#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
-#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
-#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
-#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
-#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
-#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
-#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
-#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
-#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
-#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
-#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
-#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
-#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LORESERVE 128
-#define R_PARISC_COPY 128 /* Copy relocation. */
-#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
-#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
-#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
-#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
-#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
-#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
-#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
-#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
-#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
-#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
-#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_HIRESERVE 255
+#define R_PARISC_NONE 0 /* No reloc. */
+#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
+#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
+#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
+#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
+#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
+#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
+#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
+#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
+#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
+#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
+#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
+#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
+#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
+#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
+#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
+#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
+#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
+#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
+#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
+#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
+#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64 64 /* 64 bits function address. */
+#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
+#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
+#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
+#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
+#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
+#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
+#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
+#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
+#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
+#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
+#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
+#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
+#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
+#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
+#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
+#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
+#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF16F 117 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
+#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LORESERVE 128
+#define R_PARISC_COPY 128 /* Copy relocation. */
+#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
+#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
+#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
+#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
+#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
+#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_HIRESERVE 255
/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
-#define PT_HP_TLS (PT_LOOS + 0x0)
-#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
-#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
-#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
-#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
-#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
-#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
-#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
-#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
-#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
-#define PT_HP_PARALLEL (PT_LOOS + 0x10)
-#define PT_HP_FASTBIND (PT_LOOS + 0x11)
-#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
-#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
-#define PT_HP_STACK (PT_LOOS + 0x14)
-
-#define PT_PARISC_ARCHEXT 0x70000000
-#define PT_PARISC_UNWIND 0x70000001
+#define PT_HP_TLS (PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
+#define PT_HP_PARALLEL (PT_LOOS + 0x10)
+#define PT_HP_FASTBIND (PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
+#define PT_HP_STACK (PT_LOOS + 0x14)
+
+#define PT_PARISC_ARCHEXT 0x70000000
+#define PT_PARISC_UNWIND 0x70000001
/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
-#define PF_PARISC_SBP 0x08000000
+#define PF_PARISC_SBP 0x08000000
-#define PF_HP_PAGE_SIZE 0x00100000
-#define PF_HP_FAR_SHARED 0x00200000
-#define PF_HP_NEAR_SHARED 0x00400000
-#define PF_HP_CODE 0x01000000
-#define PF_HP_MODIFY 0x02000000
-#define PF_HP_LAZYSWAP 0x04000000
-#define PF_HP_SBP 0x08000000
+#define PF_HP_PAGE_SIZE 0x00100000
+#define PF_HP_FAR_SHARED 0x00200000
+#define PF_HP_NEAR_SHARED 0x00400000
+#define PF_HP_CODE 0x01000000
+#define PF_HP_MODIFY 0x02000000
+#define PF_HP_LAZYSWAP 0x04000000
+#define PF_HP_SBP 0x08000000
/* IA-64 specific declarations. */
/* Processor specific flags for the Ehdr e_flags field. */
-#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
-#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
-#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
+#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
+#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
+#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
/* Processor specific values for the Phdr p_type field. */
-#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
-#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
+#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
+#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
/* Processor specific flags for the Phdr p_flags field. */
-#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
+#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
/* Processor specific values for the Shdr sh_type field. */
-#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
-#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
+#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
+#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
/* Processor specific flags for the Shdr sh_flags field. */
-#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
-#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
/* Processor specific values for the Dyn d_tag field. */
-#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
-#define DT_IA_64_NUM 1
+#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
+#define DT_IA_64_NUM 1
/* IA-64 relocations. */
-#define R_IA64_NONE 0x00 /* none */
-#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
-#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
-#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
-#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
-#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
-#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
-#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
-#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
-#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
-#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
-#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
-#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
-#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
-#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
-#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
-#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
-#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
-#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
-#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
-#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
-#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
-#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
-#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
-#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
-#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
-#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
-#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
-#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
-#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
-#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
-#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
-#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
-#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
-#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
-#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
-#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
-#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
-#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
-#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
-#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
-#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
-#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
-#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
-#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
-#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
-#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
-#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
-#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
-#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
-#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
-#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
-#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
-#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
-#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
-#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
-#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
-#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
-#define R_IA64_COPY 0x84 /* copy relocation */
-#define R_IA64_SUB 0x85 /* Addend and symbol difference */
-#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
-#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
-#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
-#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
-#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
-#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
-#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */
-#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
-#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
-#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
-#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
-#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
-#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
-#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
-#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
-#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
-#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* copy relocation */
+#define R_IA64_SUB 0x85 /* Addend and symbol difference */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
/* RISC-V relocations. */
#define R_RISCV_NONE 0
@@ -1409,47 +1449,47 @@ typedef struct {
#define EF_RISCV_TSO 0x0010
typedef struct elf32_rel {
- Elf32_Addr r_offset;
- Elf32_Word r_info;
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
} Elf32_Rel;
typedef struct elf64_rel {
- Elf64_Addr r_offset; /* Location at which to apply the action */
- Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
} Elf64_Rel;
typedef struct elf32_rela{
- Elf32_Addr r_offset;
- Elf32_Word r_info;
- Elf32_Sword r_addend;
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
} Elf32_Rela;
typedef struct elf64_rela {
- Elf64_Addr r_offset; /* Location at which to apply the action */
- Elf64_Xword r_info; /* index and type of relocation */
- Elf64_Sxword r_addend; /* Constant addend used to compute value */
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Sxword r_addend; /* Constant addend used to compute value */
} Elf64_Rela;
typedef struct elf32_sym{
- Elf32_Word st_name;
- Elf32_Addr st_value;
- Elf32_Word st_size;
- unsigned char st_info;
- unsigned char st_other;
- Elf32_Half st_shndx;
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
} Elf32_Sym;
typedef struct elf64_sym {
- Elf64_Word st_name; /* Symbol name, index in string tbl */
- unsigned char st_info; /* Type and binding attributes */
- unsigned char st_other; /* No defined meaning, 0 */
- Elf64_Half st_shndx; /* Associated section index */
- Elf64_Addr st_value; /* Value of the symbol */
- Elf64_Xword st_size; /* Associated symbol size */
+ Elf64_Word st_name; /* Symbol name, index in string tbl */
+ unsigned char st_info; /* Type and binding attributes */
+ unsigned char st_other; /* No defined meaning, 0 */
+ Elf64_Half st_shndx; /* Associated section index */
+ Elf64_Addr st_value; /* Value of the symbol */
+ Elf64_Xword st_size; /* Associated symbol size */
} Elf64_Sym;
-#define EI_NIDENT 16
+#define EI_NIDENT 16
/* Special value for e_phnum. This indicates that the real number of
program headers is too large to fit into e_phnum. Instead the real
@@ -1457,30 +1497,30 @@ typedef struct elf64_sym {
#define PN_XNUM 0xffff
typedef struct elf32_hdr{
- unsigned char e_ident[EI_NIDENT];
- Elf32_Half e_type;
- Elf32_Half e_machine;
- Elf32_Word e_version;
- Elf32_Addr e_entry; /* Entry point */
- Elf32_Off e_phoff;
- Elf32_Off e_shoff;
- Elf32_Word e_flags;
- Elf32_Half e_ehsize;
- Elf32_Half e_phentsize;
- Elf32_Half e_phnum;
- Elf32_Half e_shentsize;
- Elf32_Half e_shnum;
- Elf32_Half e_shstrndx;
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry; /* Entry point */
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
} Elf32_Ehdr;
typedef struct elf64_hdr {
- unsigned char e_ident[16]; /* ELF "magic number" */
+ unsigned char e_ident[16]; /* ELF "magic number" */
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
- Elf64_Addr e_entry; /* Entry point virtual address */
- Elf64_Off e_phoff; /* Program header table file offset */
- Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
@@ -1492,107 +1532,107 @@ typedef struct elf64_hdr {
/* These constants define the permissions on sections in the program
header, p_flags. */
-#define PF_R 0x4
-#define PF_W 0x2
-#define PF_X 0x1
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
typedef struct elf32_phdr{
- Elf32_Word p_type;
- Elf32_Off p_offset;
- Elf32_Addr p_vaddr;
- Elf32_Addr p_paddr;
- Elf32_Word p_filesz;
- Elf32_Word p_memsz;
- Elf32_Word p_flags;
- Elf32_Word p_align;
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
} Elf32_Phdr;
typedef struct elf64_phdr {
Elf64_Word p_type;
Elf64_Word p_flags;
- Elf64_Off p_offset; /* Segment file offset */
- Elf64_Addr p_vaddr; /* Segment virtual address */
- Elf64_Addr p_paddr; /* Segment physical address */
- Elf64_Xword p_filesz; /* Segment size in file */
- Elf64_Xword p_memsz; /* Segment size in memory */
- Elf64_Xword p_align; /* Segment alignment, file & memory */
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment, file & memory */
} Elf64_Phdr;
/* sh_type */
-#define SHT_NULL 0
-#define SHT_PROGBITS 1
-#define SHT_SYMTAB 2
-#define SHT_STRTAB 3
-#define SHT_RELA 4
-#define SHT_HASH 5
-#define SHT_DYNAMIC 6
-#define SHT_NOTE 7
-#define SHT_NOBITS 8
-#define SHT_REL 9
-#define SHT_SHLIB 10
-#define SHT_DYNSYM 11
-#define SHT_NUM 12
-#define SHT_LOPROC 0x70000000
-#define SHT_HIPROC 0x7fffffff
-#define SHT_LOUSER 0x80000000
-#define SHT_HIUSER 0xffffffff
-#define SHT_MIPS_LIST 0x70000000
-#define SHT_MIPS_CONFLICT 0x70000002
-#define SHT_MIPS_GPTAB 0x70000003
-#define SHT_MIPS_UCODE 0x70000004
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+#define SHT_NUM 12
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+#define SHT_MIPS_LIST 0x70000000
+#define SHT_MIPS_CONFLICT 0x70000002
+#define SHT_MIPS_GPTAB 0x70000003
+#define SHT_MIPS_UCODE 0x70000004
/* sh_flags */
-#define SHF_WRITE 0x1
-#define SHF_ALLOC 0x2
-#define SHF_EXECINSTR 0x4
-#define SHF_MASKPROC 0xf0000000
-#define SHF_MIPS_GPREL 0x10000000
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+#define SHF_MIPS_GPREL 0x10000000
/* special section indexes */
-#define SHN_UNDEF 0
-#define SHN_LORESERVE 0xff00
-#define SHN_LOPROC 0xff00
-#define SHN_HIPROC 0xff1f
-#define SHN_ABS 0xfff1
-#define SHN_COMMON 0xfff2
-#define SHN_HIRESERVE 0xffff
-#define SHN_MIPS_ACCOMON 0xff00
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+#define SHN_MIPS_ACCOMON 0xff00
typedef struct elf32_shdr {
- Elf32_Word sh_name;
- Elf32_Word sh_type;
- Elf32_Word sh_flags;
- Elf32_Addr sh_addr;
- Elf32_Off sh_offset;
- Elf32_Word sh_size;
- Elf32_Word sh_link;
- Elf32_Word sh_info;
- Elf32_Word sh_addralign;
- Elf32_Word sh_entsize;
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
} Elf32_Shdr;
typedef struct elf64_shdr {
- Elf64_Word sh_name; /* Section name, index in string tbl */
- Elf64_Word sh_type; /* Type of section */
- Elf64_Xword sh_flags; /* Miscellaneous section attributes */
- Elf64_Addr sh_addr; /* Section virtual addr at execution */
- Elf64_Off sh_offset; /* Section file offset */
- Elf64_Xword sh_size; /* Size of section in bytes */
- Elf64_Word sh_link; /* Index of another section */
- Elf64_Word sh_info; /* Additional section information */
- Elf64_Xword sh_addralign; /* Section alignment */
- Elf64_Xword sh_entsize; /* Entry size if section holds table */
+ Elf64_Word sh_name; /* Section name, index in string tbl */
+ Elf64_Word sh_type; /* Type of section */
+ Elf64_Xword sh_flags; /* Miscellaneous section attributes */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Size of section in bytes */
+ Elf64_Word sh_link; /* Index of another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
} Elf64_Shdr;
-#define EI_MAG0 0 /* e_ident[] indexes */
-#define EI_MAG1 1
-#define EI_MAG2 2
-#define EI_MAG3 3
-#define EI_CLASS 4
-#define EI_DATA 5
-#define EI_VERSION 6
-#define EI_OSABI 7
-#define EI_PAD 8
+#define EI_MAG0 0 /* e_ident[] indexes */
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_OSABI 7
+#define EI_PAD 8
#define ELFOSABI_NONE 0 /* UNIX System V ABI */
#define ELFOSABI_SYSV 0 /* Alias. */
@@ -1607,54 +1647,75 @@ typedef struct elf64_shdr {
#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC */
+#define ELFOSABI_XTENSA_FDPIC 65 /* Xtensa FDPIC */
#define ELFOSABI_ARM 97 /* ARM */
#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
-#define ELFMAG0 0x7f /* EI_MAG */
-#define ELFMAG1 'E'
-#define ELFMAG2 'L'
-#define ELFMAG3 'F'
-#define ELFMAG "\177ELF"
-#define SELFMAG 4
+#define ELFMAG0 0x7f /* EI_MAG */
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
-#define ELFCLASSNONE 0 /* EI_CLASS */
-#define ELFCLASS32 1
-#define ELFCLASS64 2
-#define ELFCLASSNUM 3
+#define ELFCLASSNONE 0 /* EI_CLASS */
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFCLASSNUM 3
-#define ELFDATANONE 0 /* e_ident[EI_DATA] */
-#define ELFDATA2LSB 1
-#define ELFDATA2MSB 2
+#define ELFDATANONE 0 /* e_ident[EI_DATA] */
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
-#define EV_NONE 0 /* e_version, EI_VERSION */
-#define EV_CURRENT 1
-#define EV_NUM 2
+#define EV_NONE 0 /* e_version, EI_VERSION */
+#define EV_CURRENT 1
+#define EV_NUM 2
/* Notes used in ET_CORE */
-#define NT_PRSTATUS 1
-#define NT_FPREGSET 2
-#define NT_PRFPREG 2
-#define NT_PRPSINFO 3
-#define NT_TASKSTRUCT 4
-#define NT_AUXV 6
-#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
-#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
-#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
-#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */
-#define NT_S390_PREFIX 0x305 /* s390 prefix register */
-#define NT_S390_CTRS 0x304 /* s390 control registers */
-#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
-#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
-#define NT_S390_TIMER 0x301 /* s390 timer register */
-#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
-#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
-#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
-#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
-#define NT_ARM_TLS 0x401 /* ARM TLS register */
-#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
-#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
-#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
-#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */
+#define NT_PRSTATUS 1
+#define NT_FPREGSET 2
+#define NT_PRFPREG 2
+#define NT_PRPSINFO 3
+#define NT_TASKSTRUCT 4
+#define NT_AUXV 6
+#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
+#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */
+#define NT_S390_PREFIX 0x305 /* s390 prefix register */
+#define NT_S390_CTRS 0x304 /* s390 control registers */
+#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
+#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
+#define NT_S390_TIMER 0x301 /* s390 timer register */
+#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
+#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
+#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
+#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
+#define NT_ARM_TLS 0x401 /* ARM TLS register */
+#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
+#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
+#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */
+
+/* Defined note types for GNU systems. */
+
+#define NT_GNU_PROPERTY_TYPE_0 5 /* Program property */
+
+/* Values used in GNU .note.gnu.property notes (NT_GNU_PROPERTY_TYPE_0). */
+
+#define GNU_PROPERTY_STACK_SIZE 1
+#define GNU_PROPERTY_NO_COPY_ON_PROTECTED 2
+
+#define GNU_PROPERTY_LOPROC 0xc0000000
+#define GNU_PROPERTY_HIPROC 0xdfffffff
+#define GNU_PROPERTY_LOUSER 0xe0000000
+#define GNU_PROPERTY_HIUSER 0xffffffff
+
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1u << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1u << 1)
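The two AArch64 feature flags above are bits in the 32-bit payload of a GNU_PROPERTY_AARCH64_FEATURE_1_AND property carried in an NT_GNU_PROPERTY_TYPE_0 note. A minimal sketch of testing them, assuming pr_data already holds that payload word (locating the property inside the note is not shown here):

/* Sketch only: pr_data is assumed to be the payload of a
 * GNU_PROPERTY_AARCH64_FEATURE_1_AND property. */
uint32_t pr_data = 0; /* ... filled in from the parsed note ... */
bool has_bti = (pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0;
bool has_pac = (pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_PAC) != 0;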
/*
* Physical entry point into the kernel.
@@ -1669,16 +1730,16 @@ typedef struct elf64_shdr {
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
- Elf32_Word n_namesz; /* Name size */
- Elf32_Word n_descsz; /* Content size */
- Elf32_Word n_type; /* Content type */
+ Elf32_Word n_namesz; /* Name size */
+ Elf32_Word n_descsz; /* Content size */
+ Elf32_Word n_type; /* Content type */
} Elf32_Nhdr;
/* Note header in a PT_NOTE section */
typedef struct elf64_note {
- Elf64_Word n_namesz; /* Name size */
- Elf64_Word n_descsz; /* Content size */
- Elf64_Word n_type; /* Content type */
+ Elf64_Word n_namesz; /* Name size */
+ Elf64_Word n_descsz; /* Content size */
+ Elf64_Word n_type; /* Content type */
} Elf64_Nhdr;
@@ -1703,13 +1764,13 @@ struct elf32_fdpic_loadmap {
#ifdef ELF_CLASS
#if ELF_CLASS == ELFCLASS32
-#define elfhdr elf32_hdr
-#define elf_phdr elf32_phdr
-#define elf_note elf32_note
-#define elf_shdr elf32_shdr
-#define elf_sym elf32_sym
-#define elf_addr_t Elf32_Off
-#define elf_rela elf32_rela
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_shdr elf32_shdr
+#define elf_sym elf32_sym
+#define elf_addr_t Elf32_Off
+#define elf_rela elf32_rela
#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf32_Rela
@@ -1719,13 +1780,13 @@ struct elf32_fdpic_loadmap {
#else
-#define elfhdr elf64_hdr
-#define elf_phdr elf64_phdr
-#define elf_note elf64_note
-#define elf_shdr elf64_shdr
-#define elf_sym elf64_sym
-#define elf_addr_t Elf64_Off
-#define elf_rela elf64_rela
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_shdr elf64_shdr
+#define elf_sym elf64_sym
+#define elf_addr_t Elf64_Off
+#define elf_rela elf64_rela
#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf64_Rela
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
index db8bfa9a92..0d0aa61d68 100644
--- a/include/exec/address-spaces.h
+++ b/include/exec/address-spaces.h
@@ -19,8 +19,6 @@
* you're one of them.
*/
-#include "exec/memory.h"
-
#ifndef CONFIG_USER_ONLY
/* Get the root memory region. This interface should only be used temporarily
diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h
new file mode 100644
index 0000000000..e5b188cffb
--- /dev/null
+++ b/include/exec/confidential-guest-support.h
@@ -0,0 +1,94 @@
+/*
+ * QEMU Confidential Guest support
+ * This interface describes the common pieces between various
+ * schemes for protecting guest memory or other state against a
+ * compromised hypervisor. This includes memory encryption (AMD's
+ * SEV and Intel's MKTME) or special protection modes (PEF on POWER,
+ * or PV on s390x).
+ *
+ * Copyright Red Hat.
+ *
+ * Authors:
+ * David Gibson <david@gibson.dropbear.id.au>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "qom/object.h"
+
+#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support"
+OBJECT_DECLARE_TYPE(ConfidentialGuestSupport,
+ ConfidentialGuestSupportClass,
+ CONFIDENTIAL_GUEST_SUPPORT)
+
+
+struct ConfidentialGuestSupport {
+ Object parent;
+
+ /*
+ * ready: flag set by CGS initialization code once it's ready to
+ * start executing instructions in a potentially-secure
+ * guest
+ *
+ * The definition here is a bit fuzzy, because this is essentially
+ * part of a self-sanity-check, rather than a strict mechanism.
+ *
+ * It's not feasible to have a single point in the common machine
+ * init path to configure confidential guest support, because
+ * different mechanisms have different interdependencies requiring
+ * initialization in different places, often in arch or machine
+ * type specific code. It's also usually not possible to check
+ * for invalid configurations until that initialization code.
+ * That means it would be very easy to have a bug allowing CGS
+ * init to be bypassed entirely in certain configurations.
+ *
+ * Silently ignoring a requested security feature would be bad, so
+ * to avoid that we check late in init that this 'ready' flag is
+ * set if CGS was requested. If the CGS init hasn't happened, and
+ * so 'ready' is not set, we'll abort.
+ */
+ bool ready;
+};
+
+typedef struct ConfidentialGuestSupportClass {
+ ObjectClass parent;
+
+ int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp);
+ int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp);
+} ConfidentialGuestSupportClass;
+
+static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_init) {
+ return klass->kvm_init(cgs, errp);
+ }
+
+ return 0;
+}
+
+static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs,
+ Error **errp)
+{
+ ConfidentialGuestSupportClass *klass;
+
+ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs);
+ if (klass->kvm_reset) {
+ return klass->kvm_reset(cgs, errp);
+ }
+
+ return 0;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */
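A rough usage sketch for the helpers above, assuming the caller obtained a ConfidentialGuestSupport object from its machine configuration; the function name and error handling are illustrative, not part of this header:

/* Illustrative only: dispatches into the mechanism-specific hook and
 * relies on the 'ready' flag as the late sanity check described above. */
static int example_confidential_guest_init(ConfidentialGuestSupport *cgs,
                                           Error **errp)
{
    if (!cgs) {
        return 0;                    /* no confidential guest requested */
    }
    if (confidential_guest_kvm_init(cgs, errp) < 0) {
        return -1;                   /* mechanism-specific init failed */
    }
    /* Generic code can later assert(cgs->ready) to catch a bypassed init. */
    return 0;
}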
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index fc403d456b..1a6510fd3b 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,92 +21,23 @@
#include "exec/cpu-common.h"
#include "exec/memory.h"
+#include "exec/tswap.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/rcu.h"
-#define EXCP_INTERRUPT 0x10000 /* async interruption */
-#define EXCP_HLT 0x10001 /* hlt instruction reached */
-#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
-#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
-#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
-
/* some important defines:
*
- * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * HOST_BIG_ENDIAN : whether the host cpu is big endian and
* otherwise little endian.
*
- * TARGET_WORDS_BIGENDIAN : same for target cpu
+ * TARGET_BIG_ENDIAN : same for the target cpu
*/
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif
-#ifdef BSWAP_NEEDED
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return bswap16(s);
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return bswap32(s);
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return bswap64(s);
-}
-
-static inline void tswap16s(uint16_t *s)
-{
- *s = bswap16(*s);
-}
-
-static inline void tswap32s(uint32_t *s)
-{
- *s = bswap32(*s);
-}
-
-static inline void tswap64s(uint64_t *s)
-{
- *s = bswap64(*s);
-}
-
-#else
-
-static inline uint16_t tswap16(uint16_t s)
-{
- return s;
-}
-
-static inline uint32_t tswap32(uint32_t s)
-{
- return s;
-}
-
-static inline uint64_t tswap64(uint64_t s)
-{
- return s;
-}
-
-static inline void tswap16s(uint16_t *s)
-{
-}
-
-static inline void tswap32s(uint32_t *s)
-{
-}
-
-static inline void tswap64s(uint64_t *s)
-{
-}
-
-#endif
-
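The dropped helpers are the target-endian swap primitives, which the "exec/tswap.h" include added above now provides; the behaviour is unchanged: with BSWAP_NEEDED defined (host and target endianness differ) each tswapN() byte-swaps its argument, otherwise it is the identity. A minimal sketch of the 32-bit case, equivalent to the removed code:

/* Equivalent behaviour of tswap32(), for illustration only. */
static inline uint32_t tswap32_sketch(uint32_t s)
{
#ifdef BSWAP_NEEDED
    return bswap32(s);   /* host and target endianness differ */
#else
    return s;            /* same endianness, no swap needed */
#endif
}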
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
@@ -120,18 +51,14 @@ static inline void tswap64s(uint64_t *s)
/* Target-endianness CPU memory access functions. These fit into the
* {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
*/
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
-#define ldfl_p(p) ldfl_be_p(p)
-#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
-#define stfl_p(p, v) stfl_be_p(p, v)
-#define stfq_p(p, v) stfq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
@@ -139,13 +66,9 @@ static inline void tswap64s(uint64_t *s)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
-#define ldfl_p(p) ldfl_le_p(p)
-#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
-#define stfl_p(p, v) stfl_le_p(p, v)
-#define stfq_p(p, v) stfq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
@@ -154,12 +77,18 @@ static inline void tswap64s(uint64_t *s)
#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"
+#include "exec/user/guest-base.h"
-/* On some host systems the guest address space is reserved on the host.
- * This allows the guest address space to be offset to a convenient location.
- */
-extern unsigned long guest_base;
extern bool have_guest_base;
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default. The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
extern unsigned long reserved_va;
/*
@@ -179,7 +108,7 @@ extern unsigned long reserved_va;
#define GUEST_ADDR_MAX_ \
((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
#else
@@ -189,13 +118,13 @@ extern unsigned long reserved_va;
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
@@ -207,38 +136,31 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
/* Inline fast path for direct RAM access. */
#define ENDIANNESS
-#include "exec/memory_ldst_cached.inc.h"
+#include "exec/memory_ldst_cached.h.inc"
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
#endif
/* page related stuff */
#ifdef TARGET_PAGE_BITS_VARY
-typedef struct {
- bool decided;
- int bits;
- target_long mask;
-} TargetPageBits;
-#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
+# include "exec/page-vary.h"
extern const TargetPageBits target_page;
-#else
-extern TargetPageBits target_page;
-#endif
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
-#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
+ (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS target_page.bits
-#define TARGET_PAGE_MASK target_page.mask
+#define TARGET_PAGE_MASK ((target_long)target_page.mask)
#endif
#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
@@ -249,31 +171,15 @@ extern TargetPageBits target_page;
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
-/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
- * when intptr_t is 32-bit and we are aligning a long long.
- */
-extern uintptr_t qemu_host_page_size;
-extern intptr_t qemu_host_page_mask;
-
-#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
-#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
-
-/* same as PROT_xxx */
-#define PAGE_READ 0x0001
-#define PAGE_WRITE 0x0002
-#define PAGE_EXEC 0x0004
-#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
-#define PAGE_VALID 0x0008
-/* original state of the write flag (used when tracking self-modifying
- code */
-#define PAGE_WRITE_ORG 0x0010
-/* Invalidate the TLB entry immediately, helpful for s390x
- * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
-#define PAGE_WRITE_INV 0x0040
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
-#define PAGE_RESERVED 0x0020
+#define PAGE_RESERVED 0x0100
#endif
+/*
+ * For linux-user, indicates that the page is mapped with the same semantics
+ * in both guest and host.
+ */
+#define PAGE_PASSTHROUGH 0x0800
#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);
@@ -283,8 +189,61 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
int walk_memory_regions(void *, walk_memory_regions_fn);
int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
+void page_reset_target_data(target_ulong start, target_ulong last);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x. Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+ target_ulong len, target_ulong align);
+
+/**
+ * page_get_target_data(address)
+ * @address: guest virtual address
+ *
+ * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary. The
+ * caller should already have verified that the address is valid.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+void *page_get_target_data(target_ulong address)
+ __attribute__((returns_nonnull));
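/*
 * Illustrative sketch, not part of this patch: how a linux-user mmap-style
 * path might combine the page-flag API declared above.  The function name is
 * hypothetical; the mmap lock is assumed to be held by the caller.
 */
static target_ulong example_map_anon_pages(target_ulong min, target_ulong max,
                                           target_ulong len)
{
    /* Find an unmapped, page-aligned hole inside [min, max]. */
    target_ulong start = page_find_range_empty(min, max, len, TARGET_PAGE_SIZE);

    if (start == (target_ulong)-1) {
        return -1;                          /* no hole large enough */
    }
    /* Mark the pages valid and writable; PAGE_RESET clears old target_data. */
    page_set_flags(start, start + len - 1,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_RESET);
    /* A later access path can then verify permissions over the range. */
    assert(page_check_range(start, len, PAGE_READ | PAGE_WRITE));
    return start;
}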
#endif
CPUArchState *cpu_copy(CPUArchState *env);
@@ -349,9 +308,13 @@ CPUArchState *cpu_copy(CPUArchState *env);
* be signaled by probe_access_flags().
*/
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-#define TLB_MMIO 0
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT 0
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return MMU_USER_IDX;
+}
#else
/*
@@ -362,6 +325,9 @@ CPUArchState *cpu_copy(CPUArchState *env);
*
* Use TARGET_PAGE_BITS_MIN so that these bits are constant
* when TARGET_PAGE_BITS_VARY is in effect.
+ *
+ * The count, if not the placement, of these bits is known
+ * to tcg/tcg-op-ldst.c, check_max_alignment().
*/
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
@@ -370,19 +336,34 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap. */
-#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */
-#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+ | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED (1 << 2)
+
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
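/*
 * Illustrative sketch, not part of this patch: how a slow-path memory helper
 * might interpret the split above.  The helper name is hypothetical;
 * CPUTLBEntryFull and its slow_flags[] member are those described in the
 * comment.
 */
static inline bool example_needs_bswap(uint64_t tlb_addr,
                                       const CPUTLBEntryFull *full,
                                       MMUAccessType access_type)
{
    if (!(tlb_addr & TLB_FORCE_SLOW)) {
        return false;            /* fast path: no slow flags can be set */
    }
    /* Slow path: consult the per-access-type flags. */
    return full->slow_flags[access_type] & TLB_BSWAP;
}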
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
@@ -391,7 +372,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
* @addr: virtual address to test (must be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
-static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}
@@ -402,88 +383,15 @@ static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
* @addr: virtual address to test (need not be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
-static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
-void dump_exec_info(void);
-void dump_opcount_info(void);
#endif /* !CONFIG_USER_ONLY */
-/* Returns: 0 on success, -1 on error */
-int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
- void *ptr, target_ulong len, bool is_write);
-
-int cpu_exec(CPUState *cpu);
-
-/**
- * cpu_set_cpustate_pointers(cpu)
- * @cpu: The cpu object
- *
- * Set the generic pointers in CPUState into the outer object.
- */
-static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
-{
- cpu->parent_obj.env_ptr = &cpu->env;
- cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
-}
-
-/**
- * env_archcpu(env)
- * @env: The architecture environment
- *
- * Return the ArchCPU associated with the environment.
- */
-static inline ArchCPU *env_archcpu(CPUArchState *env)
-{
- return container_of(env, ArchCPU, env);
-}
-
-/**
- * env_cpu(env)
- * @env: The architecture environment
- *
- * Return the CPUState associated with the environment.
- */
-static inline CPUState *env_cpu(CPUArchState *env)
-{
- return &env_archcpu(env)->parent_obj;
-}
-
-/**
- * env_neg(env)
- * @env: The architecture environment
- *
- * Return the CPUNegativeOffsetState associated with the environment.
- */
-static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
-{
- ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
- return &arch_cpu->neg;
-}
-
-/**
- * cpu_neg(cpu)
- * @cpu: The generic CPUState
- *
- * Return the CPUNegativeOffsetState associated with the cpu.
- */
-static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
-{
- ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
- return &arch_cpu->neg;
-}
-
-/**
- * env_tlb(env)
- * @env: The architecture environment
- *
- * Return the CPUTLB state associated with the environment.
- */
-static inline CPUTLB *env_tlb(CPUArchState *env)
-{
- return &env_neg(env)->tlb;
-}
+/* Validate correct placement of CPUArchState. */
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
#endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index d5e285d2b5..6d5318895a 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -3,16 +3,34 @@
/* CPU interfaces that are target independent. */
+#include "exec/vaddr.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
+#include "hw/core/cpu.h"
+#include "tcg/debug-assert.h"
+
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
+
+void cpu_exec_init_all(void);
+void cpu_exec_step_atomic(CPUState *cpu);
+
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
+extern QemuMutex qemu_cpu_list_lock;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
+unsigned int cpu_list_generation_id_get(void);
-void tcg_flush_softmmu_tlb(CPUState *cs);
+void tcg_iommu_init_notifier_list(CPUState *cpu);
+void tcg_iommu_free_notifier_list(CPUState *cpu);
#if !defined(CONFIG_USER_ONLY)
@@ -22,7 +40,7 @@ enum device_endian {
DEVICE_LITTLE_ENDIAN,
};
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
@@ -39,14 +57,28 @@ typedef uintptr_t ram_addr_t;
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
-extern ram_addr_t ram_size;
-
/* memory API */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
+
+/*
+ * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
+ *
+ * @ptr: The host pointer to translate.
+ * @round_offset: Whether to round the result offset down to a target page boundary
+ * @offset: Will be set to the offset within the returned RAMBlock.
+ *
+ * Returns: RAMBlock (or NULL if not found)
+ *
+ * By the time this function returns, the returned pointer is not protected
+ * by RCU anymore. If the caller is not within an RCU critical section and
+ * does not hold the BQL, it must have other means of protecting the
+ * pointer, such as a reference to the memory region that owns the RAMBlock.
+ */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
ram_addr_t *offset);
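/*
 * Illustrative sketch, not part of this patch: mapping a host pointer back to
 * a guest ram_addr_t, mirroring what qemu_ram_addr_from_host() does
 * internally.  The function name is hypothetical.
 */
static ram_addr_t example_host_to_ram_addr(void *host)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);

    if (!rb) {
        return (ram_addr_t)-1;      /* pointer is not within guest RAM */
    }
    return qemu_ram_get_offset(rb) + offset;
}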
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
@@ -56,16 +88,42 @@ const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
+ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
+bool qemu_ram_is_noreserve(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);
+bool qemu_ram_is_named_file(RAMBlock *rb);
+int qemu_ram_get_fd(RAMBlock *rb);
size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
+/**
+ * cpu_address_space_init:
+ * @cpu: CPU to add this address space to
+ * @asidx: integer index of this address space
+ * @prefix: prefix to be used as name of address space
+ * @mr: the root memory region of address space
+ *
+ * Add the specified address space to the CPU's cpu_ases list.
+ * The address space added with @asidx 0 is the one used for the
+ * convenience pointer cpu->as.
+ * The target-specific code which registers ASes is responsible
+ * for defining what semantics address space 0, 1, 2, etc have.
+ *
+ * Before the first call to this function, the caller must set
+ * cpu->num_ases to the total number of address spaces it needs
+ * to support.
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr);
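/*
 * Illustrative sketch, not part of this patch: registering two address spaces
 * from a target's realize hook.  The function name, region arguments and the
 * choice of two spaces are assumptions for the example.
 */
static void example_register_ases(CPUState *cs, MemoryRegion *secure_mr,
                                  MemoryRegion *normal_mr)
{
    cs->num_ases = 2;        /* must be set before the first init call */
    cpu_address_space_init(cs, 0, "cpu-secure-memory", secure_mr);
    cpu_address_space_init(cs, 1, "cpu-memory", normal_mr);
}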
+
void cpu_physical_memory_rw(hwaddr addr, void *buf,
hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
@@ -101,7 +159,123 @@ typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
+int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
+ size_t length);
#endif
+/* Returns: 0 on success, -1 on error */
+int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ void *ptr, size_t len, bool is_write);
+
+/* vl.c */
+void list_cpus(void);
+
+#ifdef CONFIG_TCG
+/**
+ * cpu_unwind_state_data:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @data: output data
+ *
+ * Attempt to load the unwind state for a host pc occurring in
+ * translated code. If @host_pc is not in translated code, the
+ * function returns false; otherwise @data is loaded.
+ * This is the same unwind info as given to restore_state_to_opc.
+ */
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code. If @host_pc is not in translated code no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
+
+G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#endif /* CONFIG_TCG */
+G_NORETURN void cpu_loop_exit(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
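/*
 * Illustrative sketch, not part of this patch: a target helper raising a
 * guest exception from translated code.  The helper name and the @excp value
 * are assumptions; the pattern itself follows the documentation above.
 */
static G_NORETURN void example_raise_exception(CPUState *cs, int excp,
                                               uintptr_t retaddr)
{
    /* Recover the guest PC and other unwind data for the faulting insn. */
    cpu_restore_state(cs, retaddr);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}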
+
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/*
+ * Original state of the write flag (used when tracking self-modifying code)
+ */
+#define PAGE_WRITE_ORG 0x0010
+/*
+ * Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
+ */
+#define PAGE_WRITE_INV 0x0020
+/* For use with page_set_flags: page is being replaced; target_data cleared. */
+#define PAGE_RESET 0x0040
+/* For linux-user, indicates that the page is MAP_ANON. */
+#define PAGE_ANON 0x0080
+
+/* Target-specific bits that will be used via page_get_flags(). */
+#define PAGE_TARGET_1 0x0200
+#define PAGE_TARGET_2 0x0400
+
+/*
+ * For linux-user, indicates that the page is mapped with the same semantics
+ * in both guest and host.
+ */
+#define PAGE_PASSTHROUGH 0x0800
+
+/* accel/tcg/cpu-exec.c */
+int cpu_exec(CPUState *cpu);
+
+/**
+ * env_archcpu(env)
+ * @env: The architecture environment
+ *
+ * Return the ArchCPU associated with the environment.
+ */
+static inline ArchCPU *env_archcpu(CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
+
+/**
+ * env_cpu(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline CPUState *env_cpu(CPUArchState *env)
+{
+ return (void *)env - sizeof(CPUState);
+}
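/*
 * Illustrative sketch, not part of this patch: the pointer arithmetic above
 * relies on every target laying out its ArchCPU like this; the
 * QEMU_BUILD_BUG_ON checks in cpu-all.h enforce exactly these offsets.
 */
struct ExampleArchCPU {
    CPUState parent_obj;     /* must be first: offset 0 */
    CPUArchState env;        /* must follow at offset sizeof(CPUState) */
    /* ... further target-specific fields ... */
};
/* hence env_cpu(&cpu->env) == (CPUState *)cpu, with no stored back-pointer */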
+
+#ifndef CONFIG_USER_ONLY
+/**
+ * cpu_mmu_index:
+ * @cs: CPU context
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ *
+ * The user-only version of this function is inline in cpu-all.h,
+ * where it always returns MMU_USER_IDX.
+ */
+static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ int ret = cs->cc->mmu_index(cs, ifetch);
+ tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
+ return ret;
+}
+#endif /* !CONFIG_USER_ONLY */
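/*
 * Illustrative sketch, not part of this patch: a helper picking the mmu index
 * for the current regime before a cpu_ld*_mmuidx_ra() access (declared in
 * exec/cpu_ldst.h).  The helper name is hypothetical.
 */
static inline uint32_t example_load_guest_u32(CPUState *cs, CPUArchState *env,
                                              vaddr addr, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(cs, false);     /* data access regime */

    return cpu_ldl_le_mmuidx_ra(env, addr, mmu_idx, ra);
}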
+
#endif /* CPU_COMMON_H */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 9185632337..3915438b83 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,9 +25,6 @@
#include "qemu/host-utils.h"
#include "qemu/thread.h"
-#ifdef CONFIG_TCG
-#include "tcg-target.h"
-#endif
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
@@ -39,9 +36,6 @@
#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
-#ifndef NB_MMU_MODES
-# error NB_MMU_MODES must be defined in cpu-param.h
-#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
@@ -58,36 +52,9 @@
# endif
#endif
-#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
-
-/* target_ulong is the type of a virtual address */
-#if TARGET_LONG_SIZE == 4
-typedef int32_t target_long;
-typedef uint32_t target_ulong;
-#define TARGET_FMT_lx "%08x"
-#define TARGET_FMT_ld "%d"
-#define TARGET_FMT_lu "%u"
-#elif TARGET_LONG_SIZE == 8
-typedef int64_t target_long;
-typedef uint64_t target_ulong;
-#define TARGET_FMT_lx "%016" PRIx64
-#define TARGET_FMT_ld "%" PRId64
-#define TARGET_FMT_lu "%" PRIu64
-#else
-#error TARGET_LONG_SIZE undefined
-#endif
-
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
-
-/* use a fully associative victim tlb of 8 entries */
-#define CPU_VTLB_SIZE 8
-
-#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
-#define CPU_TLB_ENTRY_BITS 4
-#else
-#define CPU_TLB_ENTRY_BITS 5
-#endif
+#include "exec/target_long.h"
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8
@@ -111,138 +78,6 @@ typedef uint64_t target_ulong;
# endif
# endif
-typedef struct CPUTLBEntry {
- /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
- bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
- go directly to ram.
- bit 3 : indicates that the entry is invalid
- bit 2..0 : zero
- */
- union {
- struct {
- target_ulong addr_read;
- target_ulong addr_write;
- target_ulong addr_code;
- /* Addend to virtual address to get host address. IO accesses
- use the corresponding iotlb value. */
- uintptr_t addend;
- };
- /* padding to get a power of two size */
- uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
- };
-} CPUTLBEntry;
-
-QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
-
-/* The IOTLB is not accessed directly inline by generated TCG code,
- * so the CPUIOTLBEntry layout is not as critical as that of the
- * CPUTLBEntry. (This is also why we don't want to combine the two
- * structs into one.)
- */
-typedef struct CPUIOTLBEntry {
- /*
- * @addr contains:
- * - in the lower TARGET_PAGE_BITS, a physical section number
- * - with the lower TARGET_PAGE_BITS masked off, an offset which
- * must be added to the virtual address to obtain:
- * + the ram_addr_t of the target RAM (if the physical section
- * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
- * + the offset within the target MemoryRegion (otherwise)
- */
- hwaddr addr;
- MemTxAttrs attrs;
-} CPUIOTLBEntry;
-
-/*
- * Data elements that are per MMU mode, minus the bits accessed by
- * the TCG fast path.
- */
-typedef struct CPUTLBDesc {
- /*
- * Describe a region covering all of the large pages allocated
- * into the tlb. When any page within this region is flushed,
- * we must flush the entire tlb. The region is matched if
- * (addr & large_page_mask) == large_page_addr.
- */
- target_ulong large_page_addr;
- target_ulong large_page_mask;
- /* host time (in ns) at the beginning of the time window */
- int64_t window_begin_ns;
- /* maximum number of entries observed in the window */
- size_t window_max_entries;
- size_t n_used_entries;
- /* The next index to use in the tlb victim table. */
- size_t vindex;
- /* The tlb victim table, in two parts. */
- CPUTLBEntry vtable[CPU_VTLB_SIZE];
- CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
- /* The iotlb. */
- CPUIOTLBEntry *iotlb;
-} CPUTLBDesc;
-
-/*
- * Data elements that are per MMU mode, accessed by the fast path.
- * The structure is aligned to aid loading the pair with one insn.
- */
-typedef struct CPUTLBDescFast {
- /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
- uintptr_t mask;
- /* The array of tlb entries itself. */
- CPUTLBEntry *table;
-} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
-
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
- /* Serialize updates to f.table and d.vtable, and others as noted. */
- QemuSpin lock;
- /*
- * Within dirty, for each bit N, modifications have been made to
- * mmu_idx N since the last time that mmu_idx was flushed.
- * Protected by tlb_c.lock.
- */
- uint16_t dirty;
- /*
- * Statistics. These are not lock protected, but are read and
- * written atomically. This allows the monitor to print a snapshot
- * of the stats without interfering with the cpu.
- */
- size_t full_flush_count;
- size_t part_flush_count;
- size_t elide_flush_count;
-} CPUTLBCommon;
-
-/*
- * The entire softmmu tlb, for all MMU modes.
- * The meaning of each of the MMU modes is defined in the target code.
- * Since this is placed within CPUNegativeOffsetState, the smallest
- * negative offsets are at the end of the struct.
- */
-
-typedef struct CPUTLB {
- CPUTLBCommon c;
- CPUTLBDesc d[NB_MMU_MODES];
- CPUTLBDescFast f[NB_MMU_MODES];
-} CPUTLB;
-
-/* This will be used by TCG backends to compute offsets. */
-#define TLB_MASK_TABLE_OFS(IDX) \
- ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
-
-#else
-
-typedef struct CPUTLB { } CPUTLB;
-
-#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
-
-/*
- * This structure must be placed in ArchCPU immediately
- * before CPUArchState, as a field named "neg".
- */
-typedef struct CPUNegativeOffsetState {
- CPUTLB tlb;
- IcountDecr icount_decr;
-} CPUNegativeOffsetState;
+#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
#endif
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index c14a48f65e..eb8f3f0595 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -4,7 +4,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -28,10 +28,12 @@
* load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
* cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
* cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
*
* store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
* cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
* cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
*
* sign is:
* (empty): for 32 and 64 bit sizes
@@ -53,10 +55,17 @@
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
* the index to use; the "data" and "code" suffixes take the index from
* cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
*/
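/*
 * Illustrative expansions of the naming scheme above (not part of this patch):
 *
 *   cpu_ldub_data(env, ptr)                  unsigned 8-bit load
 *   cpu_ldsw_be_data_ra(env, ptr, ra)        sign-extended 16-bit BE load with
 *                                            unwind return address
 *   cpu_stl_le_mmuidx_ra(env, ptr, v, i, ra) 32-bit LE store, explicit mmu index
 *   cpu_stq_mmu(env, ptr, v, oi, ra)         64-bit store carrying a MemOpIdx,
 *                                            alignment enforced
 */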
#ifndef CPU_LDST_H
#define CPU_LDST_H
+#include "exec/memopidx.h"
+#include "qemu/int128.h"
+#include "cpu.h"
+
#if defined(CONFIG_USER_ONLY)
/* sparc32plus has 64bit long but 32bit space address
* this can make bad result with g2h() and h2g()
@@ -69,23 +78,40 @@ typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif
+#ifndef TARGET_TAGGED_ADDRESSES
+static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
+{
+ return x;
+}
+#endif
+
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))
+static inline void *g2h_untagged(abi_ptr x)
+{
+ return (void *)((uintptr_t)(x) + guest_base);
+}
-#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
-#define guest_addr_valid(x) (1)
-#else
-#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
-#endif
-#define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)
+static inline void *g2h(CPUState *cs, abi_ptr x)
+{
+ return g2h_untagged(cpu_untagged_addr(cs, x));
+}
-static inline int guest_range_valid(unsigned long start, unsigned long len)
+static inline bool guest_addr_valid_untagged(abi_ulong x)
+{
+ return x <= GUEST_ADDR_MAX;
+}
+
+static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}
+#define h2g_valid(x) \
+ (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
+ (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
+
#define h2g_nocheck(x) ({ \
- unsigned long __ret = (unsigned long)(x) - guest_base; \
+ uintptr_t __ret = (uintptr_t)(x) - guest_base; \
(abi_ptr)__ret; \
})
@@ -95,18 +121,16 @@ static inline int guest_range_valid(unsigned long start, unsigned long len)
h2g_nocheck(x); \
})
#else
-typedef target_ulong abi_ptr;
-#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
+typedef vaddr abi_ptr;
+#define TARGET_ABI_FMT_ptr VADDR_PRIx
#endif
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
-
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
-
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
@@ -114,37 +138,31 @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
-
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
-
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t ra);
-
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
@@ -152,6 +170,136 @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t ra);
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra);
+
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint32_t cmpv, uint32_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
+ uint64_t cmpv, uint64_t newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
+ (CPUArchState *env, abi_ptr addr, TYPE val, \
+ MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
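/*
 * Illustrative sketch, not part of this patch: GEN_ATOMIC_HELPER_ALL(xchg)
 * above declares, among others, cpu_atomic_xchgl_le_mmu().  A hypothetical
 * target helper performing an aligned 32-bit atomic swap might use it so:
 */
static inline uint32_t example_atomic_swap32(CPUArchState *env, abi_ptr addr,
                                             uint32_t newval, int mmu_idx,
                                             uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);

    return cpu_atomic_xchgl_le_mmu(env, addr, newval, oi, ra);
}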
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 cmpv, Int128 newv,
+ MemOpIdx oi, uintptr_t retaddr);
+
#if defined(CONFIG_USER_ONLY)
extern __thread uintptr_t helper_retaddr;
@@ -176,192 +324,61 @@ static inline void clear_helper_retaddr(void)
helper_retaddr = 0;
}
-/*
- * Provide the same *_mmuidx_ra interface as for softmmu.
- * The mmu_idx argument is ignored.
- */
-
-static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldub_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsb_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_lduw_be_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsw_be_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldl_be_data_ra(env, addr, ra);
-}
-
-static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldq_be_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_lduw_le_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsw_le_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldl_le_data_ra(env, addr, ra);
-}
-
-static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldq_le_data_ra(env, addr, ra);
-}
-
-static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx, uintptr_t ra)
-{
- cpu_stb_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stw_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stl_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stq_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stw_le_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stl_le_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stq_le_data_ra(env, addr, val, ra);
-}
-
#else
-/* Needed for TCG_OVERSIZED_GUEST */
-#include "tcg/tcg.h"
+#include "tcg/oversized-guest.h"
-static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
+static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
+ MMUAccessType access_type)
{
-#if TCG_OVERSIZED_GUEST
- return entry->addr_write;
+ /* Do not rearrange the CPUTLBEntry structure members. */
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
+ MMU_DATA_LOAD * sizeof(uint64_t));
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
+ MMU_DATA_STORE * sizeof(uint64_t));
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
+ MMU_INST_FETCH * sizeof(uint64_t));
+
+#if TARGET_LONG_BITS == 32
+ /* Use qatomic_read, in case of addr_write; only care about low bits. */
+ const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
+ ptr += HOST_BIG_ENDIAN;
+ return qatomic_read(ptr);
#else
- return atomic_read(&entry->addr_write);
+ const uint64_t *ptr = &entry->addr_idx[access_type];
+# if TCG_OVERSIZED_GUEST
+ return *ptr;
+# else
+ /* ofs might correspond to .addr_write, so use qatomic_read */
+ return qatomic_read(ptr);
+# endif
#endif
}
+static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
+{
+ return tlb_read_idx(entry, MMU_DATA_STORE);
+}
+
/* Find the TLB index corresponding to the mmu_idx + address pair. */
-static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
+static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
+ vaddr addr)
{
- uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+ uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
return (addr >> TARGET_PAGE_BITS) & size_mask;
}
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
-static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
- target_ulong addr)
+static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
+ vaddr addr)
{
- return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
+ return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
}
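/*
 * Illustrative sketch, not part of this patch: the bare fast-path TLB probe
 * built from the helpers above.  Real callers in cputlb.c also check the
 * victim TLB and refill on a miss; the helper name is hypothetical.
 */
static inline bool example_tlb_probe(CPUState *cpu, int mmu_idx, vaddr addr,
                                     MMUAccessType access_type)
{
    CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
    uint64_t cmp = tlb_read_idx(entry, access_type);

    /* tlb_hit() (cpu-all.h) compares the page-aligned address. */
    return tlb_hit(cmp, addr);
}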
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
#endif /* defined(CONFIG_USER_ONLY) */
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
# define cpu_lduw_data cpu_lduw_be_data
# define cpu_ldsw_data cpu_ldsw_be_data
# define cpu_ldl_data cpu_ldl_be_data
@@ -407,6 +424,15 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
#endif
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra);
+
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
@@ -439,7 +465,7 @@ static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
- return g2h(addr);
+ return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index a62cfb28d5..6da1462c4f 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,6 +26,5 @@
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
-void tlb_flush_counts(size_t *full, size_t *part, size_t *elide);
#endif
#endif
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 3cf88272df..3e53501691 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,59 +21,11 @@
#define EXEC_ALL_H
#include "cpu.h"
-#include "exec/tb-context.h"
-#ifdef CONFIG_TCG
-#include "exec/cpu_ldst.h"
-#endif
-#include "sysemu/cpus.h"
-
-/* allow to see translation results - the slowdown should be negligible, so we leave it */
-#define DEBUG_DISAS
-
-/* Page tracking code uses ram addresses in system mode, and virtual
- addresses in userspace mode. Define tb_page_addr_t to be an appropriate
- type. */
#if defined(CONFIG_USER_ONLY)
-typedef abi_ulong tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
-#else
-typedef ram_addr_t tb_page_addr_t;
-#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#include "exec/cpu_ldst.h"
#endif
-
-#include "qemu/log.h"
-
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
-void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
- target_ulong *data);
-
-void cpu_gen_init(void);
-
-/**
- * cpu_restore_state:
- * @cpu: the vCPU state is to be restore to
- * @searched_pc: the host PC the fault occurred at
- * @will_exit: true if the TB executed will be interrupted after some
- cpu adjustments. Required for maintaining the correct
- icount valus
- * @return: true if state was restored, false otherwise
- *
- * Attempt to restore the state for a fault occurring in translated
- * code. If the searched_pc is not in translated code no state is
- * restored and the function returns false.
- */
-bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
-
-void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
-void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-TranslationBlock *tb_gen_code(CPUState *cpu,
- target_ulong pc, target_ulong cs_base,
- uint32_t flags,
- int cflags);
-
-void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
-void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
-void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#include "exec/translation-block.h"
+#include "qemu/clang-tsa.h"
/**
* cpu_loop_exit_requested:
@@ -89,34 +41,9 @@ void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
*/
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
- return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+ return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
-#if !defined(CONFIG_USER_ONLY)
-void cpu_reloading_memory_map(void);
-/**
- * cpu_address_space_init:
- * @cpu: CPU to add this address space to
- * @asidx: integer index of this address space
- * @prefix: prefix to be used as name of address space
- * @mr: the root memory region of address space
- *
- * Add the specified address space to the CPU's cpu_ases list.
- * The address space added with @asidx 0 is the one used for the
- * convenience pointer cpu->as.
- * The target-specific code which registers ASes is responsible
- * for defining what semantics address space 0, 1, 2, etc have.
- *
- * Before the first call to this function, the caller must set
- * cpu->num_ases to the total number of address spaces it needs
- * to support.
- *
- * Note that with KVM only one address space is supported.
- */
-void cpu_address_space_init(CPUState *cpu, int asidx,
- const char *prefix, MemoryRegion *mr);
-#endif
-
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
@@ -137,7 +64,7 @@ void tlb_destroy(CPUState *cpu);
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
* tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush
@@ -146,7 +73,7 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
* tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
@@ -158,7 +85,7 @@ void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
* the source vCPUs safe work is complete. This will depend on when
* the guests translation ends the TB.
*/
-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
@@ -193,7 +120,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus:
@@ -204,7 +131,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
*/
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -218,7 +145,7 @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
* complete once the source vCPUs safe work is complete. This will
* depend on when the guests translation ends the TB.
*/
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
* tlb_flush_by_mmuidx:
@@ -251,10 +178,76 @@ void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
* depend on when the guests translation ends the TB.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_page_bits_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * Similar to tlb_flush_page_by_mmuidx, comparing only the low @bits of the address.
+ */
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits);
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
+ (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
+
+/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits);
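/*
 * Illustrative sketch, not part of this patch: invalidating a 64KiB guest
 * region for two mmu indexes on the current CPU only.  The index choice is
 * an assumption for the example.
 */
static void example_flush_region(CPUState *cs, vaddr base)
{
    uint16_t idxmap = (1 << 0) | (1 << 1);   /* e.g. kernel + user regimes */

    tlb_flush_range_by_mmuidx(cs, base, 64 * 1024, idxmap, TARGET_LONG_BITS);
}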
+
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @addr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
+ CPUTLBEntryFull *full);
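/*
 * Illustrative sketch, not part of this patch: the tail of a target
 * tlb_fill()-style hook after a successful page table walk.  The
 * CPUTLBEntryFull initializers shown are the common minimum and are
 * assumptions for the example.
 */
static void example_install_translation(CPUState *cs, vaddr addr, hwaddr paddr,
                                        int prot, int mmu_idx)
{
    CPUTLBEntryFull full = {
        .phys_addr = paddr & TARGET_PAGE_MASK,
        .attrs = MEMTXATTRS_UNSPECIFIED,
        .prot = prot,
        .lg_page_size = TARGET_PAGE_BITS,
    };

    tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
}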
+
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
- * @vaddr: virtual address of page to add entry for
+ * @addr: virtual address of page to add entry for
* @paddr: physical address of the page
* @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
@@ -262,7 +255,7 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
* @size: size of the page in bytes
*
* Add an entry to this CPU's TLB (a mapping from virtual address
- * @vaddr to physical address @paddr) with the specified memory
+ * @addr to physical address @paddr) with the specified memory
* transaction attributes. This is generally called by the target CPU
* specific code after it has been called through the tlb_fill()
* entry point and performed a successful page table walk to find
@@ -273,18 +266,18 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
* single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
* used by tlb_flush_page.
*/
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, target_ulong size);
+ int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
*
* This function is equivalent to calling tlb_set_page_with_attrs()
* with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
* as a convenience for CPUs which don't use memory transaction attributes.
*/
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
+ int mmu_idx, vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
@@ -292,14 +285,13 @@ static inline void tlb_init(CPUState *cpu)
static inline void tlb_destroy(CPUState *cpu)
{
}
-static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
- target_ulong addr)
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
@@ -312,7 +304,7 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
- target_ulong addr, uint16_t idxmap)
+ vaddr addr, uint16_t idxmap)
{
}
@@ -320,12 +312,12 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
- target_ulong addr,
+ vaddr addr,
uint16_t idxmap)
{
}
@@ -337,6 +329,42 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
}
+static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
#endif
/**
* probe_access:
@@ -355,16 +383,16 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
* Finally, return the host address for a page that is backed by RAM,
* or NULL if the page requires I/O.
*/
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
-static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
-static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
@@ -374,6 +402,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
* probe_access_flags:
* @env: CPUArchState
* @addr: guest virtual address to look up
+ * @size: size of the access
* @access_type: read, write or execute permission
* @mmu_idx: MMU index to use for lookup
* @nonfault: suppress the fault
@@ -388,135 +417,108 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
* For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
*/
-int probe_access_flags(CPUArchState *env, target_ulong addr,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);
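
A sketch of the non-faulting probe pattern this interface is meant for; TLB_INVALID_MASK is assumed to be the flag indicating the access would fault, and the helper name is invented:

    static bool example_store_fast_path(CPUArchState *env, vaddr addr,
                                        int size, int mmu_idx, uintptr_t ra)
    {
        void *host;
        int flags = probe_access_flags(env, addr, size, MMU_DATA_STORE,
                                       mmu_idx, true, &host, ra);

        if (flags & TLB_INVALID_MASK) {
            return false;           /* would fault: take the slow path */
        }
        if (host) {
            memset(host, 0, size);  /* page is plain RAM: direct host access */
        }
        return true;
    }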
-#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
+#ifndef CONFIG_USER_ONLY
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ */
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
+
+/**
+ * probe_access_full_mmu() - Like probe_access_full, except it cannot fault
+ * and does not trigger instrumentation.
+ *
+ * @env: CPUArchState
+ * @addr: virtual address to probe
+ * @size: size of the probe
+ * @access_type: read, write or execute permission
+ * @mmu_idx: softmmu index
+ * @phost: return location for the host address, or NULL
+ * @pfull: return location for the CPUTLBEntryFull structure, or NULL
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ *
+ * Returns: TLB flags as per probe_access_flags()
+ */
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull);
+
+#endif
-/* Estimated block size for TB allocation. */
-/* ??? The following is based on a 2015 survey of x86_64 host output.
- Better would seem to be some sort of dynamically sized TB array,
- adapting to the block sizes actually being produced. */
-#if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 400
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ return tb->itree.start;
#else
-#define CODE_GEN_AVG_BLOCK_SIZE 150
+ return tb->page_addr[0];
#endif
+}
-/*
- * Translation Cache-related fields of a TB.
- * This struct exists just for convenience; we keep track of TB's in a binary
- * search tree, and the only fields needed to compare TB's in the tree are
- * @ptr and @size.
- * Note: the address of search data can be obtained by adding @size to @ptr.
- */
-struct tb_tc {
- void *ptr; /* pointer to the translated code */
- size_t size;
-};
-
-struct TranslationBlock {
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
- target_ulong cs_base; /* CS base for this block */
- uint32_t flags; /* flags defining in which context the code was generated */
- uint16_t size; /* size of target code for this block (1 <=
- size <= TARGET_PAGE_SIZE) */
- uint16_t icount;
- uint32_t cflags; /* compile flags */
-#define CF_COUNT_MASK 0x00007fff
-#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
-#define CF_NOCACHE 0x00010000 /* To be freed after execution */
-#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
-#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
-#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
-#define CF_CLUSTER_SHIFT 24
-/* cflags' mask for hashing/comparison */
-#define CF_HASH_MASK \
- (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)
-
- /* Per-vCPU dynamic tracing state used to generate this TB */
- uint32_t trace_vcpu_dstate;
-
- struct tb_tc tc;
-
- /* original tb when cflags has CF_NOCACHE */
- struct TranslationBlock *orig_tb;
- /* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[].
- The list is protected by the TB's page('s) lock(s) */
- uintptr_t page_next[2];
- tb_page_addr_t page_addr[2];
-
- /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
- QemuSpin jmp_lock;
-
- /* The following data are used to directly call another TB from
- * the code of this one. This can be done either by emitting direct or
- * indirect native jump instructions. These jumps are reset so that the TB
- * just continues its execution. The TB can be linked to another one by
- * setting one of the jump targets (or patching the jump instruction). Only
- * two of such jumps are supported.
- */
- uint16_t jmp_reset_offset[2]; /* offset of original jump target */
-#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
- uintptr_t jmp_target_arg[2]; /* target address or offset */
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
+{
+#ifdef CONFIG_USER_ONLY
+ tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
+ return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
+#else
+ return tb->page_addr[1];
+#endif
+}
+static inline void tb_set_page_addr0(TranslationBlock *tb,
+ tb_page_addr_t addr)
+{
+#ifdef CONFIG_USER_ONLY
+ tb->itree.start = addr;
/*
- * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
- * Each TB can have two outgoing jumps, and therefore can participate
- * in two lists. The list entries are kept in jmp_list_next[2]. The least
- * significant bit (LSB) of the pointers in these lists is used to encode
- * which of the two list entries is to be used in the pointed TB.
- *
- * List traversals are protected by jmp_lock. The destination TB of each
- * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
- * can be acquired from any origin TB.
- *
- * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
- * being invalidated, so that no further outgoing jumps from it can be set.
- *
- * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
- * to a destination TB that has CF_INVALID set.
+ * To begin, we record an interval of one byte. When the translation
+ * loop encounters a second page, the interval will be extended to
+ * include the first byte of the second page, which is sufficient to
+ * allow tb_page_addr1() above to work properly. The final corrected
+ * interval will be set by tb_page_add() from tb->size before the
+ * node is added to the interval tree.
*/
- uintptr_t jmp_list_head;
- uintptr_t jmp_list_next[2];
- uintptr_t jmp_dest[2];
-};
-
-extern bool parallel_cpus;
+ tb->itree.last = addr;
+#else
+ tb->page_addr[0] = addr;
+#endif
+}
-/* Hide the atomic_read to make code a little easier on the eyes */
-static inline uint32_t tb_cflags(const TranslationBlock *tb)
+static inline void tb_set_page_addr1(TranslationBlock *tb,
+ tb_page_addr_t addr)
{
- return atomic_read(&tb->cflags);
+#ifdef CONFIG_USER_ONLY
+ /* Extend the interval to the first byte of the second page. See above. */
+ tb->itree.last = addr;
+#else
+ tb->page_addr[1] = addr;
+#endif
}
/* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(void)
-{
- return (parallel_cpus ? CF_PARALLEL : 0)
- | (use_icount ? CF_USE_ICOUNT : 0);
-}
+uint32_t curr_cflags(CPUState *cpu);
/* TranslationBlock invalidate API */
-#if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr);
-void tb_invalidate_phys_range(target_ulong start, target_ulong end);
-#else
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
-#endif
-void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
- uint32_t cf_mask);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
-extern uintptr_t tci_tb_ptr;
+extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
@@ -532,14 +534,6 @@ extern uintptr_t tci_tb_ptr;
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void)
-{
-}
-#endif
-
#if !defined(CONFIG_USER_ONLY)
/**
@@ -555,49 +549,25 @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
#endif
-#if defined(CONFIG_USER_ONLY)
-void mmap_lock(void);
-void mmap_unlock(void);
-bool have_mmap_lock(void);
-
/**
- * get_page_addr_code() - user-mode version
+ * get_page_addr_code_hostp()
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
- * Returns @addr.
- */
-static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
- target_ulong addr)
-{
- return addr;
-}
-
-/**
- * get_page_addr_code_hostp() - user-mode version
- * @env: CPUArchState
- * @addr: guest virtual address of guest code
+ * See get_page_addr_code() (full-system version) for documentation on the
+ * return value.
*
- * Returns @addr.
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
+ * to the host address where @addr's content is kept.
*
- * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
- * is kept.
+ * Note: this function can trigger an exception.
*/
-static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
- target_ulong addr,
- void **hostp)
-{
- if (hostp) {
- *hostp = g2h(addr);
- }
- return addr;
-}
-#else
-static inline void mmap_lock(void) {}
-static inline void mmap_unlock(void) {}
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
+ void **hostp);
/**
- * get_page_addr_code() - full-system version
+ * get_page_addr_code()
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
@@ -607,30 +577,85 @@ static inline void mmap_unlock(void) {}
*
* Note: this function can trigger an exception.
*/
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
+ vaddr addr)
+{
+ return get_page_addr_code_hostp(env, addr, NULL);
+}
+
+#if defined(CONFIG_USER_ONLY)
+void TSA_NO_TSA mmap_lock(void);
+void TSA_NO_TSA mmap_unlock(void);
+bool have_mmap_lock(void);
+
+static inline void mmap_unlock_guard(void *unused)
+{
+ mmap_unlock();
+}
+
+#define WITH_MMAP_LOCK_GUARD() \
+ for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
+ = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
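
A usage sketch for the guard defined above: mmap_lock() is taken when the block is entered and mmap_unlock() runs via the cleanup attribute on every exit path, including early returns. The two helpers called inside are placeholders:

    static int example_walk_mappings(void)
    {
        WITH_MMAP_LOCK_GUARD() {
            if (!example_precondition()) {   /* hypothetical check */
                return -1;                   /* lock still released here */
            }
            example_do_work_locked();        /* hypothetical worker */
        }
        return 0;
    }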
/**
- * get_page_addr_code_hostp() - full-system version
- * @env: CPUArchState
- * @addr: guest virtual address of guest code
+ * adjust_signal_pc:
+ * @pc: raw pc from the host signal ucontext_t.
+ * @is_write: host memory operation was write, or read-modify-write.
*
- * See get_page_addr_code() (full-system version) for documentation on the
- * return value.
- *
- * Sets *@hostp (when @hostp is non-NULL) as follows.
- * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
- * to the host address where @addr's content is kept.
+ * Alter @pc as required for unwinding. Return the type of the
+ * guest memory access -- host reads may be for guest execution.
+ */
+MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
+
+/**
+ * handle_sigsegv_accerr_write:
+ * @cpu: the cpu context
+ * @old_set: the sigset_t from the signal ucontext_t
+ * @host_pc: the host pc, adjusted for the signal
+ * @guest_addr: the guest address of the fault
*
- * Note: this function can trigger an exception.
+ * Return true if the write fault has been handled, and should be re-tried.
*/
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
- void **hostp);
+bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
+ uintptr_t host_pc, abi_ptr guest_addr);
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+/**
+ * cpu_loop_exit_sigsegv:
+ * @cpu: the cpu context
+ * @addr: the guest address of the fault
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGSEGV, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
-/* exec.c */
-void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
+/**
+ * cpu_loop_exit_sigbus:
+ * @cpu: the cpu context
+ * @addr: the guest address of the alignment fault
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGBUS, and jump to the main cpu loop.
+ */
+G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+ MMUAccessType access_type,
+ uintptr_t ra);
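
Taken together, these hooks are intended for a host SIGSEGV handler roughly like the sketch below; the function name and the way is_write and maperr are derived from the host ucontext are assumptions:

    static void example_host_sigsegv(CPUState *cpu, sigset_t *old_set,
                                     uintptr_t host_pc, bool is_write,
                                     bool maperr, vaddr guest_addr)
    {
        MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);

        if (!maperr && access_type == MMU_DATA_STORE &&
            handle_sigsegv_accerr_write(cpu, old_set, host_pc, guest_addr)) {
            return;   /* write fault handled; the access will be retried */
        }
        cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, host_pc);
    }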
+
+#else
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
+#define WITH_MMAP_LOCK_GUARD()
+
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUState *cpu, vaddr addr);
+void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
@@ -640,7 +665,4 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section);
#endif
-/* vl.c */
-extern int singlestep;
-
#endif
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
index 94d8f83e92..eb14b91139 100644
--- a/include/exec/gdbstub.h
+++ b/include/exec/gdbstub.h
@@ -10,194 +10,138 @@
#define GDB_WATCHPOINT_READ 3
#define GDB_WATCHPOINT_ACCESS 4
-#ifdef NEED_CPU_H
-#include "cpu.h"
+typedef struct GDBFeature {
+ const char *xmlname;
+ const char *xml;
+ const char *name;
+ const char * const *regs;
+ int num_regs;
+} GDBFeature;
+
+typedef struct GDBFeatureBuilder {
+ GDBFeature *feature;
+ GPtrArray *xml;
+ GPtrArray *regs;
+ int base_reg;
+} GDBFeatureBuilder;
-typedef void (*gdb_syscall_complete_cb)(CPUState *cpu,
- target_ulong ret, target_ulong err);
+
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_get_reg_cb)(CPUState *cpu, GByteArray *buf, int reg);
+typedef int (*gdb_set_reg_cb)(CPUState *cpu, uint8_t *buf, int reg);
/**
- * gdb_do_syscall:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * ...: list of arguments to interpolate into @fmt
- *
- * Send a GDB syscall request. This function will return immediately;
- * the callback function will be called later when the remote system
- * call has completed.
- *
- * @fmt should be in the 'call-id,parameter,parameter...' format documented
- * for the F request packet in the GDB remote protocol. A limited set of
- * printf-style format specifiers is supported:
- * %x - target_ulong argument printed in hex
- * %lx - 64-bit argument printed in hex
- * %s - string pointer (target_ulong) and length (int) pair
- */
-void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
-/**
- * gdb_do_syscallv:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * @va: arguments to interpolate into @fmt
- *
- * As gdb_do_syscall, but taking a va_list rather than a variable
- * argument list.
+ * gdb_init_cpu(): Initialize the CPU for gdbstub.
+ * @cpu: The CPU to be initialized.
*/
-void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va);
-int use_gdb_syscalls(void);
-void gdb_set_stop_cpu(CPUState *cpu);
-void gdb_exit(CPUArchState *, int);
-#ifdef CONFIG_USER_ONLY
+void gdb_init_cpu(CPUState *cpu);
+
/**
- * gdb_handlesig: yield control to gdb
- * @cpu: CPU
- * @sig: if non-zero, the signal number which caused us to stop
- *
- * This function yields control to gdb, when a user-mode-only target
- * needs to stop execution. If @sig is non-zero, then we will send a
- * stop packet to tell gdb that we have stopped because of this signal.
- *
- * This function will block (handling protocol requests from gdb)
- * until gdb tells us to continue target execution. When it does
- * return, the return value is a signal to deliver to the target,
- * or 0 if no signal should be delivered, ie the signal that caused
- * us to stop should be ignored.
+ * gdb_register_coprocessor() - register a supplemental set of registers
+ * @cpu - the CPU associated with registers
+ * @get_reg - get function (gdb reading)
+ * @set_reg - set function (gdb modifying)
+ * @feature - GDBFeature describing the register set
+ * @g_pos - non-zero to append to the "general" register set at @g_pos
*/
-int gdb_handlesig(CPUState *, int);
-void gdb_signalled(CPUArchState *, int);
-void gdbserver_fork(CPUState *);
-#endif
-/* Get or set a register. Returns the size of the register. */
-typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg);
-typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
void gdb_register_coprocessor(CPUState *cpu,
gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
- int num_regs, const char *xml, int g_pos);
+ const GDBFeature *feature, int g_pos);
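
A registration sketch for the new signature; the XML name and the read/write callbacks are invented, and the feature is assumed to come from gdb_find_static_feature() declared further below:

    static int example_fpu_read(CPUState *cs, GByteArray *buf, int reg);
    static int example_fpu_write(CPUState *cs, uint8_t *buf, int reg);

    static void example_register_fpu(CPUState *cs)
    {
        const GDBFeature *feature =
            gdb_find_static_feature("example-fpu.xml");

        gdb_register_coprocessor(cs, example_fpu_read, example_fpu_write,
                                 feature, 0);
    }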
-/*
- * The GDB remote protocol transfers values in target byte order. As
- * the gdbstub may be batching up several register values we always
- * append to the array.
+/**
+ * gdbserver_start: start the gdb server
+ * @port_or_device: connection spec for gdb
+ *
+ * For CONFIG_USER this is either a tcp port or a path to a fifo. For
+ * system emulation you can use a full chardev spec for your gdbserver
+ * port.
*/
+int gdbserver_start(const char *port_or_device);
-static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
-{
- g_byte_array_append(buf, &val, 1);
- return 1;
-}
-
-static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
-{
- uint16_t to_word = tswap16(val);
- g_byte_array_append(buf, (uint8_t *) &to_word, 2);
- return 2;
-}
-
-static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
-{
- uint32_t to_long = tswap32(val);
- g_byte_array_append(buf, (uint8_t *) &to_long, 4);
- return 4;
-}
-
-static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
-{
- uint64_t to_quad = tswap64(val);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- return 8;
-}
-
-static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
- uint64_t val_lo)
-{
- uint64_t to_quad;
-#ifdef TARGET_WORDS_BIGENDIAN
- to_quad = tswap64(val_hi);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- to_quad = tswap64(val_lo);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
-#else
- to_quad = tswap64(val_lo);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- to_quad = tswap64(val_hi);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
-#endif
- return 16;
-}
-
-static inline int gdb_get_float32(GByteArray *array, float32 val)
-{
- uint8_t buf[sizeof(CPU_FloatU)];
-
- stfl_p(buf, val);
- g_byte_array_append(array, buf, sizeof(buf));
-
- return sizeof(buf);
-}
-
-static inline int gdb_get_float64(GByteArray *array, float64 val)
-{
- uint8_t buf[sizeof(CPU_DoubleU)];
-
- stfq_p(buf, val);
- g_byte_array_append(array, buf, sizeof(buf));
+/**
+ * gdb_feature_builder_init() - Initialize GDBFeatureBuilder.
+ * @builder: The builder to be initialized.
+ * @feature: The feature to be filled.
+ * @name: The name of the feature.
+ * @xmlname: The name of the XML.
+ * @base_reg: The base number of the register ID.
+ */
+void gdb_feature_builder_init(GDBFeatureBuilder *builder, GDBFeature *feature,
+ const char *name, const char *xmlname,
+ int base_reg);
- return sizeof(buf);
-}
+/**
+ * gdb_feature_builder_append_tag() - Append a tag.
+ * @builder: The builder.
+ * @format: The format of the tag.
+ * @...: The values to be formatted.
+ */
+void G_GNUC_PRINTF(2, 3)
+gdb_feature_builder_append_tag(const GDBFeatureBuilder *builder,
+ const char *format, ...);
-static inline int gdb_get_zeroes(GByteArray *array, size_t len)
-{
- guint oldlen = array->len;
- g_byte_array_set_size(array, oldlen + len);
- memset(array->data + oldlen, 0, len);
+/**
+ * gdb_feature_builder_append_reg() - Append a register.
+ * @builder: The builder.
+ * @name: The register's name; it must be unique within a CPU.
+ * @bitsize: The register's size, in bits.
+ * @regnum: The offset of the register's number in the feature.
+ * @type: The type of the register.
+ * @group: The register group to which this register belongs; it can be NULL.
+ */
+void gdb_feature_builder_append_reg(const GDBFeatureBuilder *builder,
+ const char *name,
+ int bitsize,
+ int regnum,
+ const char *type,
+ const char *group);
- return len;
-}
+/**
+ * gdb_feature_builder_end() - End building GDBFeature.
+ * @builder: The builder.
+ */
+void gdb_feature_builder_end(const GDBFeatureBuilder *builder);
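
A sketch of building a dynamic feature with the builder API above; the feature name, XML name and register layout are invented:

    static GDBFeature example_feature;

    static void example_build_feature(void)
    {
        GDBFeatureBuilder builder;

        gdb_feature_builder_init(&builder, &example_feature,
                                 "org.example.extra", "example-extra.xml", 0);
        gdb_feature_builder_append_reg(&builder, "ctrl", 32, 0,
                                       "int", "system");
        gdb_feature_builder_append_reg(&builder, "status", 32, 1,
                                       "int", "system");
        gdb_feature_builder_end(&builder);
    }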
/**
- * gdb_get_reg_ptr: get pointer to start of last element
- * @len: length of element
+ * gdb_find_static_feature() - Find a static feature.
+ * @xmlname: The name of the XML.
*
- * This is a helper function to extract the pointer to the last
- * element for additional processing. Some front-ends do additional
- * dynamic swapping of the elements based on CPU state.
+ * Return: The static feature.
*/
-static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len)
-{
- return buf->data + buf->len - len;
-}
-
-#if TARGET_LONG_BITS == 64
-#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
-#define ldtul_p(addr) ldq_p(addr)
-#else
-#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
-#define ldtul_p(addr) ldl_p(addr)
-#endif
-
-#endif
+const GDBFeature *gdb_find_static_feature(const char *xmlname);
/**
- * gdbserver_start: start the gdb server
- * @port_or_device: connection spec for gdb
+ * gdb_read_register() - Read a register associated with a CPU.
+ * @cpu: The CPU associated with the register.
+ * @buf: The buffer that the read register will be appended to.
+ * @reg: The register's number returned by gdb_find_feature_register().
*
- * For CONFIG_USER this is either a tcp port or a path to a fifo. For
- * system emulation you can use a full chardev spec for your gdbserver
- * port.
+ * Return: The number of read bytes.
*/
-int gdbserver_start(const char *port_or_device);
+int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
-void gdbserver_cleanup(void);
+/**
+ * typedef GDBRegDesc - a register description from gdbstub
+ */
+typedef struct {
+ int gdb_reg;
+ const char *name;
+ const char *feature_name;
+} GDBRegDesc;
/**
- * gdb_has_xml:
- * This is an ugly hack to cope with both new and old gdb.
- * If gdb sends qXfer:features:read then assume we're talking to a newish
- * gdb that understands target descriptions.
+ * gdb_get_register_list() - Return list of all registers for CPU
+ * @cpu: The CPU being searched
+ *
+ * Returns a GArray of GDBRegDesc; the caller frees the array but not the
+ * const strings.
*/
-extern bool gdb_has_xml;
+GArray *gdb_get_register_list(CPUState *cpu);
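
A consumer sketch for the register list; printing to stdout and the helper name are illustrative only:

    static void example_dump_registers(CPUState *cs)
    {
        GArray *regs = gdb_get_register_list(cs);

        for (guint i = 0; i < regs->len; i++) {
            GDBRegDesc *desc = &g_array_index(regs, GDBRegDesc, i);

            printf("%d: %s (%s)\n", desc->gdb_reg, desc->name,
                   desc->feature_name);
        }
        g_array_free(regs, true);   /* strings are const and stay owned */
    }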
+
+void gdb_set_stop_cpu(CPUState *cpu);
-/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
-extern const char *const xml_builtin[][2];
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.py */
+extern const GDBFeature gdb_static_features[];
#endif
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
deleted file mode 100644
index 822c43cfd3..0000000000
--- a/include/exec/gen-icount.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef GEN_ICOUNT_H
-#define GEN_ICOUNT_H
-
-#include "qemu/timer.h"
-
-/* Helpers for instruction counting code generation. */
-
-static TCGOp *icount_start_insn;
-
-static inline void gen_io_start(void)
-{
- TCGv_i32 tmp = tcg_const_i32(1);
- tcg_gen_st_i32(tmp, cpu_env,
- offsetof(ArchCPU, parent_obj.can_do_io) -
- offsetof(ArchCPU, env));
- tcg_temp_free_i32(tmp);
-}
-
-/*
- * cpu->can_do_io is cleared automatically at the beginning of
- * each translation block. The cost is minimal and only paid
- * for -icount, plus it would be very easy to forget doing it
- * in the translator. Therefore, backends only need to call
- * gen_io_start.
- */
-static inline void gen_io_end(void)
-{
- TCGv_i32 tmp = tcg_const_i32(0);
- tcg_gen_st_i32(tmp, cpu_env,
- offsetof(ArchCPU, parent_obj.can_do_io) -
- offsetof(ArchCPU, env));
- tcg_temp_free_i32(tmp);
-}
-
-static inline void gen_tb_start(TranslationBlock *tb)
-{
- TCGv_i32 count, imm;
-
- tcg_ctx->exitreq_label = gen_new_label();
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- count = tcg_temp_local_new_i32();
- } else {
- count = tcg_temp_new_i32();
- }
-
- tcg_gen_ld_i32(count, cpu_env,
- offsetof(ArchCPU, neg.icount_decr.u32) -
- offsetof(ArchCPU, env));
-
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- imm = tcg_temp_new_i32();
- /* We emit a movi with a dummy immediate argument. Keep the insn index
- * of the movi so that we later (when we know the actual insn count)
- * can update the immediate argument with the actual insn count. */
- tcg_gen_movi_i32(imm, 0xdeadbeef);
- icount_start_insn = tcg_last_op();
-
- tcg_gen_sub_i32(count, count, imm);
- tcg_temp_free_i32(imm);
- }
-
- tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
-
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- tcg_gen_st16_i32(count, cpu_env,
- offsetof(ArchCPU, neg.icount_decr.u16.low) -
- offsetof(ArchCPU, env));
- gen_io_end();
- }
-
- tcg_temp_free_i32(count);
-}
-
-static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
-{
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
- /* Update the num_insn immediate parameter now that we know
- * the actual insn count. */
- tcg_set_insn_param(icount_start_insn, 1, num_insns);
- }
-
- gen_set_label(tcg_ctx->exitreq_label);
- tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
-}
-
-#endif
diff --git a/include/exec/helper-gen-common.h b/include/exec/helper-gen-common.h
new file mode 100644
index 0000000000..5d6d78a625
--- /dev/null
+++ b/include/exec/helper-gen-common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ */
+
+#ifndef HELPER_GEN_COMMON_H
+#define HELPER_GEN_COMMON_H
+
+#define HELPER_H "accel/tcg/tcg-runtime.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+#define HELPER_H "accel/tcg/plugin-helpers.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+#endif /* HELPER_GEN_COMMON_H */
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
index 29c02f85dc..f7ec155699 100644
--- a/include/exec/helper-gen.h
+++ b/include/exec/helper-gen.h
@@ -1,97 +1,16 @@
-/* Helper file for declaring TCG helper functions.
- This one expands generation functions for tcg opcodes. */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ */
#ifndef HELPER_GEN_H
#define HELPER_GEN_H
-#include "exec/helper-head.h"
+#include "exec/helper-gen-common.h"
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
-{ \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 0, NULL); \
-}
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1)) \
-{ \
- TCGTemp *args[1] = { dh_arg(t1, 1) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 1, args); \
-}
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
-{ \
- TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 2, args); \
-}
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
-{ \
- TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 3, args); \
-}
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
- dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
-{ \
- TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
- dh_arg(t3, 3), dh_arg(t4, 4) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 4, args); \
-}
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
-{ \
- TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args); \
-}
-
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
-{ \
- TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
-}
-
-#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
- dh_arg_decl(t7, 7)) \
-{ \
- TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
- dh_arg(t7, 7) }; \
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 7, args); \
-}
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "trace/generated-helpers-wrappers.h"
-#include "tcg-runtime.h"
-#include "plugin-helpers.h"
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef DEF_HELPER_FLAGS_7
-#undef GEN_HELPER
+#define HELPER_H "helper.h"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
#endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-gen.h.inc b/include/exec/helper-gen.h.inc
new file mode 100644
index 0000000000..c009641517
--- /dev/null
+++ b/include/exec/helper-gen.h.inc
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands generation functions for tcg opcodes.
+ * Define HELPER_H for the header file to be expanded,
+ * and static inline to change from global file scope.
+ */
+
+#include "tcg/tcg.h"
+#include "tcg/helper-info.h"
+#include "exec/helper-head.h"
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1)) \
+{ \
+ tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1)); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
+{ \
+ tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2)); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
+ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), \
+ dh_arg(t3, 3), dh_arg(t4, 4)); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5)); \
+}
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
+{ \
+ tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
+}
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
+extern TCGHelperInfo glue(helper_info_, name); \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
+ dh_arg_decl(t7, 7)) \
+{ \
+ tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
+ dh_arg(t7, 7)); \
+}
+
+#include HELPER_H
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
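
To illustrate what this expansion buys a target, suppose a target helper.h contained a hypothetical entry DEF_HELPER_FLAGS_2(example_add, TCG_CALL_NO_RWG, i32, i32, i32); the macros above would then provide a type-checked generator the translator can call directly:

    static void example_emit_add(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
    {
        /* Emits a TCG call to helper_example_add(a, b), result in dst. */
        gen_helper_example_add(dst, a, b);
    }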
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
index 3094c7946d..28ceab0a46 100644
--- a/include/exec/helper-head.h
+++ b/include/exec/helper-head.h
@@ -1,23 +1,13 @@
-/* Helper file for declaring TCG helper functions.
- Used by other helper files.
-
- Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
- functions. Names should be specified without the helper_ prefix, and
- the return and argument types specified. 3 basic types are understood
- (i32, i64 and ptr). Additional aliases are provided for convenience and
- to match the types used by the C helper implementation.
-
- The target helper.h should be included in all files that use/define
- helper functions. THis will ensure that function prototypes are
- consistent. In addition it should be included an extra two times for
- helper.c, defining:
- GEN_HELPER 1 to produce op generation functions (gen_helper_*)
- GEN_HELPER 2 to do runtime registration helper functions.
+/*
+ * Helper file for declaring TCG helper functions.
+ * Used by other helper files.
*/
#ifndef EXEC_HELPER_HEAD_H
#define EXEC_HELPER_HEAD_H
+#include "fpu/softfloat-types.h"
+
#define HELPER(name) glue(helper_, name)
/* Some types that make sense in C, but not for TCG. */
@@ -26,11 +16,13 @@
#define dh_alias_int i32
#define dh_alias_i64 i64
#define dh_alias_s64 i64
+#define dh_alias_i128 i128
#define dh_alias_f16 i32
#define dh_alias_f32 i32
#define dh_alias_f64 i64
#define dh_alias_ptr ptr
#define dh_alias_cptr ptr
+#define dh_alias_env ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias(t) glue(dh_alias_, t)
@@ -40,26 +32,28 @@
#define dh_ctype_int int
#define dh_ctype_i64 uint64_t
#define dh_ctype_s64 int64_t
+#define dh_ctype_i128 Int128
#define dh_ctype_f16 uint32_t
#define dh_ctype_f32 float32
#define dh_ctype_f64 float64
#define dh_ctype_ptr void *
#define dh_ctype_cptr const void *
+#define dh_ctype_env CPUArchState *
#define dh_ctype_void void
-#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_noreturn G_NORETURN void
#define dh_ctype(t) dh_ctype_##t
#ifdef NEED_CPU_H
# ifdef TARGET_LONG_BITS
# if TARGET_LONG_BITS == 32
# define dh_alias_tl i32
+# define dh_typecode_tl dh_typecode_i32
# else
# define dh_alias_tl i64
+# define dh_typecode_tl dh_typecode_i64
# endif
# endif
-# define dh_alias_env ptr
# define dh_ctype_tl target_ulong
-# define dh_ctype_env CPUArchState *
#endif
/* We can't use glue() here because it falls foul of C preprocessor
@@ -68,6 +62,7 @@
#define dh_retvar_decl0_noreturn void
#define dh_retvar_decl0_i32 TCGv_i32 retval
#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_i128 TCGv_i128 retval
#define dh_retvar_decl0_ptr TCGv_ptr retval
#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
@@ -75,6 +70,7 @@
#define dh_retvar_decl_noreturn
#define dh_retvar_decl_i32 TCGv_i32 retval,
#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_i128 TCGv_i128 retval,
#define dh_retvar_decl_ptr TCGv_ptr retval,
#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
@@ -82,52 +78,35 @@
#define dh_retvar_noreturn NULL
#define dh_retvar_i32 tcgv_i32_temp(retval)
#define dh_retvar_i64 tcgv_i64_temp(retval)
+#define dh_retvar_i128 tcgv_i128_temp(retval)
#define dh_retvar_ptr tcgv_ptr_temp(retval)
#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
-#define dh_is_64bit_void 0
-#define dh_is_64bit_noreturn 0
-#define dh_is_64bit_i32 0
-#define dh_is_64bit_i64 1
-#define dh_is_64bit_ptr (sizeof(void *) == 8)
-#define dh_is_64bit_cptr dh_is_64bit_ptr
-#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
-
-#define dh_is_signed_void 0
-#define dh_is_signed_noreturn 0
-#define dh_is_signed_i32 0
-#define dh_is_signed_s32 1
-#define dh_is_signed_i64 0
-#define dh_is_signed_s64 1
-#define dh_is_signed_f16 0
-#define dh_is_signed_f32 0
-#define dh_is_signed_f64 0
-#define dh_is_signed_tl 0
-#define dh_is_signed_int 1
-/* ??? This is highly specific to the host cpu. There are even special
- extension instructions that may be required, e.g. ia64's addp4. But
- for now we don't support any 64-bit targets with 32-bit pointers. */
-#define dh_is_signed_ptr 0
-#define dh_is_signed_cptr dh_is_signed_ptr
-#define dh_is_signed_env dh_is_signed_ptr
-#define dh_is_signed(t) dh_is_signed_##t
+#define dh_typecode_void 0
+#define dh_typecode_noreturn 0
+#define dh_typecode_i32 2
+#define dh_typecode_s32 3
+#define dh_typecode_i64 4
+#define dh_typecode_s64 5
+#define dh_typecode_ptr 6
+#define dh_typecode_i128 7
+#define dh_typecode_int dh_typecode_s32
+#define dh_typecode_f16 dh_typecode_i32
+#define dh_typecode_f32 dh_typecode_i32
+#define dh_typecode_f64 dh_typecode_i64
+#define dh_typecode_cptr dh_typecode_ptr
+#define dh_typecode_env dh_typecode_ptr
+#define dh_typecode(t) dh_typecode_##t
#define dh_callflag_i32 0
-#define dh_callflag_s32 0
-#define dh_callflag_int 0
#define dh_callflag_i64 0
-#define dh_callflag_s64 0
-#define dh_callflag_f16 0
-#define dh_callflag_f32 0
-#define dh_callflag_f64 0
+#define dh_callflag_i128 0
#define dh_callflag_ptr 0
-#define dh_callflag_cptr dh_callflag_ptr
#define dh_callflag_void 0
#define dh_callflag_noreturn TCG_CALL_NO_RETURN
#define dh_callflag(t) glue(dh_callflag_, dh_alias(t))
-#define dh_sizemask(t, n) \
- ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1)))
+#define dh_typemask(t, n) (dh_typecode(t) << (n * 3))
#define dh_arg(t, n) \
glue(glue(tcgv_, dh_alias(t)), _temp)(glue(arg, n))
@@ -151,6 +130,6 @@
#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \
DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7)
-/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+/* MAX_CALL_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
#endif /* EXEC_HELPER_HEAD_H */
diff --git a/include/exec/helper-info.c.inc b/include/exec/helper-info.c.inc
new file mode 100644
index 0000000000..530d2e6d35
--- /dev/null
+++ b/include/exec/helper-info.c.inc
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands info structures for tcg helpers.
+ * Define HELPER_H for the header file to be expanded.
+ */
+
+#include "tcg/tcg.h"
+#include "tcg/helper-info.h"
+#include "exec/helper-head.h"
+
+/*
+ * Need one more level of indirection before stringification
+ * to get all the macros expanded first.
+ */
+#define str(s) #s
+
+#define DEF_HELPER_FLAGS_0(NAME, FLAGS, RET) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) \
+ };
+
+#define DEF_HELPER_FLAGS_1(NAME, FLAGS, RET, T1) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ };
+
+#define DEF_HELPER_FLAGS_2(NAME, FLAGS, RET, T1, T2) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) \
+ };
+
+#define DEF_HELPER_FLAGS_3(NAME, FLAGS, RET, T1, T2, T3) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ };
+
+#define DEF_HELPER_FLAGS_4(NAME, FLAGS, RET, T1, T2, T3, T4) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) \
+ };
+
+#define DEF_HELPER_FLAGS_5(NAME, FLAGS, RET, T1, T2, T3, T4, T5) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ };
+
+#define DEF_HELPER_FLAGS_6(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ | dh_typemask(T6, 6) \
+ };
+
+#define DEF_HELPER_FLAGS_7(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6, T7) \
+ TCGHelperInfo glue(helper_info_, NAME) = { \
+ .func = HELPER(NAME), .name = str(NAME), \
+ .flags = FLAGS | dh_callflag(RET), \
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
+ | dh_typemask(T6, 6) | dh_typemask(T7, 7) \
+ };
+
+#include HELPER_H
+
+#undef str
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
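
A target would typically instantiate these info structures once, for example from its translate.c; this sketch assumes the conventional per-target "helper.h":

    #define HELPER_H "helper.h"
    #include "exec/helper-info.c.inc"
    #undef  HELPER_H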
diff --git a/include/exec/helper-proto-common.h b/include/exec/helper-proto-common.h
new file mode 100644
index 0000000000..8b67170a22
--- /dev/null
+++ b/include/exec/helper-proto-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ */
+
+#ifndef HELPER_PROTO_COMMON_H
+#define HELPER_PROTO_COMMON_H
+
+#include "qemu/atomic128.h" /* for HAVE_CMPXCHG128 */
+
+#define HELPER_H "accel/tcg/tcg-runtime.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
+
+#define HELPER_H "accel/tcg/plugin-helpers.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
+
+#endif /* HELPER_PROTO_COMMON_H */
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
index a0a8d9aa46..6935cb4f16 100644
--- a/include/exec/helper-proto.h
+++ b/include/exec/helper-proto.h
@@ -1,52 +1,16 @@
-/* Helper file for declaring TCG helper functions.
- This one expands prototypes for the helper functions. */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ */
#ifndef HELPER_PROTO_H
#define HELPER_PROTO_H
-#include "exec/helper-head.h"
+#include "exec/helper-proto-common.h"
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
-dh_ctype(ret) HELPER(name) (void);
-
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1));
-
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
-
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
-
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4));
-
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5));
-
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
-
-#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
- dh_ctype(t7));
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "tcg-runtime.h"
-#include "plugin-helpers.h"
-
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef DEF_HELPER_FLAGS_7
+#define HELPER_H "helper.h"
+#include "exec/helper-proto.h.inc"
+#undef HELPER_H
#endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-proto.h.inc b/include/exec/helper-proto.h.inc
new file mode 100644
index 0000000000..c3aa666929
--- /dev/null
+++ b/include/exec/helper-proto.h.inc
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper file for declaring TCG helper functions.
+ * This one expands prototypes for the helper functions.
+ * Define HELPER_H for the header file to be expanded.
+ */
+
+#include "exec/helper-head.h"
+
+/*
+ * Work around an issue with --enable-lto, in which GCC's ipa-split pass
+ * decides to split out the noreturn code paths that raise an exception,
+ * taking the __builtin_return_address() along into the new function,
+ * where it no longer computes a value that returns to TCG generated code.
+ * Despite the name, the noinline attribute affects splitter, so this
+ * prevents the optimization in question. Given that helpers should not
+ * otherwise be called directly, this should not have any other visible effect.
+ *
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1454
+ */
+#define DEF_HELPER_ATTR __attribute__((noinline))
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), \
+ dh_ctype(t3)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), \
+ dh_ctype(t6)) DEF_HELPER_ATTR;
+
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
+ dh_ctype(t7)) DEF_HELPER_ATTR;
+
+#define IN_HELPER_PROTO
+
+#include HELPER_H
+
+#undef IN_HELPER_PROTO
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
+#undef DEF_HELPER_ATTR
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
deleted file mode 100644
index 27870509a2..0000000000
--- a/include/exec/helper-tcg.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Helper file for declaring TCG helper functions.
- This one defines data structures private to tcg.c. */
-
-#ifndef HELPER_TCG_H
-#define HELPER_TCG_H
-
-#include "exec/helper-head.h"
-
-/* Need one more level of indirection before stringification
- to get all the macros expanded first. */
-#define str(s) #s
-
-#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) },
-
-#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) },
-
-#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) },
-
-#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) },
-
-#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) },
-
-#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
- | dh_sizemask(t5, 5) },
-
-#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
- { .func = HELPER(NAME), .name = str(NAME), \
- .flags = FLAGS | dh_callflag(ret), \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
- | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) },
-
-#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
- { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
- .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
- | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
- | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) | dh_sizemask(t7, 7) },
-
-#include "helper.h"
-#include "trace/generated-helpers.h"
-#include "tcg-runtime.h"
-#include "plugin-helpers.h"
-
-#undef str
-#undef DEF_HELPER_FLAGS_0
-#undef DEF_HELPER_FLAGS_1
-#undef DEF_HELPER_FLAGS_2
-#undef DEF_HELPER_FLAGS_3
-#undef DEF_HELPER_FLAGS_4
-#undef DEF_HELPER_FLAGS_5
-#undef DEF_HELPER_FLAGS_6
-#undef DEF_HELPER_FLAGS_7
-
-#endif /* HELPER_TCG_H */
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
index a71c93cc81..50fbb2d96c 100644
--- a/include/exec/hwaddr.h
+++ b/include/exec/hwaddr.h
@@ -10,7 +10,7 @@
typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
-#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_FMT_plx "%016" PRIx64
#define HWADDR_PRId PRId64
#define HWADDR_PRIi PRIi64
#define HWADDR_PRIo PRIo64
@@ -18,4 +18,9 @@ typedef uint64_t hwaddr;
#define HWADDR_PRIx PRIx64
#define HWADDR_PRIX PRIX64
+typedef struct MemMapEntry {
+ hwaddr base;
+ hwaddr size;
+} MemMapEntry;
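
A board-model sketch using the shared MemMapEntry type; the addresses, sizes and device names are invented:

    static const MemMapEntry example_memmap[] = {
        { 0x00000000, 0x08000000 },   /* flash */
        { 0x08000000, 0x00001000 },   /* UART  */
        { 0x40000000, 0x40000000 },   /* RAM   */
    };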
+
#endif
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
index 97feb296d2..4397f12f93 100644
--- a/include/exec/ioport.h
+++ b/include/exec/ioport.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -35,7 +35,6 @@ typedef struct MemoryRegionPortio {
unsigned size;
uint32_t (*read)(void *opaque, uint32_t address);
void (*write)(void *opaque, uint32_t address, uint32_t data);
- uint32_t base; /* private field */
} MemoryRegionPortio;
#define PORTIO_END_OF_LIST() { }
@@ -55,6 +54,7 @@ typedef struct PortioList {
const struct MemoryRegionPortio *ports;
Object *owner;
struct MemoryRegion *address_space;
+ uint32_t addr;
unsigned nr;
struct MemoryRegion **regions;
void *opaque;
@@ -71,5 +71,7 @@ void portio_list_add(PortioList *piolist,
struct MemoryRegion *address_space,
uint32_t addr);
void portio_list_del(PortioList *piolist);
+void portio_list_set_enabled(PortioList *piolist, bool enabled);
+void portio_list_set_address(PortioList *piolist, uint32_t addr);
#endif /* IOPORT_H */
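
A hedged sketch of how the two new PortioList entry points might be used by a device whose legacy I/O range can be moved or disabled at runtime; the device structure and helper are hypothetical, and the PortioList is assumed to have been set up earlier with portio_list_init()/portio_list_add():

typedef struct DemoIsaDevice {
    PortioList portio;      /* registered at realize time */
    uint32_t io_base;
    bool enabled;
} DemoIsaDevice;

static void demo_isa_update_io(DemoIsaDevice *s, uint32_t new_base, bool enable)
{
    /* Relocate the whole registered range without deleting and re-adding it. */
    if (s->io_base != new_base) {
        portio_list_set_address(&s->portio, new_base);
        s->io_base = new_base;
    }
    /* Toggle decoding of the range. */
    if (s->enabled != enable) {
        portio_list_set_enabled(&s->portio, enable);
        s->enabled = enable;
    }
}
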
diff --git a/include/exec/log.h b/include/exec/log.h
index 3ed797c1c8..4a7375a45f 100644
--- a/include/exec/log.h
+++ b/include/exec/log.h
@@ -15,15 +15,10 @@
*/
static inline void log_cpu_state(CPUState *cpu, int flags)
{
- QemuLogFile *logfile;
-
- if (qemu_log_enabled()) {
- rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
- if (logfile) {
- cpu_dump_state(cpu, logfile->fd, flags);
- }
- rcu_read_unlock();
+ FILE *f = qemu_log_trylock();
+ if (f) {
+ cpu_dump_state(cpu, f, flags);
+ qemu_log_unlock(f);
}
}
@@ -42,43 +37,4 @@ static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
}
}
-#ifdef NEED_CPU_H
-/* disas() and target_disas() to qemu_logfile: */
-static inline void log_target_disas(CPUState *cpu, target_ulong start,
- target_ulong len)
-{
- QemuLogFile *logfile;
- rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
- if (logfile) {
- target_disas(logfile->fd, cpu, start, len);
- }
- rcu_read_unlock();
-}
-
-static inline void log_disas(void *code, unsigned long size, const char *note)
-{
- QemuLogFile *logfile;
- rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
- if (logfile) {
- disas(logfile->fd, code, size, note);
- }
- rcu_read_unlock();
-}
-
-#if defined(CONFIG_USER_ONLY)
-/* page_dump() output to the log file: */
-static inline void log_page_dump(const char *operation)
-{
- FILE *logfile = qemu_log_lock();
- if (logfile) {
- qemu_log("page layout changed following %s\n", operation);
- page_dump(logfile);
- }
- qemu_log_unlock(logfile);
-}
-#endif
-#endif
-
#endif
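
The rewritten log_cpu_state() above illustrates the new locking idiom; here is a hedged sketch of the same qemu_log_trylock()/qemu_log_unlock() pattern applied to an arbitrary multi-line record (the helper name is made up):

static void demo_log_transition(int old_state, int new_state)
{
    /* qemu_log_trylock() returns NULL when logging is disabled or unavailable. */
    FILE *f = qemu_log_trylock();
    if (f) {
        fprintf(f, "state transition: %d -> %d\n", old_state, new_state);
        qemu_log_unlock(f);
    }
}
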
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
index 95f2d20d55..14cdd8d582 100644
--- a/include/exec/memattrs.h
+++ b/include/exec/memattrs.h
@@ -29,26 +29,29 @@ typedef struct MemTxAttrs {
* "didn't specify" if necessary.
*/
unsigned int unspecified:1;
- /* ARM/AMBA: TrustZone Secure access
+ /*
+ * ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
*/
unsigned int secure:1;
+ /*
+ * ARM: ArmSecuritySpace. This partially overlaps secure, but it is
+ * easier to have both fields to assist code that does not understand
+     * ARMv9 RME, or has no specific knowledge of ARM at all (e.g. pflash).
+ */
+ unsigned int space:2;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;
- /* Requester ID (for MSI for example) */
- unsigned int requester_id:16;
- /* Invert endianness for this page */
- unsigned int byte_swap:1;
/*
- * The following are target-specific page-table bits. These are not
- * related to actual memory transactions at all. However, this structure
- * is part of the tlb_fill interface, cached in the cputlb structure,
- * and has unused bits. These fields will be read by target-specific
- * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
+ * Bus interconnect and peripherals can access anything (memories,
+ * devices) by default. By setting the 'memory' bit, bus transaction
+     * devices) by default. By setting the 'memory' bit, bus transactions
+ * versus devices. Access to devices will be logged and rejected
+ * (see MEMTX_ACCESS_ERROR).
*/
- unsigned int target_tlb_bit0 : 1;
- unsigned int target_tlb_bit1 : 1;
- unsigned int target_tlb_bit2 : 1;
+ unsigned int memory:1;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
} MemTxAttrs;
/* Bus masters which don't specify any attributes will get this,
@@ -66,6 +69,7 @@ typedef struct MemTxAttrs {
#define MEMTX_OK 0
#define MEMTX_ERROR (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
+#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */
typedef uint32_t MemTxResult;
#endif
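
A hedged sketch of a bus master requesting the new 'memory' restriction and checking for the matching MEMTX_ACCESS_ERROR result; the helper and its arguments are assumptions, and address_space_read() is assumed to be available via "exec/memory.h":

static bool demo_dma_read(AddressSpace *as, hwaddr addr, void *buf, hwaddr len)
{
    /* Secure transaction restricted to normal memory; device targets rejected. */
    MemTxAttrs attrs = { .secure = 1, .memory = 1 };
    MemTxResult res = address_space_read(as, addr, attrs, buf, len);

    if (res & MEMTX_ACCESS_ERROR) {
        /* The interconnect refused the access (non-memory target). */
        return false;
    }
    return res == MEMTX_OK;
}
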
diff --git a/include/exec/memop.h b/include/exec/memop.h
index 529d07b02d..a86dc6743a 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -19,12 +19,16 @@ typedef enum MemOp {
MO_16 = 1,
MO_32 = 2,
MO_64 = 3,
- MO_SIZE = 3, /* Mask for the above. */
+ MO_128 = 4,
+ MO_256 = 5,
+ MO_512 = 6,
+ MO_1024 = 7,
+ MO_SIZE = 0x07, /* Mask for the above. */
- MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
+ MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */
- MO_BSWAP = 8, /* Host reverse endian. */
-#ifdef HOST_WORDS_BIGENDIAN
+ MO_BSWAP = 0x10, /* Host reverse endian. */
+#if HOST_BIG_ENDIAN
MO_LE = MO_BSWAP,
MO_BE = 0,
#else
@@ -32,7 +36,7 @@ typedef enum MemOp {
MO_BE = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
-#ifdef TARGET_WORDS_BIGENDIAN
+#if TARGET_BIG_ENDIAN
MO_TE = MO_BE,
#else
MO_TE = MO_LE,
@@ -43,8 +47,6 @@ typedef enum MemOp {
* MO_UNALN accesses are never checked for alignment.
* MO_ALIGN accesses will result in a call to the CPU's
* do_unaligned_access hook if the guest address is not aligned.
- * The default depends on whether the target CPU defines
- * TARGET_ALIGNED_ONLY.
*
* Some architectures (e.g. ARMv8) need the address which is aligned
* to a size more than the size of the memory access.
@@ -59,51 +61,88 @@ typedef enum MemOp {
* - an alignment to a specified size, which may be more or less than
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
*/
- MO_ASHIFT = 4,
- MO_AMASK = 7 << MO_ASHIFT,
-#ifdef NEED_CPU_H
-#ifdef TARGET_ALIGNED_ONLY
- MO_ALIGN = 0,
- MO_UNALN = MO_AMASK,
-#else
- MO_ALIGN = MO_AMASK,
- MO_UNALN = 0,
-#endif
-#endif
+ MO_ASHIFT = 5,
+ MO_AMASK = 0x7 << MO_ASHIFT,
+ MO_UNALN = 0,
MO_ALIGN_2 = 1 << MO_ASHIFT,
MO_ALIGN_4 = 2 << MO_ASHIFT,
MO_ALIGN_8 = 3 << MO_ASHIFT,
MO_ALIGN_16 = 4 << MO_ASHIFT,
MO_ALIGN_32 = 5 << MO_ASHIFT,
MO_ALIGN_64 = 6 << MO_ASHIFT,
+ MO_ALIGN = MO_AMASK,
+
+ /*
+ * MO_ATOM_* describes the atomicity requirements of the operation:
+ * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
+ * is aligned; if unaligned there is no atomicity.
+ * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
+ * be a pair of half-sized operations which are packed together
+ * for convenience, with single-copy atomicity on each half if
+ * the half is aligned.
+ * This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
+ * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
+ * is unaligned, so long as it does not cross a 16-byte boundary;
+ * if it crosses a 16-byte boundary there is no atomicity.
+ * This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
+ * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic,
+ * if it happens to be within a 16-byte boundary, otherwise it
+ * devolves to a pair of half-sized MO_ATOM_WITHIN16 operations.
+ * Depending on alignment, one or both will be single-copy atomic.
+ * This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
+ * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts
+ * by the alignment. E.g. if the address is 0 mod 4, then each
+ * 4-byte subobject is single-copy atomic.
+ * This is the atomicity e.g. of IBM Power.
+ * MO_ATOM_NONE: the operation has no atomicity requirements.
+ *
+ * Note the default (i.e. 0) value is single-copy atomic to the
+ * size of the operation, if aligned. This retains the behaviour
+ * from before this field was introduced.
+ */
+ MO_ATOM_SHIFT = 8,
+ MO_ATOM_IFALIGN = 0 << MO_ATOM_SHIFT,
+ MO_ATOM_IFALIGN_PAIR = 1 << MO_ATOM_SHIFT,
+ MO_ATOM_WITHIN16 = 2 << MO_ATOM_SHIFT,
+ MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
+ MO_ATOM_SUBALIGN = 4 << MO_ATOM_SHIFT,
+ MO_ATOM_NONE = 5 << MO_ATOM_SHIFT,
+ MO_ATOM_MASK = 7 << MO_ATOM_SHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
+ MO_UQ = MO_64,
+ MO_UO = MO_128,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
- MO_Q = MO_64,
+ MO_SQ = MO_SIGN | MO_64,
+ MO_SO = MO_SIGN | MO_128,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
+ MO_LEUQ = MO_LE | MO_UQ,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
- MO_LEQ = MO_LE | MO_Q,
+ MO_LESQ = MO_LE | MO_SQ,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
+ MO_BEUQ = MO_BE | MO_UQ,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
- MO_BEQ = MO_BE | MO_Q,
+ MO_BESQ = MO_BE | MO_SQ,
#ifdef NEED_CPU_H
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
+ MO_TEUQ = MO_TE | MO_UQ,
+ MO_TEUO = MO_TE | MO_UO,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
- MO_TEQ = MO_TE | MO_Q,
+ MO_TESQ = MO_TE | MO_SQ,
#endif
MO_SSIZE = MO_SIZE | MO_SIGN,
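
A hedged sketch of composing and decomposing the reworked MemOp bits, using only the enum values shown above (the helper name is made up):

static void demo_memop_decode(void)
{
    /* Little-endian 64-bit load, 8-byte aligned, atomic if within a 16-byte line. */
    MemOp op = MO_LEUQ | MO_ALIGN_8 | MO_ATOM_WITHIN16;

    unsigned size_bytes = 1u << (op & MO_SIZE);     /* 8 */
    bool sign_extended  = (op & MO_SIGN) != 0;      /* false */
    MemOp atomicity     = op & MO_ATOM_MASK;        /* MO_ATOM_WITHIN16 */

    (void)size_bytes; (void)sign_extended; (void)atomicity;
}
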
diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h
new file mode 100644
index 0000000000..eb7f1591a3
--- /dev/null
+++ b/include/exec/memopidx.h
@@ -0,0 +1,55 @@
+/*
+ * Combine the MemOp and mmu_idx parameters into a single value.
+ *
+ * Authors:
+ * Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_MEMOPIDX_H
+#define EXEC_MEMOPIDX_H
+
+#include "exec/memop.h"
+
+typedef uint32_t MemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
+{
+#ifdef CONFIG_DEBUG_TCG
+ assert(idx <= 15);
+#endif
+ return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline MemOp get_memop(MemOpIdx oi)
+{
+ return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(MemOpIdx oi)
+{
+ return oi & 15;
+}
+
+#endif
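
A hedged sketch of the new combined encoding in use; the values are arbitrary and the helper name is made up:

static void demo_memopidx_roundtrip(void)
{
    /* Pack a big-endian 32-bit access for mmu index 2 into one parameter. */
    MemOpIdx oi = make_memop_idx(MO_BEUL, 2);

    MemOp op     = get_memop(oi);       /* MO_BEUL */
    unsigned idx = get_mmuidx(oi);      /* 2 */

    (void)op; (void)idx;
}
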
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index 9fcc2af25c..100c1237ac 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -38,10 +38,6 @@ void flatview_unref(FlatView *view);
extern const MemoryRegionOps unassigned_mem_ops;
-bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
- unsigned size, bool is_write,
- MemTxAttrs attrs);
-
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
void address_space_dispatch_compact(AddressSpaceDispatch *d);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 307e527835..dbb1bad72f 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -24,6 +24,7 @@
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
+#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
@@ -33,30 +34,80 @@
#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
-#define TYPE_MEMORY_REGION "qemu:memory-region"
-#define MEMORY_REGION(obj) \
- OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
+#define TYPE_MEMORY_REGION "memory-region"
+DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
+ TYPE_MEMORY_REGION)
+
+#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
+typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
+DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
+ IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
+
+#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
+typedef struct RamDiscardManagerClass RamDiscardManagerClass;
+typedef struct RamDiscardManager RamDiscardManager;
+DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
+ RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);
+
+#ifdef CONFIG_FUZZ
+void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr);
+#else
+static inline void fuzz_dma_read_cb(size_t addr,
+ size_t len,
+ MemoryRegion *mr)
+{
+ /* Do Nothing */
+}
+#endif
+
+/* Possible bits for global_dirty_log_{start|stop} */
+
+/* Dirty tracking enabled because migration is running */
+#define GLOBAL_DIRTY_MIGRATION (1U << 0)
-#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
-#define IOMMU_MEMORY_REGION(obj) \
- OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
-#define IOMMU_MEMORY_REGION_CLASS(klass) \
- OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
- TYPE_IOMMU_MEMORY_REGION)
-#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
- TYPE_IOMMU_MEMORY_REGION)
+/* Dirty tracking enabled because measuring dirty rate */
+#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
-extern bool global_dirty_log;
+/* Dirty tracking enabled because dirty limit */
+#define GLOBAL_DIRTY_LIMIT (1U << 2)
+
+#define GLOBAL_DIRTY_MASK (0x7)
+
+extern unsigned int global_dirty_tracking;
typedef struct MemoryRegionOps MemoryRegionOps;
struct ReservedRegion {
- hwaddr low;
- hwaddr high;
+ Range range;
unsigned type;
};
+/**
+ * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @fv: the flat view of the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ * @nonvolatile: this section is non-volatile
+ * @unmergeable: this section should not get merged with adjacent sections
+ */
+struct MemoryRegionSection {
+ Int128 size;
+ MemoryRegion *mr;
+ FlatView *fv;
+ hwaddr offset_within_region;
+ hwaddr offset_within_address_space;
+ bool readonly;
+ bool nonvolatile;
+ bool unmergeable;
+};
+
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
/* See address_space_translate: bit 0 is read, bit 1 is write. */
@@ -80,6 +131,32 @@ struct IOMMUTLBEntry {
/*
* Bitmap for different IOMMUNotifier capabilities. Each notifier can
* register with one or multiple IOMMU Notifier capability bit(s).
+ *
+ * Normally there're two use cases for the notifiers:
+ *
+ * (1) When the device needs accurate synchronizations of the vIOMMU page
+ * tables, it needs to register with both MAP|UNMAP notifies (which
+ * is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
+ *
+ *     Regarding accurate synchronization, it's when the notified
+ * device maintains a shadow page table and must be notified on each
+ * guest MAP (page table entry creation) and UNMAP (invalidation)
+ * events (e.g. VFIO). Both notifications must be accurate so that
+ * the shadow page table is fully in sync with the guest view.
+ *
+ * (2) When the device doesn't need accurate synchronizations of the
+ * vIOMMU page tables, it needs to register only with UNMAP or
+ * DEVIOTLB_UNMAP notifies.
+ *
+ * It's when the device maintains a cache of IOMMU translations
+ * (IOTLB) and is able to fill that cache by requesting translations
+ * from the vIOMMU through a protocol similar to ATS (Address
+ * Translation Service).
+ *
+ * Note that in this mode the vIOMMU will not maintain a shadowed
+ * page table for the address space, and the UNMAP messages can cover
+ * more than the pages that used to get mapped. The IOMMU notifiee
+ * should be able to take care of over-sized invalidations.
*/
typedef enum {
IOMMU_NOTIFIER_NONE = 0,
@@ -87,9 +164,14 @@ typedef enum {
IOMMU_NOTIFIER_UNMAP = 0x1,
/* Notify entry changes (newly created entries) */
IOMMU_NOTIFIER_MAP = 0x2,
+ /* Notify changes on device IOTLB entries */
+ IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;
-#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
+#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
+#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
+ IOMMU_NOTIFIER_DEVIOTLB_EVENTS)
struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
@@ -106,6 +188,11 @@ struct IOMMUNotifier {
};
typedef struct IOMMUNotifier IOMMUNotifier;
+typedef struct IOMMUTLBEvent {
+ IOMMUNotifierFlag type;
+ IOMMUTLBEntry entry;
+} IOMMUTLBEvent;
+
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)
@@ -113,7 +200,7 @@ typedef struct IOMMUNotifier IOMMUNotifier;
#define RAM_SHARED (1 << 1)
/* Only a portion of RAM (used_length) is actually used, and migrated.
- * This used_length size can change across reboots.
+ * Resizing RAM while migrating can result in the migration being canceled.
*/
#define RAM_RESIZEABLE (1 << 2)
@@ -129,6 +216,36 @@ typedef struct IOMMUNotifier IOMMUNotifier;
/* RAM is a persistent kind memory */
#define RAM_PMEM (1 << 5)
+
+/*
+ * UFFDIO_WRITEPROTECT is used on this RAMBlock to
+ * support 'write-tracking' migration type.
+ * Implies ram_state->ram_wt_enabled.
+ */
+#define RAM_UF_WRITEPROTECT (1 << 6)
+
+/*
+ * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
+ * pages if applicable) is skipped: will bail out if not supported. When not
+ * set, the OS will do the reservation, if supported for the memory type.
+ */
+#define RAM_NORESERVE (1 << 7)
+
+/* RAM that isn't accessible through normal means. */
+#define RAM_PROTECTED (1 << 8)
+
+/* RAM is an mmap-ed named file */
+#define RAM_NAMED_FILE (1 << 9)
+
+/* RAM is mmap-ed read-only */
+#define RAM_READONLY (1 << 10)
+
+/* RAM FD is opened read-only */
+#define RAM_READONLY_FD (1 << 11)
+
+/* RAM that can be private, backed by a KVM guest_memfd */
+#define RAM_GUEST_MEMFD (1 << 12)
+
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
IOMMUNotifierFlag flags,
hwaddr start, hwaddr end,
@@ -216,7 +333,7 @@ enum IOMMUMemoryRegionAttr {
IOMMU_ATTR_SPAPR_TCE_FD
};
-/**
+/*
* IOMMUMemoryRegionClass:
*
* All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
@@ -226,15 +343,18 @@ enum IOMMUMemoryRegionAttr {
* The IOMMU implementation must use the IOMMU notifier infrastructure
* to report whenever mappings are changed, by calling
* memory_region_notify_iommu() (or, if necessary, by calling
- * memory_region_notify_one() for each registered notifier).
+ * memory_region_notify_iommu_one() for each registered notifier).
*
* Conceptually an IOMMU provides a mapping from input address
* to an output TLB entry. If the IOMMU is aware of memory transaction
* attributes and the output TLB entry depends on the transaction
* attributes, we represent this using IOMMU indexes. Each index
* selects a particular translation table that the IOMMU has:
+ *
* @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ *
* @translate takes an input address and an IOMMU index
+ *
* and the mapping returned can only depend on the input address and the
* IOMMU index.
*
@@ -242,11 +362,14 @@ enum IOMMUMemoryRegionAttr {
* only a single IOMMU index. A more complex IOMMU might have one index
* for secure transactions and one for non-secure transactions.
*/
-typedef struct IOMMUMemoryRegionClass {
- /* private */
+struct IOMMUMemoryRegionClass {
+ /* private: */
MemoryRegionClass parent_class;
- /*
+ /* public: */
+ /**
+ * @translate:
+ *
* Return a TLB entry that contains a given address.
*
* The IOMMUAccessFlags indicated via @flag are optional and may
@@ -267,26 +390,38 @@ typedef struct IOMMUMemoryRegionClass {
* information when the IOMMU mapping changes.
*
* @iommu: the IOMMUMemoryRegion
+ *
* @hwaddr: address to be translated within the memory region
- * @flag: requested access permissions
+ *
+ * @flag: requested access permission
+ *
* @iommu_idx: IOMMU index for the translation
*/
IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
IOMMUAccessFlags flag, int iommu_idx);
- /* Returns minimum supported page size in bytes.
+ /**
+ * @get_min_page_size:
+ *
+ * Returns minimum supported page size in bytes.
+ *
* If this method is not provided then the minimum is assumed to
* be TARGET_PAGE_SIZE.
*
* @iommu: the IOMMUMemoryRegion
*/
uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
- /* Called when IOMMU Notifier flag changes (ie when the set of
+ /**
+ * @notify_flag_changed:
+ *
+ * Called when IOMMU Notifier flag changes (ie when the set of
* events which IOMMU users are requesting notification for changes).
* Optional method -- need not be provided if the IOMMU does not
* need to know exactly which events must be notified.
*
* @iommu: the IOMMUMemoryRegion
+ *
* @old_flags: events which previously needed to be notified
+ *
* @new_flags: events which now need to be notified
*
* Returns 0 on success, or a negative errno; in particular
@@ -298,7 +433,10 @@ typedef struct IOMMUMemoryRegionClass {
IOMMUNotifierFlag old_flags,
IOMMUNotifierFlag new_flags,
Error **errp);
- /* Called to handle memory_region_iommu_replay().
+ /**
+ * @replay:
+ *
+ * Called to handle memory_region_iommu_replay().
*
* The default implementation of memory_region_iommu_replay() is to
* call the IOMMU translate method for every page in the address space
@@ -315,7 +453,10 @@ typedef struct IOMMUMemoryRegionClass {
*/
void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
- /* Get IOMMU misc attributes. This is an optional method that
+ /**
+ * @get_attr:
+ *
+ * Get IOMMU misc attributes. This is an optional method that
* can be used to allow users of the IOMMU to get implementation-specific
* information. The IOMMU implements this method to handle calls
* by IOMMU users to memory_region_iommu_get_attr() by filling in
@@ -324,7 +465,9 @@ typedef struct IOMMUMemoryRegionClass {
* memory_region_iommu_get_attr() will always return -EINVAL.
*
* @iommu: the IOMMUMemoryRegion
+ *
* @attr: attribute being queried
+ *
* @data: memory to fill in with the attribute data
*
* Returns 0 on success, or a negative errno; in particular
@@ -333,7 +476,10 @@ typedef struct IOMMUMemoryRegionClass {
int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
void *data);
- /* Return the IOMMU index to use for a given set of transaction attributes.
+ /**
+ * @attrs_to_index:
+ *
+ * Return the IOMMU index to use for a given set of transaction attributes.
*
* Optional method: if an IOMMU only supports a single IOMMU index then
* the default implementation of memory_region_iommu_attrs_to_index()
@@ -346,7 +492,10 @@ typedef struct IOMMUMemoryRegionClass {
*/
int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
- /* Return the number of IOMMU indexes this IOMMU supports.
+ /**
+ * @num_indexes:
+ *
+ * Return the number of IOMMU indexes this IOMMU supports.
*
* Optional method: if this method is not provided, then
* memory_region_iommu_num_indexes() will return 1, indicating that
@@ -355,7 +504,279 @@ typedef struct IOMMUMemoryRegionClass {
* @iommu: the IOMMUMemoryRegion
*/
int (*num_indexes)(IOMMUMemoryRegion *iommu);
-} IOMMUMemoryRegionClass;
+
+ /**
+ * @iommu_set_page_size_mask:
+ *
+ * Restrict the page size mask that can be supported with a given IOMMU
+ * memory region. Used for example to propagate host physical IOMMU page
+ * size mask limitations to the virtual IOMMU.
+ *
+ * Optional method: if this method is not provided, then the default global
+ * page mask is used.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @page_size_mask: a bitmask of supported page sizes. At least one bit,
+ * representing the smallest page size, must be set. Additional set bits
+ * represent supported block sizes. For example a host physical IOMMU that
+     * uses page tables with a page size of 4kB, and supports 2MB and 1GB
+ * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
+ * block sizes is specified with mask 0xfffffffffffff000.
+ *
+ * Returns 0 on success, or a negative error. In case of failure, the error
+ * object must be created.
+ */
+ int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
+ uint64_t page_size_mask,
+ Error **errp);
+ /**
+ * @iommu_set_iova_ranges:
+ *
+ * Propagate information about the usable IOVA ranges for a given IOMMU
+ * memory region. Used for example to propagate host physical device
+ * reserved memory region constraints to the virtual IOMMU.
+ *
+ * Optional method: if this method is not provided, then the default IOVA
+ * aperture is used.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ *
+ * @iova_ranges: list of ordered IOVA ranges (at least one range)
+ *
+ * Returns 0 on success, or a negative error. In case of failure, the error
+ * object must be created.
+ */
+ int (*iommu_set_iova_ranges)(IOMMUMemoryRegion *iommu,
+ GList *iova_ranges,
+ Error **errp);
+};
+
+typedef struct RamDiscardListener RamDiscardListener;
+typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+struct RamDiscardListener {
+ /*
+ * @notify_populate:
+ *
+ * Notification that previously discarded memory is about to get populated.
+ * Listeners are able to object. If any listener objects, already
+ * successfully notified listeners are notified about a discard again.
+ *
+ * @rdl: the #RamDiscardListener getting notified
+ * @section: the #MemoryRegionSection to get populated. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ *
+ * Returns 0 on success. If the notification is rejected by the listener,
+ * an error is returned.
+ */
+ NotifyRamPopulate notify_populate;
+
+ /*
+ * @notify_discard:
+ *
+ * Notification that previously populated memory was discarded successfully
+ * and listeners should drop all references to such memory and prevent
+ * new population (e.g., unmap).
+ *
+ * @rdl: the #RamDiscardListener getting notified
+     * @section: the #MemoryRegionSection to get discarded. The section
+ * is aligned within the memory region to the minimum granularity
+ * unless it would exceed the registered section.
+ */
+ NotifyRamDiscard notify_discard;
+
+ /*
+ * @double_discard_supported:
+ *
+     * The listener supports getting @notify_discard notifications that span
+ * already discarded parts.
+ */
+ bool double_discard_supported;
+
+ MemoryRegionSection *section;
+ QLIST_ENTRY(RamDiscardListener) next;
+};
+
+static inline void ram_discard_listener_init(RamDiscardListener *rdl,
+ NotifyRamPopulate populate_fn,
+ NotifyRamDiscard discard_fn,
+ bool double_discard_supported)
+{
+ rdl->notify_populate = populate_fn;
+ rdl->notify_discard = discard_fn;
+ rdl->double_discard_supported = double_discard_supported;
+}
+
+typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
+typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
+
+/*
+ * RamDiscardManagerClass:
+ *
+ * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
+ * regions are currently populated to be used/accessed by the VM, notifying
+ * after parts were discarded (freeing up memory) and before parts will be
+ * populated (consuming memory), to be used/accessed by the VM.
+ *
+ * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
+ * #MemoryRegion isn't mapped into an address space yet (either directly
+ * or via an alias); it cannot change while the #MemoryRegion is
+ * mapped into an address space.
+ *
+ * The #RamDiscardManager is intended to be used by technologies that are
+ * incompatible with discarding of RAM (e.g., VFIO, which may pin all
+ * memory inside a #MemoryRegion), and require proper coordination to only
+ * map the currently populated parts, to hinder parts that are expected to
+ * remain discarded from silently getting populated and consuming memory.
+ * Technologies that support discarding of RAM don't have to bother and can
+ * simply map the whole #MemoryRegion.
+ *
+ * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
+ * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
+ * Logically unplugging memory consists of discarding RAM. The VM agreed to not
+ * access unplugged (discarded) memory - especially via DMA. virtio-mem will
+ * properly coordinate with listeners before memory is plugged (populated),
+ * and after memory is unplugged (discarded).
+ *
+ * Listeners are called in multiples of the minimum granularity (unless it
+ * would exceed the registered range) and changes are aligned to the minimum
+ * granularity within the #MemoryRegion. Listeners have to prepare for memory
+ * becoming discarded in a different granularity than it was populated and the
+ * other way around.
+ */
+struct RamDiscardManagerClass {
+ /* private */
+ InterfaceClass parent_class;
+
+ /* public */
+
+ /**
+ * @get_min_granularity:
+ *
+ * Get the minimum granularity in which listeners will get notified
+ * about changes within the #MemoryRegion via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @mr: the #MemoryRegion
+ *
+ * Returns the minimum granularity.
+ */
+ uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+ /**
+ * @is_populated:
+ *
+ * Check whether the given #MemoryRegionSection is completely populated
+ * (i.e., no parts are currently discarded) via the #RamDiscardManager.
+ * There are no alignment requirements.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ *
+ * Returns whether the given range is completely populated.
+ */
+ bool (*is_populated)(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+ /**
+ * @replay_populated:
+ *
+ * Call the #ReplayRamPopulate callback for all populated parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any call fails, no further calls are made.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamPopulate callback
+ * @opaque: pointer to forward to the callback
+ *
+ * Returns 0 on success, or a negative error if any notification failed.
+ */
+ int (*replay_populated)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamPopulate replay_fn, void *opaque);
+
+ /**
+ * @replay_discarded:
+ *
+ * Call the #ReplayRamDiscard callback for all discarded parts within the
+ * #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * @rdm: the #RamDiscardManager
+ * @section: the #MemoryRegionSection
+ * @replay_fn: the #ReplayRamDiscard callback
+ * @opaque: pointer to forward to the callback
+ */
+ void (*replay_discarded)(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn, void *opaque);
+
+ /**
+ * @register_listener:
+ *
+ * Register a #RamDiscardListener for the given #MemoryRegionSection and
+ * immediately notify the #RamDiscardListener about all populated parts
+ * within the #MemoryRegionSection via the #RamDiscardManager.
+ *
+ * In case any notification fails, no further notifications are triggered
+ * and an error is logged.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ * @section: the #MemoryRegionSection
+ */
+ void (*register_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+ /**
+ * @unregister_listener:
+ *
+ * Unregister a previously registered #RamDiscardListener via the
+ * #RamDiscardManager after notifying the #RamDiscardListener about all
+ * populated parts becoming unpopulated within the registered
+ * #MemoryRegionSection.
+ *
+ * @rdm: the #RamDiscardManager
+ * @rdl: the #RamDiscardListener
+ */
+ void (*unregister_listener)(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+};
+
+uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
+ const MemoryRegion *mr);
+
+bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section);
+
+int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamPopulate replay_fn,
+ void *opaque);
+
+void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscard replay_fn,
+ void *opaque);
+
+void ram_discard_manager_register_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section);
+
+void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl);
+
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+ ram_addr_t *ram_addr, bool *read_only,
+ bool *mr_has_discard_manager);
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
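
A hedged sketch of a RamDiscardListener consumer (a VFIO-like user that must pin only populated parts) wired up through the declarations above; the callback bodies, helper names and surrounding setup are assumptions:

static int demo_notify_populate(RamDiscardListener *rdl,
                                MemoryRegionSection *section)
{
    /* e.g. pin/map the newly populated range; return -errno to object. */
    return 0;
}

static void demo_notify_discard(RamDiscardListener *rdl,
                                MemoryRegionSection *section)
{
    /* e.g. unpin/unmap the discarded range. */
}

static void demo_register(MemoryRegion *mr, MemoryRegionSection *section,
                          RamDiscardListener *rdl)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);

    ram_discard_listener_init(rdl, demo_notify_populate, demo_notify_discard,
                              false);
    if (rdm) {
        /* Immediately replays all populated parts via demo_notify_populate(). */
        ram_discard_manager_register_listener(rdm, rdl, section);
    }
}
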
@@ -377,15 +798,18 @@ struct MemoryRegion {
bool nonvolatile;
bool rom_device;
bool flush_coalesced_mmio;
- bool global_locking;
+ bool unmergeable;
uint8_t dirty_log_mask;
bool is_iommu;
RAMBlock *ram_block;
Object *owner;
+ /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
+ DeviceState *dev;
const MemoryRegionOps *ops;
void *opaque;
MemoryRegion *container;
+ int mapped_via_alias; /* Mapped via an alias, container might be NULL */
Int128 size;
hwaddr addr;
void (*destructor)(MemoryRegion *mr);
@@ -404,6 +828,10 @@ struct MemoryRegion {
const char *name;
unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds;
+ RamDiscardManager *rdm; /* Only for RAM */
+
+ /* For devices designed to perform re-entrant IO into their own IO MRs */
+ bool disable_reentrancy_guard;
};
struct IOMMUMemoryRegion {
@@ -416,8 +844,12 @@ struct IOMMUMemoryRegion {
#define IOMMU_NOTIFIER_FOREACH(n, mr) \
QLIST_FOREACH((n), &(mr)->iommu_notify, node)
+#define MEMORY_LISTENER_PRIORITY_MIN 0
+#define MEMORY_LISTENER_PRIORITY_ACCEL 10
+#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
+
/**
- * MemoryListener: callbacks structure for updates to the physical memory map
+ * struct MemoryListener: callbacks structure for updates to the physical memory map
*
* Allows a component to adjust to changes in the guest-visible memory map.
* Use with memory_listener_register() and memory_listener_unregister().
@@ -488,7 +920,7 @@ struct MemoryListener {
* @log_start:
*
* Called during an address space update transaction, after
- * one of #MemoryListener.region_add(),#MemoryListener.region_del() or
+ * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
* #MemoryListener.region_nop(), if dirty memory logging clients have
* become active since the last transaction.
*
@@ -534,6 +966,21 @@ struct MemoryListener {
void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
/**
+ * @log_sync_global:
+ *
+ * This is the global version of @log_sync when the listener does
+ * not have a way to synchronize the log with finer granularity.
+ * When the listener registers with @log_sync_global defined, then
+ * its @log_sync must be NULL. Vice versa.
+ *
+ * @listener: The #MemoryListener.
+ * @last_stage: The last stage to synchronize the log during migration.
+     * The caller should guarantee that the synchronization with @last_stage
+     * set to true is triggered only once after all VCPUs have been stopped.
+ */
+ void (*log_sync_global)(MemoryListener *listener, bool last_stage);
+
+ /**
* @log_clear:
*
* Called before reading the dirty memory bitmap for a
@@ -554,8 +1001,11 @@ struct MemoryListener {
* active at that time.
*
* @listener: The #MemoryListener.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
- void (*log_global_start)(MemoryListener *listener);
+ bool (*log_global_start)(MemoryListener *listener, Error **errp);
/**
* @log_global_stop:
@@ -648,6 +1098,14 @@ struct MemoryListener {
*/
unsigned priority;
+ /**
+ * @name:
+ *
+ * Name of the listener. It can be used in contexts where we'd like to
+     * distinguish one memory listener from the rest.
+ */
+ const char *name;
+
/* private: */
AddressSpace *address_space;
QTAILQ_ENTRY(MemoryListener) link;
@@ -655,7 +1113,7 @@ struct MemoryListener {
};
/**
- * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
*/
struct AddressSpace {
/* private: */
@@ -667,6 +1125,7 @@ struct AddressSpace {
struct FlatView *current_map;
int ioeventfd_nb;
+ int ioeventfd_notifiers;
struct MemoryRegionIoeventfd *ioeventfds;
QTAILQ_HEAD(, MemoryListener) listeners;
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
@@ -690,31 +1149,38 @@ struct FlatView {
static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
- return atomic_rcu_read(&as->current_map);
+ return qatomic_rcu_read(&as->current_map);
}
+/**
+ * typedef flatview_cb: callback for flatview_for_each_range()
+ *
+ * @start: start address of the range within the FlatView
+ * @len: length of the range in bytes
+ * @mr: MemoryRegion covering this range
+ * @offset_in_region: offset of the first byte of the range within @mr
+ * @opaque: data pointer passed to flatview_for_each_range()
+ *
+ * Returns: true to stop the iteration, false to keep going.
+ */
+typedef bool (*flatview_cb)(Int128 start,
+ Int128 len,
+ const MemoryRegion *mr,
+ hwaddr offset_in_region,
+ void *opaque);
/**
- * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ * flatview_for_each_range: Iterate through a FlatView
+ * @fv: the FlatView to iterate through
+ * @cb: function to call for each range
+ * @opaque: opaque data pointer to pass to @cb
*
- * @mr: the region, or %NULL if empty
- * @fv: the flat view of the address space the region is mapped in
- * @offset_within_region: the beginning of the section, relative to @mr's start
- * @size: the size of the section; will not exceed @mr's boundaries
- * @offset_within_address_space: the address of the first byte of the section
- * relative to the region's address space
- * @readonly: writes to this section are ignored
- * @nonvolatile: this section is non-volatile
+ * A FlatView is made up of a list of non-overlapping ranges, each of
+ * which is a slice of a MemoryRegion. This function iterates through
+ * each range in @fv, calling @cb. The callback function can terminate
+ * iteration early by returning 'true'.
*/
-struct MemoryRegionSection {
- Int128 size;
- MemoryRegion *mr;
- FlatView *fv;
- hwaddr offset_within_region;
- hwaddr offset_within_address_space;
- bool readonly;
- bool nonvolatile;
-};
+void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
MemoryRegionSection *b)
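
A hedged sketch of a flatview_cb implementation matching the typedef above; the helpers are made up, and the caller is assumed to hold the RCU read lock while the FlatView is in use:

static bool demo_count_ranges_cb(Int128 start, Int128 len,
                                 const MemoryRegion *mr,
                                 hwaddr offset_in_region, void *opaque)
{
    (*(unsigned *)opaque)++;
    return false;                   /* false: keep iterating */
}

static unsigned demo_count_ranges(AddressSpace *as)
{
    unsigned count = 0;

    flatview_for_each_range(address_space_to_flatview(as),
                            demo_count_ranges_cb, &count);
    return count;
}
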
@@ -729,6 +1195,26 @@ static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
}
/**
+ * memory_region_section_new_copy: Copy a memory region section
+ *
+ * Allocate memory for a new copy, copy the memory region section, and
+ * properly take a reference on all relevant members.
+ *
+ * @s: the #MemoryRegionSection to copy
+ */
+MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);
+
+/**
+ * memory_region_section_free_copy: Free a copied memory region section
+ *
+ * Free a copy of a memory section created via memory_region_section_new_copy(),
+ * properly dropping references on all relevant members.
+ *
+ * @s: the #MemoryRegionSection copy to free
+ */
+void memory_region_section_free_copy(MemoryRegionSection *s);
+
+/**
* memory_region_init: Initialize a memory region
*
* The region typically acts as a container for other memory regions. Use
@@ -740,7 +1226,7 @@ static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
* @size: size of the region; any subregions beyond this size will be clipped
*/
void memory_region_init(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size);
@@ -788,7 +1274,7 @@ void memory_region_unref(MemoryRegion *mr);
* @size: size of the region.
*/
void memory_region_init_io(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -808,42 +1294,49 @@ void memory_region_init_io(MemoryRegion *mr,
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
/**
- * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region.
- * Accesses into the region will
- * modify memory directly.
+ * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
+ * Accesses into the region will
+ * modify memory directly.
*
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
* @name: Region name, becomes part of RAMBlock name used in migration stream
* must be unique within any device
* @size: size of the region.
- * @share: allow remapping RAM to different addresses
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
+ * RAM_GUEST_MEMFD.
* @errp: pointer to Error*, to store an error if it happens.
*
- * Note that this function is similar to memory_region_init_ram_nomigrate.
- * The only difference is part of the RAM region can be remapped.
+ * Note that this function does not do anything to cause the data in the
+ * RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
- struct Object *owner,
- const char *name,
- uint64_t size,
- bool share,
- Error **errp);
+bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ uint32_t ram_flags,
+ Error **errp);
/**
- * memory_region_init_resizeable_ram: Initialize memory region with resizeable
+ * memory_region_init_resizeable_ram: Initialize memory region with resizable
* RAM. Accesses into the region will
* modify memory directly. Only an initial
* portion of this RAM is actually used.
- * The used size can change across reboots.
+ * Changing the size while migrating
+ * can result in the migration being
+ * canceled.
*
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
@@ -856,9 +1349,11 @@ void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_resizeable_ram(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_resizeable_ram(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
uint64_t max_size,
@@ -879,23 +1374,26 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
* @size: size of the region.
* @align: alignment of the region base address; if 0, the default alignment
* (getpagesize()) will be used.
- * @ram_flags: Memory region features:
- * - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
- * - RAM_PMEM: the memory is persistent memory
- * Other bits are ignored now.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @path: the path in which to allocate the RAM.
+ * @offset: offset within the file referenced by path
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_from_file(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram_from_file(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
uint64_t align,
uint32_t ram_flags,
const char *path,
+ ram_addr_t offset,
Error **errp);
/**
@@ -906,19 +1404,25 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
* @owner: the object that tracks the region's reference count
* @name: the name of the region.
* @size: size of the region.
- * @share: %true if memory must be mmaped with the MAP_SHARED flag
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @fd: the fd to mmap.
+ * @offset: offset within the file referenced by fd
* @errp: pointer to Error*, to store an error if it happens.
*
* Note that this function does not do anything to cause the data in the
* RAM memory region to be migrated; that is the responsibility of the caller.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram_from_fd(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram_from_fd(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
- bool share,
+ uint32_t ram_flags,
int fd,
+ ram_addr_t offset,
Error **errp);
#endif
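
A hedged sketch of a caller adapting to the boolean return and the ram_flags parameter of memory_region_init_ram_from_file(); the name, size, path and wrapper are made up:

static bool demo_init_shared_file_ram(MemoryRegion *mr, Object *owner,
                                      Error **errp)
{
    /* Shared, file-backed RAM; failure is now reported via the return value. */
    return memory_region_init_ram_from_file(mr, owner, "demo-ram",
                                            64 * 1024 * 1024,   /* 64 MiB */
                                            0,                  /* default align */
                                            RAM_SHARED,
                                            "/dev/shm/demo-ram",
                                            0,                  /* offset */
                                            errp);
}
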
@@ -938,7 +1442,7 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr,
* RAM memory region to be migrated; that is the responsibility of the caller.
*/
void memory_region_init_ram_ptr(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
void *ptr);
@@ -966,7 +1470,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
* (For RAM device memory regions, migrating the contents rarely makes sense.)
*/
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
uint64_t size,
void *ptr);
@@ -984,7 +1488,7 @@ void memory_region_init_ram_device_ptr(MemoryRegion *mr,
* @size: size of the region.
*/
void memory_region_init_alias(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const char *name,
MemoryRegion *orig,
hwaddr offset,
@@ -1007,9 +1511,11 @@ void memory_region_init_alias(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
@@ -1030,9 +1536,11 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -1089,9 +1597,11 @@ void memory_region_init_iommu(void *_iommu_mr,
* give the RAM block a unique name for migration purposes.
* We should lift this restriction and allow arbitrary Objects.
* If you pass a non-NULL non-device @owner then we will assert.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_ram(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_ram(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
@@ -1116,9 +1626,11 @@ void memory_region_init_ram(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom(MemoryRegion *mr,
+ Object *owner,
const char *name,
uint64_t size,
Error **errp);
@@ -1147,9 +1659,11 @@ void memory_region_init_rom(MemoryRegion *mr,
* must be unique within any device
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
*/
-void memory_region_init_rom_device(MemoryRegion *mr,
- struct Object *owner,
+bool memory_region_init_rom_device(MemoryRegion *mr,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
@@ -1162,7 +1676,7 @@ void memory_region_init_rom_device(MemoryRegion *mr,
*
* @mr: the memory region being queried.
*/
-struct Object *memory_region_owner(MemoryRegion *mr);
+Object *memory_region_owner(MemoryRegion *mr);
/**
* memory_region_size: get a memory region's size.
@@ -1206,6 +1720,26 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
}
/**
+ * memory_region_is_protected: check whether a memory region is protected
+ *
+ * Returns %true if a memory region is protected RAM and cannot be accessed
+ * via standard mechanisms, e.g. DMA.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_protected(MemoryRegion *mr);
+
+/**
+ * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
+ * associated
+ *
+ * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_has_guest_memfd(MemoryRegion *mr);
+
+/**
* memory_region_get_iommu: check whether a memory region is an iommu
*
* Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
@@ -1254,39 +1788,43 @@ uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
/**
* memory_region_notify_iommu: notify a change in an IOMMU translation entry.
*
- * The notification type will be decided by entry.perm bits:
- *
- * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
- * - For MAP (newly added entry) notifies: set entry.perm to the
- * permission of the page (which is definitely !IOMMU_NONE).
- *
* Note: for any IOMMU implementation, an in-place mapping change
* should be notified with an UNMAP followed by a MAP.
*
* @iommu_mr: the memory region that was changed
* @iommu_idx: the IOMMU index for the translation table which has changed
- * @entry: the new entry in the IOMMU translation table. The entry
- * replaces all old entries for the same virtual I/O address range.
- * Deleted entries have .@perm == 0.
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
*/
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
int iommu_idx,
- IOMMUTLBEntry entry);
+ IOMMUTLBEvent event);
/**
- * memory_region_notify_one: notify a change in an IOMMU translation
+ * memory_region_notify_iommu_one: notify a change in an IOMMU translation
* entry to a single notifier
*
* This works just like memory_region_notify_iommu(), but it only
* notifies a specific notifier, not all of them.
*
* @notifier: the notifier to be notified
- * @entry: the new entry in the IOMMU translation table. The entry
- * replaces all old entries for the same virtual I/O address range.
- * Deleted entries have .@perm == 0.
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
*/
-void memory_region_notify_one(IOMMUNotifier *notifier,
- IOMMUTLBEntry *entry);
+void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
+ IOMMUTLBEvent *event);
+
+/**
+ * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
+ * translation that covers the
+ * range of a notifier
+ *
+ * @notifier: the notifier to be notified
+ */
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
+
/**
* memory_region_register_iommu_notifier: register a notifier for changes to
@@ -1364,6 +1902,30 @@ int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
/**
+ * memory_region_iommu_set_page_size_mask: set the supported page
+ * sizes for a given IOMMU memory region
+ *
+ * @iommu_mr: IOMMU memory region
+ * @page_size_mask: supported page size mask
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
+ uint64_t page_size_mask,
+ Error **errp);
+
+/**
+ * memory_region_iommu_set_iova_ranges - Set the usable IOVA ranges
+ * for a given IOMMU MR region
+ *
+ * @iommu: IOMMU memory region
+ * @iova_ranges: list of ordered IOVA ranges (at least one range)
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu,
+ GList *iova_ranges,
+ Error **errp);
+
+/**
* memory_region_name: get a memory region's name
*
* Returns the string that was used to initialize the memory region.
@@ -1437,7 +1999,7 @@ int memory_region_get_fd(MemoryRegion *mr);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1454,7 +2016,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1464,8 +2026,8 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr);
/* memory_region_ram_resize: Resize a RAM region.
*
- * Only legal before guest might have detected the memory size: e.g. on
- * incoming migration, or right after reset.
+ * Resizing RAM while migrating can result in the migration being canceled.
+ * Care has to be taken if the guest might have already detected the memory.
*
* @mr: a memory region created with @memory_region_init_resizeable_ram.
* @newsize: the new size the region
@@ -1545,7 +2107,7 @@ void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
* querying the same page multiple times, which is especially useful for
* display updates where the scanlines often are not page aligned.
*
- * The dirty bitmap region which gets copyed into the snapshot (and
+ * The dirty bitmap region which gets copied into the snapshot (and
* cleared afterwards) can be larger than requested. The boundaries
* are rounded up/down so complete bitmap longs (covering 64 pages on
* 64bit hosts) can be copied over into the bitmap snapshot. Which
@@ -1712,19 +2274,6 @@ void memory_region_set_flush_coalesced(MemoryRegion *mr);
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
- * memory_region_clear_global_locking: Declares that access processing does
- * not depend on the QEMU global lock.
- *
- * By clearing this property, accesses to the memory region will be processed
- * outside of QEMU's global lock (unless the lock is held on when issuing the
- * access request). In this case, the device model implementing the access
- * handlers is responsible for synchronization of concurrency.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_clear_global_locking(MemoryRegion *mr);
-
-/**
* memory_region_add_eventfd: Request an eventfd to be triggered when a word
* is written to a location.
*
@@ -1874,6 +2423,25 @@ void memory_region_set_size(MemoryRegion *mr, uint64_t size);
void memory_region_set_alias_offset(MemoryRegion *mr,
hwaddr offset);
+/*
+ * memory_region_set_unmergeable: Set a memory region unmergeable
+ *
+ * Mark a memory region unmergeable, resulting in the memory region (or
+ * everything contained in a memory region container) not getting merged when
+ * simplifying the address space and notifying memory listeners. Consequently,
+ * memory listeners will never get notified about ranges that are larger than
+ * the original memory regions.
+ *
+ * This is primarily useful when multiple aliases to a RAM memory region are
+ * mapped into a memory region container, and updates (e.g., enable/disable or
+ * map/unmap) of individual memory region aliases are not supposed to affect
+ * other memory regions in the same container.
+ *
+ * @mr: the #MemoryRegion to be updated
+ * @unmergeable: whether to mark the #MemoryRegion unmergeable
+ */
+void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
+
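As a usage sketch (not part of this patch): a device model that maps two aliases of one RAM region into a container could mark the aliases unmergeable so that toggling one alias never triggers listener updates spanning both; owner, backing_ram, region_size and half are placeholder names.

MemoryRegion container, alias_lo, alias_hi;

memory_region_init(&container, owner, "dev-container", region_size);
memory_region_init_alias(&alias_lo, owner, "alias-lo", backing_ram, 0, half);
memory_region_init_alias(&alias_hi, owner, "alias-hi", backing_ram, half, half);

/* Keep listeners from ever seeing one merged range covering both aliases. */
memory_region_set_unmergeable(&alias_lo, true);
memory_region_set_unmergeable(&alias_hi, true);

memory_region_add_subregion(&container, 0, &alias_lo);
memory_region_add_subregion(&container, half, &alias_hi);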
/**
* memory_region_present: checks if an address relative to a @container
* translates into #MemoryRegion within @container
@@ -1888,13 +2456,49 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr);
/**
* memory_region_is_mapped: returns true if #MemoryRegion is mapped
- * into any address space.
+ * into another memory region, which does not necessarily imply that it is
+ * mapped into an address space.
*
* @mr: a #MemoryRegion which should be checked if it's mapped
*/
bool memory_region_is_mapped(MemoryRegion *mr);
/**
+ * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * The #RamDiscardManager cannot change while a memory region is mapped.
+ *
+ * @mr: the #MemoryRegion
+ */
+RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
+
+/**
+ * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
+ * #RamDiscardManager assigned
+ *
+ * @mr: the #MemoryRegion
+ */
+static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
+{
+ return !!memory_region_get_ram_discard_manager(mr);
+}
+
+/**
+ * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
+ * #MemoryRegion
+ *
+ * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
+ * that does not cover RAM, or a #MemoryRegion that already has a
+ * #RamDiscardManager assigned.
+ *
+ * @mr: the #MemoryRegion
+ * @rdm: #RamDiscardManager to set
+ */
+void memory_region_set_ram_discard_manager(MemoryRegion *mr,
+ RamDiscardManager *rdm);
+
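A hedged sketch of how the provider and consumer sides are expected to use these accessors; the rdm object, the section variable and the surrounding context are illustrative only.

/* Provider (e.g. a coordinating memory backend), before the region is mapped: */
assert(!memory_region_has_ram_discard_manager(mr));
memory_region_set_ram_discard_manager(mr, rdm);

/* Consumer, deciding whether a section needs coordinated handling: */
RamDiscardManager *section_rdm =
    memory_region_get_ram_discard_manager(section->mr);
if (section_rdm) {
    /* walk only the populated parts via the RamDiscardManager callbacks */
}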
+/**
* memory_region_find: translate an address/size relative to a
* MemoryRegion into a #MemoryRegionSection.
*
@@ -1929,8 +2533,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
*
* Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
*/
-void memory_global_dirty_log_sync(void);
+void memory_global_dirty_log_sync(bool last_stage);
/**
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
@@ -1976,16 +2582,27 @@ void memory_listener_unregister(MemoryListener *listener);
/**
* memory_global_dirty_log_start: begin dirty logging for all regions
+ *
+ * @flags: purpose of starting dirty log, migration or dirty rate
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success; on failure, returns false and sets @errp.
*/
-void memory_global_dirty_log_start(void);
+bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
/**
* memory_global_dirty_log_stop: end dirty logging for all regions
+ *
+ * @flags: purpose of stopping dirty log, migration or dirty rate
*/
-void memory_global_dirty_log_stop(void);
+void memory_global_dirty_log_stop(unsigned int flags);
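A brief sketch of the flag-based interface, reusing the GLOBAL_DIRTY_DIRTY_RATE flag that appears later in this patch; the Error handling is the usual pattern and purely illustrative.

Error *local_err = NULL;

if (!memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE, &local_err)) {
    error_report_err(local_err);
    return;
}
/* ... sample dirty pages; pass true only for the final synchronization ... */
memory_global_dirty_log_sync(false);
memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE);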
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+
/**
* memory_region_dispatch_read: perform a read directly to the specified
* MemoryRegion.
@@ -2133,12 +2750,12 @@ MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
struct MemoryRegionCache {
void *ptr;
@@ -2149,9 +2766,6 @@ struct MemoryRegionCache {
bool is_write;
};
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
-
-
/* address_space_ld*_cached: load from a cached #MemoryRegion
* address_space_st*_cached: store into a cached #MemoryRegion
*
@@ -2179,7 +2793,7 @@ struct MemoryRegionCache {
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
-#include "exec/memory_ldst.inc.h"
+#include "exec/memory_ldst.h.inc"
/* Inline fast path for direct RAM access. */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
@@ -2194,7 +2808,7 @@ static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
}
static inline void address_space_stb_cached(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len);
if (likely(cache->ptr)) {
@@ -2205,15 +2819,15 @@ static inline void address_space_stb_cached(MemoryRegionCache *cache,
}
#define ENDIANNESS _le
-#include "exec/memory_ldst_cached.inc.h"
+#include "exec/memory_ldst_cached.h.inc"
#define ENDIANNESS _be
-#include "exec/memory_ldst_cached.inc.h"
+#include "exec/memory_ldst_cached.h.inc"
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
-#include "exec/memory_ldst_phys.inc.h"
+#include "exec/memory_ldst_phys.h.inc"
/* address_space_cache_init: prepare for repeated access to a physical
* memory region
@@ -2241,6 +2855,21 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
bool is_write);
/**
+ * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ *
+ * Initializes a #MemoryRegionCache structure without attaching it to a
+ * memory region. A cache initialized this way can only be safely destroyed;
+ * it cannot be used for accesses.
+ */
+static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
+{
+ cache->mrs.mr = NULL;
+ /* There is no real need to initialize fv, but it makes Coverity happy. */
+ cache->fv = NULL;
+}
+
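For illustration (a sketch, assuming address_space_cache_init() and address_space_cache_destroy() behave as in the existing API; as, ring_addr, ring_len and ring_enabled are placeholders): a device that may or may not map a ring later can start from an empty cache so its cleanup path stays unconditional.

MemoryRegionCache ring_cache;

address_space_cache_init_empty(&ring_cache);
if (ring_enabled) {
    address_space_cache_init(&ring_cache, as, ring_addr, ring_len, false);
}
/* ... */
address_space_cache_destroy(&ring_cache);   /* safe whether or not it was mapped */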
+/**
* address_space_cache_invalidate: complete a write to a #MemoryRegionCache
*
* @cache: The #MemoryRegionCache to operate on.
@@ -2361,6 +2990,9 @@ MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
hwaddr addr, const void *buf,
hwaddr len);
+int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
+bool prepare_mmio_access(MemoryRegion *mr);
+
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
if (is_write) {
@@ -2429,6 +3061,7 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
if (likely(cache->ptr)) {
memcpy(buf, cache->ptr + addr, len);
return MEMTX_OK;
@@ -2458,6 +3091,22 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
}
}
+/**
+ * address_space_set: Fill address space with a constant byte.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
+ uint8_t c, hwaddr len, MemTxAttrs attrs);
+
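A short usage sketch, assuming the usual address_space_memory and MEMTXATTRS_UNSPECIFIED defaults; guest_addr and region_len are illustrative.

MemTxResult res = address_space_set(&address_space_memory, guest_addr, 0x00,
                                    region_len, MEMTXATTRS_UNSPECIFIED);
if (res != MEMTX_OK) {
    qemu_log("clearing guest memory at 0x%" HWADDR_PRIx " failed\n", guest_addr);
}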
#ifdef NEED_CPU_H
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
@@ -2465,7 +3114,7 @@ static inline MemOp devend_memop(enum device_endian end)
QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/* Swap if non-host endianness or native (target) endianness */
return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
@@ -2502,6 +3151,12 @@ static inline MemOp devend_memop(enum device_endian end)
int ram_block_discard_disable(bool state);
/*
+ * See ram_block_discard_disable(): only disable uncoordinated discards,
+ * keeping coordinated discards (via the RamDiscardManager) enabled.
+ */
+int ram_block_uncoordinated_discard_disable(bool state);
+
+/*
* Inhibit technologies that disable discarding of pages in RAM blocks.
*
* Returns 0 if successful. Returns -EBUSY if discards are already set to
@@ -2510,12 +3165,20 @@ int ram_block_discard_disable(bool state);
int ram_block_discard_require(bool state);
/*
- * Test if discarding of memory in ram blocks is disabled.
+ * See ram_block_discard_require(): only inhibit technologies that disable
+ * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
+ * technologies that only inhibit uncoordinated discards (via the
+ * RamDiscardManager).
+ */
+int ram_block_coordinated_discard_require(bool state);
+
+/*
+ * Test if any discarding of memory in ram blocks is disabled.
*/
bool ram_block_discard_is_disabled(void);
/*
- * Test if discarding of memory in ram blocks is required to work reliably.
+ * Test if any discarding of memory in ram blocks is required to work reliably.
*/
bool ram_block_discard_is_required(void);
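A sketch of how the split interfaces are meant to pair up, with the device realize/unrealize context hypothetical: a consumer that only breaks on uncoordinated discards (coordinated ones go through the RamDiscardManager) disables just those.

/* realize: */
if (ram_block_uncoordinated_discard_disable(true)) {
    error_setg(errp, "uncoordinated discarding of RAM is required elsewhere");
    return;
}

/* unrealize: drop the inhibition again */
ram_block_uncoordinated_discard_disable(false);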
diff --git a/include/exec/memory_ldst.inc.h b/include/exec/memory_ldst.h.inc
index 272c20f02e..92ad74e956 100644
--- a/include/exec/memory_ldst.inc.h
+++ b/include/exec/memory_ldst.h.inc
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,48 +20,48 @@
*/
#ifdef TARGET_ENDIANNESS
-extern uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw, SUFFIX)(ARG1_DECL,
+void glue(address_space_stw, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stl, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#else
-extern uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
+uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
+uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
+uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stb, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stb, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result);
+void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
-extern void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
+void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#endif
diff --git a/include/exec/memory_ldst_cached.inc.h b/include/exec/memory_ldst_cached.h.inc
index fd4bbb40e7..d7834f852c 100644
--- a/include/exec/memory_ldst_cached.inc.h
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,10 +24,23 @@
#define LD_P(size) \
glue(glue(ld, size), glue(ENDIANNESS, _p))
+static inline uint16_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 2 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ return LD_P(uw)(cache->ptr + addr);
+ } else {
+ return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
+ }
+}
+
static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 4 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 4, cache->mrs.mr);
if (likely(cache->ptr)) {
return LD_P(l)(cache->ptr + addr);
} else {
@@ -39,6 +52,7 @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 8 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 8, cache->mrs.mr);
if (likely(cache->ptr)) {
return LD_P(q)(cache->ptr + addr);
} else {
@@ -46,17 +60,6 @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
}
}
-static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
- hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
-{
- assert(addr < cache->len && 2 <= cache->len - addr);
- if (likely(cache->ptr)) {
- return LD_P(uw)(cache->ptr + addr);
- } else {
- return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
- }
-}
-
#undef ADDRESS_SPACE_LD_CACHED
#undef ADDRESS_SPACE_LD_CACHED_SLOW
#undef LD_P
@@ -68,25 +71,25 @@ static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
#define ST_P(size) \
glue(glue(st, size), glue(ENDIANNESS, _p))
-static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
- assert(addr < cache->len && 4 <= cache->len - addr);
+ assert(addr < cache->len && 2 <= cache->len - addr);
if (likely(cache->ptr)) {
- ST_P(l)(cache->ptr + addr, val);
+ ST_P(w)(cache->ptr + addr, val);
} else {
- ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
+ ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
}
}
-static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
- assert(addr < cache->len && 2 <= cache->len - addr);
+ assert(addr < cache->len && 4 <= cache->len - addr);
if (likely(cache->ptr)) {
- ST_P(w)(cache->ptr + addr, val);
+ ST_P(l)(cache->ptr + addr, val);
} else {
- ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
+ ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
}
}
diff --git a/include/exec/memory_ldst_phys.inc.h b/include/exec/memory_ldst_phys.h.inc
index 91f72973cb..ecd678610d 100644
--- a/include/exec/memory_ldst_phys.inc.h
+++ b/include/exec/memory_ldst_phys.h.inc
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,6 +20,12 @@
*/
#ifdef TARGET_ENDIANNESS
+static inline uint16_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl, SUFFIX)(ARG1, addr,
@@ -32,10 +38,10 @@ static inline uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
{
- return glue(address_space_lduw, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
@@ -44,18 +50,30 @@ static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq, SUFFIX)(ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
#else
+static inline uint8_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldub, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint16_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint16_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
@@ -80,22 +98,22 @@ static inline uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint8_t val)
{
- return glue(address_space_ldub, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stb, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
{
- return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
{
- return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
@@ -110,24 +128,6 @@ static inline void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t va
MEMTXATTRS_UNSPECIFIED, NULL);
}
-static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stb, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
static inline void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h
new file mode 100644
index 0000000000..54ddde308a
--- /dev/null
+++ b/include/exec/page-vary.h
@@ -0,0 +1,52 @@
+/*
+ * Definitions for cpus with variable page sizes.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EXEC_PAGE_VARY_H
+#define EXEC_PAGE_VARY_H
+
+typedef struct {
+ bool decided;
+ int bits;
+ uint64_t mask;
+} TargetPageBits;
+
+#ifdef IN_PAGE_VARY
+bool set_preferred_target_page_bits_common(int bits);
+void finalize_target_page_bits_common(int min);
+#endif
+
+/**
+ * set_preferred_target_page_bits:
+ * @bits: number of bits needed to represent an address within the page
+ *
+ * Set the preferred target page size (the actual target page
+ * size may be smaller than any given CPU's preference).
+ * Returns true on success, false on failure (which can only happen
+ * if this is called after the system has already finalized its
+ * choice of page size and the requested page size is smaller than that).
+ */
+bool set_preferred_target_page_bits(int bits);
+
+/**
+ * finalize_target_page_bits:
+ * Commit the final value set by set_preferred_target_page_bits.
+ */
+void finalize_target_page_bits(void);
+
+#endif /* EXEC_PAGE_VARY_H */
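A hedged sketch of the expected call sequence: each CPU states its preference while being realized, and the board or accelerator code commits the result once; the 12-bit (4 KiB) value is illustrative.

/* per-CPU realize: */
if (!set_preferred_target_page_bits(12)) {
    /* already finalized with a smaller page size; nothing more to do */
}

/* once all CPUs exist: */
finalize_target_page_bits();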
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
index 4834a9e2f4..c4552b5061 100644
--- a/include/exec/plugin-gen.h
+++ b/include/exec/plugin-gen.h
@@ -12,36 +12,25 @@
#ifndef QEMU_PLUGIN_GEN_H
#define QEMU_PLUGIN_GEN_H
-#include "qemu/plugin.h"
#include "tcg/tcg.h"
struct DisasContextBase;
#ifdef CONFIG_PLUGIN
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb);
-void plugin_gen_tb_end(CPUState *cpu);
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db,
bool suppress);
+void plugin_gen_tb_end(CPUState *cpu, size_t num_insns);
void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
void plugin_gen_insn_end(void);
void plugin_gen_disable_mem_helpers(void);
-void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info);
-
-static inline void plugin_insn_append(const void *from, size_t size)
-{
- struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
-
- if (insn == NULL) {
- return;
- }
-
- insn->data = g_byte_array_append(insn->data, from, size);
-}
+void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info);
#else /* !CONFIG_PLUGIN */
-static inline
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb)
+static inline bool
+plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup)
{
return false;
}
@@ -53,16 +42,13 @@ void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db)
static inline void plugin_gen_insn_end(void)
{ }
-static inline void plugin_gen_tb_end(CPUState *cpu)
+static inline void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{ }
static inline void plugin_gen_disable_mem_helpers(void)
{ }
-static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
-{ }
-
-static inline void plugin_insn_append(const void *from, size_t size)
+static inline void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{ }
#endif /* CONFIG_PLUGIN */
diff --git a/include/exec/poison.h b/include/exec/poison.h
index 7b9ac361dc..792a83f493 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -3,7 +3,8 @@
#ifndef HW_POISON_H
#define HW_POISON_H
-#ifdef __GNUC__
+
+#include "config-poison.h"
#pragma GCC poison TARGET_I386
#pragma GCC poison TARGET_X86_64
@@ -11,8 +12,9 @@
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_HEXAGON
#pragma GCC poison TARGET_HPPA
-#pragma GCC poison TARGET_LM32
+#pragma GCC poison TARGET_LOONGARCH64
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MICROBLAZE
#pragma GCC poison TARGET_MIPS
@@ -20,8 +22,6 @@
#pragma GCC poison TARGET_ABI_MIPSO32
#pragma GCC poison TARGET_MIPS64
#pragma GCC poison TARGET_ABI_MIPSN64
-#pragma GCC poison TARGET_MOXIE
-#pragma GCC poison TARGET_NIOS2
#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPC64
@@ -31,16 +31,13 @@
#pragma GCC poison TARGET_SH4
#pragma GCC poison TARGET_SPARC
#pragma GCC poison TARGET_SPARC64
-#pragma GCC poison TARGET_TILEGX
#pragma GCC poison TARGET_TRICORE
-#pragma GCC poison TARGET_UNICORE32
#pragma GCC poison TARGET_XTENSA
-#pragma GCC poison TARGET_ALIGNED_ONLY
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_SUPPORTS_MTTCG
-#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison TARGET_BIG_ENDIAN
#pragma GCC poison BSWAP_NEEDED
#pragma GCC poison TARGET_LONG_BITS
@@ -53,8 +50,6 @@
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN
-#pragma GCC poison CPUArchState
-
#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
@@ -69,18 +64,14 @@
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
#pragma GCC poison CONFIG_ALPHA_DIS
-#pragma GCC poison CONFIG_ARM_A64_DIS
-#pragma GCC poison CONFIG_ARM_DIS
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
-#pragma GCC poison CONFIG_LM32_DIS
+#pragma GCC poison CONFIG_HEXAGON_DIS
+#pragma GCC poison CONFIG_LOONGARCH_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
#pragma GCC poison CONFIG_MIPS_DIS
-#pragma GCC poison CONFIG_NANOMIPS_DIS
-#pragma GCC poison CONFIG_MOXIE_DIS
-#pragma GCC poison CONFIG_NIOS2_DIS
#pragma GCC poison CONFIG_PPC_DIS
#pragma GCC poison CONFIG_RISCV_DIS
#pragma GCC poison CONFIG_S390_DIS
@@ -88,9 +79,10 @@
#pragma GCC poison CONFIG_SPARC_DIS
#pragma GCC poison CONFIG_XTENSA_DIS
+#pragma GCC poison CONFIG_HVF
#pragma GCC poison CONFIG_LINUX_USER
#pragma GCC poison CONFIG_KVM
-#pragma GCC poison CONFIG_SOFTMMU
+#pragma GCC poison CONFIG_WHPX
+#pragma GCC poison CONFIG_XEN
#endif
-#endif
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 3ef729a23c..07c8f86375 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -25,6 +25,9 @@
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"
+#include "exec/exec-all.h"
+
+extern uint64_t total_dirty_pages;
/**
* clear_bmap_size: calculate clear bitmap size
@@ -40,7 +43,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
}
/**
- * clear_bmap_set: set clear bitmap for the page range
+ * clear_bmap_set: set clear bitmap for the page range. Must be called with
+ * bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @start: the start page number
@@ -53,12 +57,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
{
uint8_t shift = rb->clear_bmap_shift;
- bitmap_set_atomic(rb->clear_bmap, start >> shift,
- clear_bmap_size(npages, shift));
+ bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
}
/**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
+ * Must be called with bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @page: the page number to check
@@ -69,7 +73,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
uint8_t shift = rb->clear_bmap_shift;
- return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+ return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -104,12 +108,11 @@ long qemu_maxrampagesize(void);
* Parameters:
* @size: the size in bytes of the ram block
* @mr: the memory region where the ram block is
- * @ram_flags: specify the properties of the ram block, which can be one
- * or bit-or of following values
- * - RAM_SHARED: mmap the backing file or device with MAP_SHARED
- * - RAM_PMEM: the backend @mem_path or @fd is persistent memory
- * Other bits are ignored.
+ * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
+ * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
+ * RAM_READONLY_FD, RAM_GUEST_MEMFD
* @mem_path or @fd: specify the backing file or device
+ * @offset: Offset into target file
* @errp: pointer to Error*, to store an error if it happens
*
* Return:
@@ -118,14 +121,14 @@ long qemu_maxrampagesize(void);
*/
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
uint32_t ram_flags, const char *mem_path,
- Error **errp);
+ off_t offset, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
- uint32_t ram_flags, int fd,
+ uint32_t ram_flags, int fd, off_t offset,
Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp);
-RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
+RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
@@ -147,8 +150,6 @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
-
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
@@ -164,7 +165,7 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@@ -205,7 +206,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
RCU_READ_LOCK_GUARD();
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@@ -278,7 +279,7 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
RCU_READ_LOCK_GUARD();
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]);
}
@@ -301,7 +302,7 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
}
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
@@ -334,16 +335,25 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
}
#if !defined(_WIN32)
-static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
- ram_addr_t start,
- ram_addr_t pages)
+
+/*
+ * Contrary to cpu_physical_memory_sync_dirty_bitmap(), this function returns
+ * the number of dirty pages in the @bitmap passed as argument. On the other hand,
+ * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
+ * weren't set in the global migration bitmap.
+ */
+static inline
+uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+ ram_addr_t start,
+ ram_addr_t pages)
{
unsigned long i, j;
- unsigned long page_number, c;
+ unsigned long page_number, c, nbits;
hwaddr addr;
ram_addr_t ram_addr;
+ uint64_t num_dirty = 0;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
+ unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
@@ -361,23 +371,32 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ blocks[i] =
+ qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
}
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
- atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
-
- if (global_dirty_log) {
- atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
- temp);
+ nbits = ctpopl(temp);
+ qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+ if (global_dirty_tracking) {
+ qatomic_or(
+ &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
+ if (unlikely(
+ global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
}
+ num_dirty += nbits;
+
if (tcg_enabled()) {
- atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
- temp);
+ qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+ temp);
}
}
@@ -392,7 +411,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
- if (!global_dirty_log) {
+ if (!global_dirty_tracking) {
clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
}
@@ -403,6 +422,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
for (i = 0; i < len; i++) {
if (bitmap[i] != 0) {
c = leul_to_cpu(bitmap[i]);
+ nbits = ctpopl(c);
+ if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
+ total_dirty_pages += nbits;
+ }
+ num_dirty += nbits;
do {
j = ctzl(c);
c &= ~(1ul << j);
@@ -415,9 +439,19 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
}
}
+
+ return num_dirty;
}
#endif /* not _WIN32 */
+static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
+ ram_addr_t length)
+{
+ if (tcg_enabled()) {
+ tlb_reset_dirty_range_all(start, length);
+ }
+}
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
@@ -461,12 +495,12 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
DIRTY_MEMORY_BLOCK_SIZE);
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
- src = atomic_rcu_read(
+ src = qatomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
if (src[idx][offset]) {
- unsigned long bits = atomic_xchg(&src[idx][offset], 0);
+ unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
@@ -479,6 +513,9 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
idx++;
}
}
+ if (num_dirty) {
+ cpu_physical_memory_dirty_bits_cleared(start, length);
+ }
if (rb->clear_bmap) {
/*
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
index 07d50864d8..0babd105c0 100644
--- a/include/exec/ramblock.h
+++ b/include/exec/ramblock.h
@@ -21,6 +21,8 @@
#ifndef CONFIG_USER_ONLY
#include "cpu-common.h"
+#include "qemu/rcu.h"
+#include "exec/ramlist.h"
struct RAMBlock {
struct rcu_head rcu;
@@ -32,16 +34,31 @@ struct RAMBlock {
ram_addr_t max_length;
void (*resized)(const char*, uint64_t length, void *host);
uint32_t flags;
- /* Protected by iothread lock. */
+ /* Protected by the BQL. */
char idstr[256];
/* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;
QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
int fd;
+ uint64_t fd_offset;
+ int guest_memfd;
size_t page_size;
/* dirty bitmap used during migration */
unsigned long *bmap;
- /* bitmap of already received pages in postcopy */
+
+ /*
+ * Below fields are only used by mapped-ram migration
+ */
+ /* bitmap of pages present in the migration file */
+ unsigned long *file_bmap;
+ /*
+ * offset in the file at which pages belonging to this ramblock are saved,
+ * used only during migration to a file.
+ */
+ off_t bitmap_offset;
+ uint64_t pages_offset;
+
+ /* Bitmap of already received pages. Only used on destination side. */
unsigned long *receivedmap;
/*
@@ -51,6 +68,9 @@ struct RAMBlock {
* and split clearing of dirty bitmap on the remote node (e.g.,
* KVM). The bitmap will be set only when doing global sync.
*
+ * It is only used on the source side of ram migration, and it is
+ * protected by the global ram_state.bitmap_mutex.
+ *
* NOTE: this bitmap is different compared to the other bitmaps
* in that one bit can represent multiple guest pages (which is
* decided by the `clear_bmap_shift' variable below). On
@@ -59,6 +79,16 @@ struct RAMBlock {
*/
unsigned long *clear_bmap;
uint8_t clear_bmap_shift;
+
+ /*
+ * RAM block length that corresponds to the used_length on the migration
+ * source (after RAM block sizes were synchronized). Especially, after
+ * starting to run the guest, used_length and postcopy_length can differ.
+ * Used to register/unregister uffd handlers and as the size of the received
+ * bitmap. Receiving any page beyond this length will bail out, as it
+ * could not have been valid on the source.
+ */
+ ram_addr_t postcopy_length;
};
#endif
#endif
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index bc4faa1b00..2ad2a81acc 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -19,7 +19,7 @@ typedef struct RAMBlockNotifier RAMBlockNotifier;
* rcu_read_lock();
*
* DirtyMemoryBlocks *blocks =
- * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ * qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
*
* ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
* unsigned long *block = blocks.blocks[idx];
@@ -65,16 +65,21 @@ void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);
struct RAMBlockNotifier {
- void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size);
- void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size);
+ void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size);
+ void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size);
+ void (*ram_block_resized)(RAMBlockNotifier *n, void *host, size_t old_size,
+ size_t new_size);
QLIST_ENTRY(RAMBlockNotifier) next;
};
void ram_block_notifier_add(RAMBlockNotifier *n);
void ram_block_notifier_remove(RAMBlockNotifier *n);
-void ram_block_notify_add(void *host, size_t size);
-void ram_block_notify_remove(void *host, size_t size);
+void ram_block_notify_add(void *host, size_t size, size_t max_size);
+void ram_block_notify_remove(void *host, size_t size, size_t max_size);
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
-void ram_block_dump(Monitor *mon);
+GString *ram_block_format(void);
#endif /* RAMLIST_H */
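A minimal notifier sketch using the extended callbacks; the handler bodies are placeholders for whatever per-mapping state a backend keeps.

static void my_ram_block_added(RAMBlockNotifier *n, void *host,
                               size_t size, size_t max_size)
{
    /* e.g. register [host, host + max_size) with a hypervisor or DMA layer */
}

static void my_ram_block_removed(RAMBlockNotifier *n, void *host,
                                 size_t size, size_t max_size)
{
    /* undo whatever the added hook set up */
}

static RAMBlockNotifier my_notifier = {
    .ram_block_added   = my_ram_block_added,
    .ram_block_removed = my_ram_block_removed,
    /* .ram_block_resized can be provided as well */
};

/* from init code: */
ram_block_notifier_add(&my_notifier);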
diff --git a/include/exec/replay-core.h b/include/exec/replay-core.h
new file mode 100644
index 0000000000..244c77acce
--- /dev/null
+++ b/include/exec/replay-core.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU replay core API
+ *
+ * Copyright (c) 2010-2015 Institute for System Programming
+ * of the Russian Academy of Sciences.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_REPLAY_H
+#define EXEC_REPLAY_H
+
+#include "qapi/qapi-types-replay.h"
+
+extern ReplayMode replay_mode;
+
+/* Replay process control functions */
+
+/* Enables recording or saving event log with specified parameters */
+void replay_configure(struct QemuOpts *opts);
+/* Initializes timers used for snapshotting and enables events recording */
+void replay_start(void);
+/* Closes replay log file and frees other resources. */
+void replay_finish(void);
+/* Adds replay blocker with the specified error description */
+void replay_add_blocker(const char *feature);
+/* Returns name of the replay log file */
+const char *replay_get_filename(void);
+
+/*
+ * Start making one step in backward direction.
+ * Used by gdbstub for backwards debugging.
+ * Returns true on success.
+ */
+bool replay_reverse_step(void);
+/*
+ * Start searching the last breakpoint/watchpoint.
+ * Used by gdbstub for backwards debugging.
+ * Returns true if the process successfully started.
+ */
+bool replay_reverse_continue(void);
+/*
+ * Returns true if replay module is processing
+ * reverse_continue or reverse_step request
+ */
+bool replay_running_debug(void);
+/* Called in reverse debugging mode to collect breakpoint information */
+void replay_breakpoint(void);
+/* Called when gdb is attached to gdbstub */
+void replay_gdb_attached(void);
+
+/* Interrupts and exceptions */
+
+/* Called by exception handler to write or read exception processing events */
+bool replay_exception(void);
+/*
+ * Used to determine that exception is pending.
+ * Does not proceed to the next event in the log.
+ */
+bool replay_has_exception(void);
+/*
+ * Called by interrupt handlers to write or read interrupt processing events.
+ * Returns true if interrupt should be processed.
+ */
+bool replay_interrupt(void);
+/*
+ * Tries to read interrupt event from the file.
+ * Returns true, when interrupt request is pending.
+ */
+bool replay_has_interrupt(void);
+
+/* Processing data from random generators */
+
+/* Saves the values from the random number generator */
+void replay_save_random(int ret, void *buf, size_t len);
+/* Loads the saved values for the random number generator */
+int replay_read_random(void *buf, size_t len);
+
+#endif
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
deleted file mode 100644
index fbcae88f4b..0000000000
--- a/include/exec/softmmu-semi.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Helper routines to provide target memory access for semihosting
- * syscalls in system emulation mode.
- *
- * Copyright (c) 2007 CodeSourcery.
- *
- * This code is licensed under the GPL
- */
-
-#ifndef SOFTMMU_SEMI_H
-#define SOFTMMU_SEMI_H
-
-#include "cpu.h"
-
-static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
-{
- uint64_t val;
-
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0);
- return tswap64(val);
-}
-
-static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
-{
- uint32_t val;
-
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0);
- return tswap32(val);
-}
-
-static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
-{
- uint8_t val;
-
- cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0);
- return val;
-}
-
-#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
-#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
-#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
-#define get_user_ual(arg, p) get_user_u32(arg, p)
-
-static inline void softmmu_tput64(CPUArchState *env,
- target_ulong addr, uint64_t val)
-{
- val = tswap64(val);
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1);
-}
-
-static inline void softmmu_tput32(CPUArchState *env,
- target_ulong addr, uint32_t val)
-{
- val = tswap32(val);
- cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1);
-}
-#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; })
-#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
-#define put_user_ual(arg, p) put_user_u32(arg, p)
-
-static void *softmmu_lock_user(CPUArchState *env,
- target_ulong addr, target_ulong len, int copy)
-{
- uint8_t *p;
- /* TODO: Make this something that isn't fixed size. */
- p = malloc(len);
- if (p && copy) {
- cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0);
- }
- return p;
-}
-#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
-static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
-{
- char *p;
- char *s;
- uint8_t c;
- /* TODO: Make this something that isn't fixed size. */
- s = p = malloc(1024);
- if (!s) {
- return NULL;
- }
- do {
- cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0);
- addr++;
- *(p++) = c;
- } while (c);
- return s;
-}
-#define lock_user_string(p) softmmu_lock_user_string(env, p)
-static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
- target_ulong len)
-{
- if (len) {
- cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1);
- }
- free(p);
-}
-#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
-
-#endif
diff --git a/include/exec/target_long.h b/include/exec/target_long.h
new file mode 100644
index 0000000000..3cd8e26a23
--- /dev/null
+++ b/include/exec/target_long.h
@@ -0,0 +1,44 @@
+/*
+ * Target Long Definitions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _TARGET_LONG_H_
+#define _TARGET_LONG_H_
+
+/*
+ * Usually this should only be included via cpu-defs.h; however, in
+ * certain cases where we want to build only two versions of a binary
+ * object we can include it directly. The build system must then
+ * ensure TARGET_LONG_BITS is defined.
+ */
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS not defined
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#define MO_TL MO_32
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#define MO_TL MO_64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#endif /* _TARGET_LONG_H_ */
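The format macros pair with target_ulong in logging code; a small hedged example assuming qemu/log.h is available.

static void log_access(target_ulong addr, target_ulong len)
{
    qemu_log("access at " TARGET_FMT_lx " (+" TARGET_FMT_lu " bytes)\n",
             addr, len);
}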
diff --git a/include/exec/target_page.h b/include/exec/target_page.h
index 96726c36a4..98ffbb5c23 100644
--- a/include/exec/target_page.h
+++ b/include/exec/target_page.h
@@ -15,7 +15,9 @@
#define EXEC_TARGET_PAGE_H
size_t qemu_target_page_size(void);
+int qemu_target_page_mask(void);
int qemu_target_page_bits(void);
int qemu_target_page_bits_min(void);
+size_t qemu_target_pages_to_MiB(size_t pages);
#endif
diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h
new file mode 100644
index 0000000000..142c240d94
--- /dev/null
+++ b/include/exec/tb-flush.h
@@ -0,0 +1,28 @@
+/*
+ * tb-flush prototype for use by the rest of the system.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef _TB_FLUSH_H_
+#define _TB_FLUSH_H_
+
+/**
+ * tb_flush() - flush all translation blocks
+ * @cs: CPUState (must be valid, but treated as anonymous pointer)
+ *
+ * Used to flush all the translation blocks in the system. Sometimes
+ * it is simpler to flush everything than work out which individual
+ * translations are now invalid and ensure they are not called
+ * anymore.
+ *
+ * tb_flush() takes care of running the flush in an exclusive context
+ * if it is not already running in one. This means no guest code will
+ * run until the flush completes.
+ */
+void tb_flush(CPUState *cs);
+
+void tcg_flush_jmp_cache(CPUState *cs);
+
+#endif /* _TB_FLUSH_H_ */
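A hedged usage sketch: after rewriting guest code behind TCG's back (say, a debug stub patching instructions in bulk), flushing everything is the simple, always-correct option; cs, insn_addr, patch and patch_len are illustrative.

cpu_memory_rw_debug(cs, insn_addr, patch, patch_len, true);
tb_flush(cs);   /* stale translations of the old code can no longer run */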
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
deleted file mode 100644
index 805235d321..0000000000
--- a/include/exec/tb-hash.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * internal execution defines for qemu
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef EXEC_TB_HASH_H
-#define EXEC_TB_HASH_H
-
-#include "exec/cpu-defs.h"
-#include "exec/exec-all.h"
-#include "qemu/xxhash.h"
-
-#ifdef CONFIG_SOFTMMU
-
-/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
- addresses on the same page. The top bits are the same. This allows
- TLB invalidation to quickly clear a subset of the hash table. */
-#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
-#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
-#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
-#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
-
-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
-{
- target_ulong tmp;
- tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
-}
-
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
-{
- target_ulong tmp;
- tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
- return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
- | (tmp & TB_JMP_ADDR_MASK));
-}
-
-#else
-
-/* In user-mode we can get better hashing because we do not have a TLB */
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
-{
- return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
-}
-
-#endif /* CONFIG_SOFTMMU */
-
-static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
- uint32_t cf_mask, uint32_t trace_vcpu_dstate)
-{
- return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
-}
-
-#endif
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
deleted file mode 100644
index 26921b6daf..0000000000
--- a/include/exec/tb-lookup.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
- *
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#ifndef EXEC_TB_LOOKUP_H
-#define EXEC_TB_LOOKUP_H
-
-#ifdef NEED_CPU_H
-#include "cpu.h"
-#else
-#include "exec/poison.h"
-#endif
-
-#include "exec/exec-all.h"
-#include "exec/tb-hash.h"
-
-/* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *
-tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
- uint32_t *flags, uint32_t cf_mask)
-{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb;
- uint32_t hash;
-
- cpu_get_tb_cpu_state(env, pc, cs_base, flags);
- hash = tb_jmp_cache_hash_func(*pc);
- tb = atomic_rcu_read(&cpu->tb_jmp_cache[hash]);
-
- cf_mask &= ~CF_CLUSTER_MASK;
- cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
- if (likely(tb &&
- tb->pc == *pc &&
- tb->cs_base == *cs_base &&
- tb->flags == *flags &&
- tb->trace_vcpu_dstate == *cpu->trace_dstate &&
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
- return tb;
- }
- tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
- if (tb == NULL) {
- return NULL;
- }
- atomic_set(&cpu->tb_jmp_cache[hash], tb);
- return tb;
-}
-
-#endif /* EXEC_TB_LOOKUP_H */
diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
new file mode 100644
index 0000000000..dc5a5faa0b
--- /dev/null
+++ b/include/exec/tlb-common.h
@@ -0,0 +1,56 @@
+/*
+ * Common definitions for the softmmu tlb
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef EXEC_TLB_COMMON_H
+#define EXEC_TLB_COMMON_H 1
+
+#define CPU_TLB_ENTRY_BITS 5
+
+/* Minimalized TLB entry for use by TCG fast path. */
+typedef union CPUTLBEntry {
+ struct {
+ uint64_t addr_read;
+ uint64_t addr_write;
+ uint64_t addr_code;
+ /*
+ * Addend to virtual address to get host address. IO accesses
+ * use the corresponding iotlb value.
+ */
+ uintptr_t addend;
+ };
+ /*
+ * Padding to get a power of two size, as well as index
+ * access to addr_{read,write,code}.
+ */
+ uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
+} CPUTLBEntry;
+
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
+
+/*
+ * Data elements that are per MMU mode, accessed by the fast path.
+ * The structure is aligned to aid loading the pair with one insn.
+ */
+typedef struct CPUTLBDescFast {
+ /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
+ uintptr_t mask;
+ /* The array of tlb entries itself. */
+ CPUTLBEntry *table;
+} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
+
+#endif /* EXEC_TLB_COMMON_H */
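A sketch of how the fast path is expected to index this structure (not the definitive lookup code), assuming TARGET_PAGE_BITS comes from the target configuration.

static inline CPUTLBEntry *tlb_entry_sketch(CPUTLBDescFast *fast, vaddr addr)
{
    /*
     * mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS, so masking the
     * shifted page address yields an entry-aligned byte offset into table.
     */
    uintptr_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & fast->mask;
    return (CPUTLBEntry *)((uintptr_t)fast->table + ofs);
}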
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
new file mode 100644
index 0000000000..85c9460c7c
--- /dev/null
+++ b/include/exec/translate-all.h
@@ -0,0 +1,33 @@
+/*
+ * Translated block handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TRANSLATE_ALL_H
+#define TRANSLATE_ALL_H
+
+#include "exec/exec-all.h"
+
+
+/* translate-all.c */
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
+
+#ifdef CONFIG_USER_ONLY
+void page_protect(tb_page_addr_t page_addr);
+int page_unprotect(target_ulong address, uintptr_t pc);
+#endif
+
+#endif /* TRANSLATE_ALL_H */
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
new file mode 100644
index 0000000000..48211c890a
--- /dev/null
+++ b/include/exec/translation-block.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Definition of TranslationBlock.
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef EXEC_TRANSLATION_BLOCK_H
+#define EXEC_TRANSLATION_BLOCK_H
+
+#include "qemu/thread.h"
+#include "exec/cpu-common.h"
+#ifdef CONFIG_USER_ONLY
+#include "qemu/interval-tree.h"
+#endif
+
+/*
+ * Page tracking code uses ram addresses in system mode, and virtual
+ * addresses in userspace mode. Define tb_page_addr_t to be an
+ * appropriate type.
+ */
+#if defined(CONFIG_USER_ONLY)
+typedef vaddr tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
+#else
+typedef ram_addr_t tb_page_addr_t;
+#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
+#endif
+
+/*
+ * Translation Cache-related fields of a TB.
+ * This struct exists just for convenience; we keep track of TB's in a binary
+ * search tree, and the only fields needed to compare TB's in the tree are
+ * @ptr and @size.
+ * Note: the address of search data can be obtained by adding @size to @ptr.
+ */
+struct tb_tc {
+ const void *ptr; /* pointer to the translated code */
+ size_t size;
+};
+
+struct TranslationBlock {
+ /*
+ * Guest PC corresponding to this block. This must be the true
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
+ * privilege, must store those bits elsewhere.
+ *
+ * If CF_PCREL, the opcodes for the TranslationBlock are written
+ * such that the TB is associated only with the physical page and
+ * may be run in any virtual address context. In this case, PC
+ * must always be taken from ENV in a target-specific manner.
+ * Unwind information is taken as offsets from the page, to be
+ * deposited into the "current" PC.
+ */
+ vaddr pc;
+
+ /*
+ * Target-specific data associated with the TranslationBlock, e.g.:
+ * x86: the original user, the Code Segment virtual base,
+ * arm: an extension of tb->flags,
+ * s390x: instruction data for EXECUTE,
+ * sparc: the next pc of the instruction queue (for delay slots).
+ */
+ uint64_t cs_base;
+
+ uint32_t flags; /* flags defining in which context the code was generated */
+ uint32_t cflags; /* compile flags */
+
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
+#define CF_COUNT_MASK 0x000001ff
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
+#define CF_MEMI_ONLY 0x00001000 /* Only instrument memory ops */
+#define CF_USE_ICOUNT 0x00002000
+#define CF_INVALID 0x00004000 /* TB is stale. Set with @jmp_lock held */
+#define CF_PARALLEL 0x00008000 /* Generate code for a parallel context */
+#define CF_NOIRQ 0x00010000 /* Generate an uninterruptible TB */
+#define CF_PCREL 0x00020000 /* Opcodes in TB are PC-relative */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+#define CF_CLUSTER_SHIFT 24
+
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
+ struct tb_tc tc;
+
+ /*
+ * Track tb_page_addr_t intervals that intersect this TB.
+ * For user-only, the virtual addresses are always contiguous,
+ * and we use a unified interval tree. For system, we use a
+ * linked list headed in each PageDesc. Within the list, the lsb
+ * of the previous pointer tells the index of page_next[], and the
+ * list is protected by the PageDesc lock(s).
+ */
+#ifdef CONFIG_USER_ONLY
+ IntervalTreeNode itree;
+#else
+ uintptr_t page_next[2];
+ tb_page_addr_t page_addr[2];
+#endif
+
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
+ /* The following data are used to directly call another TB from
+ * the code of this one. This can be done either by emitting direct or
+ * indirect native jump instructions. These jumps are reset so that the TB
+ * just continues its execution. The TB can be linked to another one by
+ * setting one of the jump targets (or patching the jump instruction). Only
+ * two of such jumps are supported.
+ */
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
+ uintptr_t jmp_target_addr[2]; /* target address */
+
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
+ */
+ uintptr_t jmp_list_head;
+ uintptr_t jmp_list_next[2];
+ uintptr_t jmp_dest[2];
+};
+
+/* The alignment given to TranslationBlock during allocation. */
+#define CODE_GEN_ALIGN 16
+
+/* Hide the qatomic_read to make code a little easier on the eyes */
+static inline uint32_t tb_cflags(const TranslationBlock *tb)
+{
+ return qatomic_read(&tb->cflags);
+}
+
+#endif /* EXEC_TRANSLATION_BLOCK_H */
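A short standalone sketch of decoding a cflags word with the masks defined above; the ex_* helper names are illustrative only:

    #include <stdint.h>

    static inline unsigned ex_cf_count(uint32_t cflags)
    {
        return cflags & 0x000001ff;           /* CF_COUNT_MASK: insn budget */
    }

    static inline unsigned ex_cf_cluster(uint32_t cflags)
    {
        return (cflags & 0xff000000u) >> 24;  /* CF_CLUSTER_MASK >> CF_CLUSTER_SHIFT */
    }

The same word therefore carries both the per-TB instruction count (low 9 bits) and the CPU cluster ID (top 8 bits) used when hashing and matching TBs.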
diff --git a/include/exec/translator.h b/include/exec/translator.h
index 638e1529c5..2c4fb818e7 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -18,13 +18,22 @@
* member in your target-specific DisasContext.
*/
-
#include "qemu/bswap.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
-#include "exec/plugin-gen.h"
-#include "tcg/tcg.h"
+#include "exec/cpu_ldst.h" /* for abi_ptr */
+/**
+ * gen_intermediate_code
+ * @cpu: cpu context
+ * @tb: translation block
+ * @max_insns: max number of instructions to translate
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ *
+ * This function must be provided by the target, which should create
+ * the target-specific DisasContext, and then invoke translator_loop.
+ */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc);
/**
* DisasJumpType:
@@ -63,17 +72,24 @@ typedef enum DisasJumpType {
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
* @singlestep_enabled: "Hardware" single stepping enabled.
+ * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown.
+ * @plugin_enabled: TCG plugin enabled in this TB.
+ * @insn_start: The last op emitted by the insn_start hook,
+ * which is expected to be INDEX_op_insn_start.
*
* Architecture-agnostic disassembly context.
*/
typedef struct DisasContextBase {
TranslationBlock *tb;
- target_ulong pc_first;
- target_ulong pc_next;
+ vaddr pc_first;
+ vaddr pc_next;
DisasJumpType is_jmp;
int num_insns;
int max_insns;
bool singlestep_enabled;
+ bool plugin_enabled;
+ struct TCGOp *insn_start;
+ void *host_addr[2];
} DisasContextBase;
/**
@@ -89,15 +105,6 @@ typedef struct DisasContextBase {
* @insn_start:
* Emit the tcg_gen_insn_start opcode.
*
- * @breakpoint_check:
- * When called, the breakpoint has already been checked to match the PC,
- * but the target may decide the breakpoint missed the address
- * (e.g., due to conditions encoded in their flags). Return true to
- * indicate that the breakpoint did hit, in which case no more breakpoints
- * are checked. If the breakpoint did hit, emit any code required to
- * signal the exception, and set db->is_jmp as necessary to terminate
- * the main loop.
- *
* @translate_insn:
* Disassemble one instruction and set db->pc_next for the start
* of the following instruction. Set db->is_jmp as necessary to
@@ -113,20 +120,20 @@ typedef struct TranslatorOps {
void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
void (*tb_start)(DisasContextBase *db, CPUState *cpu);
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
- bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
- const CPUBreakpoint *bp);
void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
- void (*disas_log)(const DisasContextBase *db, CPUState *cpu);
+ void (*disas_log)(const DisasContextBase *db, CPUState *cpu, FILE *f);
} TranslatorOps;
/**
* translator_loop:
- * @ops: Target-specific operations.
- * @db: Disassembly context.
* @cpu: Target vCPU.
* @tb: Translation block.
* @max_insns: Maximum number of insns to translate.
+ * @pc: guest virtual program counter address
+ * @host_pc: host physical program counter address
+ * @ops: Target-specific operations.
+ * @db: Disassembly context.
*
* Generic translator loop.
*
@@ -140,10 +147,29 @@ typedef struct TranslatorOps {
* - When single-stepping is enabled (system-wide or on the current vCPU).
* - When too many instructions have been translated.
*/
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
- CPUState *cpu, TranslationBlock *tb, int max_insns);
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc, const TranslatorOps *ops,
+ DisasContextBase *db);
-void translator_loop_temp_check(DisasContextBase *db);
+/**
+ * translator_use_goto_tb
+ * @db: Disassembly context
+ * @dest: target pc of the goto
+ *
+ * Return true if goto_tb is allowed between the current TB
+ * and the destination PC.
+ */
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest);
+
+/**
+ * translator_io_start
+ * @db: Disassembly context
+ *
+ * If icount is enabled, set cpu->can_do_io, adjust db->is_jmp to
+ * DISAS_TOO_MANY if it is still DISAS_NEXT, and return true.
+ * Otherwise return false.
+ */
+bool translator_io_start(DisasContextBase *db);
/*
* Translator Load Functions
@@ -156,28 +182,64 @@ void translator_loop_temp_check(DisasContextBase *db);
* the relevant information at translation time.
*/
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
- static inline type \
- fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
- { \
- type ret = load_fn(env, pc); \
- if (do_swap) { \
- ret = swap_fn(ret); \
- } \
- plugin_insn_append(&ret, sizeof(ret)); \
- return ret; \
- } \
- \
- static inline type fullname(CPUArchState *env, abi_ptr pc) \
- { \
- return fullname ## _swap(env, pc, false); \
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
+
+static inline uint16_t
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint16_t ret = translator_lduw(env, db, pc);
+ if (do_swap) {
+ ret = bswap16(ret);
+ }
+ return ret;
+}
+
+static inline uint32_t
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint32_t ret = translator_ldl(env, db, pc);
+ if (do_swap) {
+ ret = bswap32(ret);
+ }
+ return ret;
+}
+
+static inline uint64_t
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
+ abi_ptr pc, bool do_swap)
+{
+ uint64_t ret = translator_ldq(env, db, pc);
+ if (do_swap) {
+ ret = bswap64(ret);
}
+ return ret;
+}
-GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
-GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
-GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
-GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
-GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
-#undef GEN_TRANSLATOR_LD
+/**
+ * translator_fake_ldb - fake instruction load
+ * @insn8: byte of instruction
+ * @pc: program counter of instruction
+ *
+ * This is a special case helper used where the instruction we are
+ * about to translate comes from somewhere else (e.g. being
+ * re-synthesised for s390x "ex"). It ensures we update other areas of
+ * the translator with details of the executed instruction.
+ */
+void translator_fake_ldb(uint8_t insn8, abi_ptr pc);
+
+/*
+ * Return whether addr is on the same page as where disassembly started.
+ * Translators can use this to enforce the rule that only single-insn
+ * translation blocks are allowed to cross page boundaries.
+ */
+static inline bool is_same_page(const DisasContextBase *db, vaddr addr)
+{
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
+}
-#endif /* EXEC__TRANSLATOR_H */
+#endif /* EXEC__TRANSLATOR_H */
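As a sketch of the wiring the new gen_intermediate_code() / translator_loop() signatures imply: a target fills in a TranslatorOps table and forwards the arguments. The xyz_* callbacks and DisasContext are hypothetical names, and this only builds inside a target directory of the QEMU tree:

    static const TranslatorOps xyz_tr_ops = {
        .init_disas_context = xyz_tr_init_disas_context,
        .tb_start           = xyz_tr_tb_start,
        .insn_start         = xyz_tr_insn_start,
        .translate_insn     = xyz_tr_translate_insn,
        .tb_stop            = xyz_tr_tb_stop,
        .disas_log          = xyz_tr_disas_log,
    };

    void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                               vaddr pc, void *host_pc)
    {
        DisasContext dc;    /* target-specific; embeds a DisasContextBase */

        translator_loop(cpu, tb, max_insns, pc, host_pc, &xyz_tr_ops, &dc.base);
    }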
diff --git a/include/exec/tswap.h b/include/exec/tswap.h
new file mode 100644
index 0000000000..68944a880b
--- /dev/null
+++ b/include/exec/tswap.h
@@ -0,0 +1,72 @@
+/*
+ * Macros for swapping a value if the endianness is different
+ * between the target and the host.
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef TSWAP_H
+#define TSWAP_H
+
+#include "hw/core/cpu.h"
+#include "qemu/bswap.h"
+
+/*
+ * If we're in target-specific code, we can hard-code the swapping
+ * condition, otherwise we have to do (slower) run-time checks.
+ */
+#ifdef NEED_CPU_H
+#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN)
+#else
+#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN)
+#endif
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap16(s);
+ } else {
+ return s;
+ }
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap32(s);
+ } else {
+ return s;
+ }
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ if (target_needs_bswap()) {
+ return bswap64(s);
+ } else {
+ return s;
+ }
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap16(*s);
+ }
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap32(*s);
+ }
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ if (target_needs_bswap()) {
+ *s = bswap64(*s);
+ }
+}
+
+#endif /* TSWAP_H */
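A standalone illustration of what tswap32() does when the guest and host disagree on endianness; the byte-reversal below is the same operation bswap32() performs, and the constant is an arbitrary example value:

    #include <stdint.h>

    static uint32_t ex_bswap32(uint32_t v)
    {
        return ((v & 0x000000ffu) << 24) |
               ((v & 0x0000ff00u) << 8)  |
               ((v & 0x00ff0000u) >> 8)  |
               ((v & 0xff000000u) >> 24);
    }

    /* ex_bswap32(0x11223344) == 0x44332211; when the endiannesses match,
     * tswap32() simply returns its argument unchanged. */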
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
index 743b8bb9ea..db4a670328 100644
--- a/include/exec/user/abitypes.h
+++ b/include/exec/user/abitypes.h
@@ -15,7 +15,17 @@
#define ABI_LLONG_ALIGNMENT 2
#endif
-#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) || defined(TARGET_SH4)
+#ifdef TARGET_CRIS
+#define ABI_SHORT_ALIGNMENT 1
+#define ABI_INT_ALIGNMENT 1
+#define ABI_LONG_ALIGNMENT 1
+#define ABI_LLONG_ALIGNMENT 1
+#endif
+
+#if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \
+ || defined(TARGET_SH4) \
+ || defined(TARGET_OPENRISC) \
+ || defined(TARGET_MICROBLAZE)
#define ABI_LLONG_ALIGNMENT 4
#endif
diff --git a/include/exec/user/guest-base.h b/include/exec/user/guest-base.h
new file mode 100644
index 0000000000..afe2ab7fbb
--- /dev/null
+++ b/include/exec/user/guest-base.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Declaration of guest_base.
+ * Copyright (c) 2003 Fabrice Bellard
+ */
+
+#ifndef EXEC_USER_GUEST_BASE_H
+#define EXEC_USER_GUEST_BASE_H
+
+extern uintptr_t guest_base;
+
+#endif
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
index 7992475c9f..2ebfecf58e 100644
--- a/include/exec/user/thunk.h
+++ b/include/exec/user/thunk.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -42,7 +42,7 @@ typedef enum argtype {
} argtype;
#define MK_PTR(type) TYPE_PTR, type
-#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, (int)(size), type
#define MK_STRUCT(id) TYPE_STRUCT, id
#define THUNK_TARGET 0
@@ -55,6 +55,7 @@ typedef struct {
int *field_offsets[2];
/* special handling */
void (*convert[2])(void *dst, const void *src);
+ void (*print)(void *arg);
int size[2];
int align[2];
const char *name;
@@ -110,8 +111,7 @@ static inline int thunk_type_size(const argtype *type_ptr, int is_host)
if (is_host) {
#if defined(HOST_X86_64)
return 8;
-#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
- defined(HOST_PARISC) || defined(HOST_SPARC64)
+#elif defined(HOST_MIPS) || defined(HOST_SPARC64)
return 4;
#elif defined(HOST_PPC)
return sizeof(void *);
@@ -192,10 +192,17 @@ static inline int thunk_type_align(const argtype *type_ptr, int is_host)
}
}
-unsigned int target_to_host_bitmask(unsigned int target_mask,
- const bitmask_transtbl * trans_tbl);
-unsigned int host_to_target_bitmask(unsigned int host_mask,
- const bitmask_transtbl * trans_tbl);
+unsigned int target_to_host_bitmask_len(unsigned int target_mask,
+ const bitmask_transtbl *trans_tbl,
+ size_t trans_len);
+unsigned int host_to_target_bitmask_len(unsigned int host_mask,
+ const bitmask_transtbl * trans_tbl,
+ size_t trans_len);
+
+#define target_to_host_bitmask(M, T) \
+ target_to_host_bitmask_len(M, T, ARRAY_SIZE(T))
+#define host_to_target_bitmask(M, T) \
+ host_to_target_bitmask_len(M, T, ARRAY_SIZE(T))
void thunk_init(unsigned int max_structs);
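The *_bitmask_len() helpers above perform table-driven flag translation. A standalone sketch of the idea follows; the struct layout here is a local stand-in for the example, not a claim about the exact bitmask_transtbl definition:

    #include <stddef.h>

    struct ex_transtbl {
        unsigned target_mask, target_bits;
        unsigned host_mask, host_bits;
    };

    static unsigned ex_target_to_host(unsigned target_mask,
                                      const struct ex_transtbl *tbl, size_t len)
    {
        unsigned host_mask = 0;

        for (size_t i = 0; i < len; i++) {
            if ((target_mask & tbl[i].target_mask) == tbl[i].target_bits) {
                host_mask |= tbl[i].host_bits;
            }
        }
        return host_mask;
    }

Passing ARRAY_SIZE(T) through the convenience macros removes the need for a sentinel entry at the end of each table.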
diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h
new file mode 100644
index 0000000000..b9844afc77
--- /dev/null
+++ b/include/exec/vaddr.h
@@ -0,0 +1,18 @@
+/* Define vaddr. */
+
+#ifndef VADDR_H
+#define VADDR_H
+
+/**
+ * vaddr:
+ * Type wide enough to contain any #target_ulong virtual address.
+ */
+typedef uint64_t vaddr;
+#define VADDR_PRId PRId64
+#define VADDR_PRIu PRIu64
+#define VADDR_PRIo PRIo64
+#define VADDR_PRIx PRIx64
+#define VADDR_PRIX PRIX64
+#define VADDR_MAX UINT64_MAX
+
+#endif
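A trivial usage sketch of the format macros above; it assumes "exec/vaddr.h" plus <stdio.h> and <inttypes.h> are available:

    static void ex_show_pc(vaddr pc)
    {
        printf("pc=0x%" VADDR_PRIx "\n", pc);
    }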
diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
index 735ed6b653..94cbe073ec 100644
--- a/include/fpu/softfloat-helpers.h
+++ b/include/fpu/softfloat-helpers.h
@@ -48,8 +48,8 @@ this code that are retained.
===============================================================================
*/
-#ifndef _SOFTFLOAT_HELPERS_H_
-#define _SOFTFLOAT_HELPERS_H_
+#ifndef SOFTFLOAT_HELPERS_H
+#define SOFTFLOAT_HELPERS_H
#include "fpu/softfloat-types.h"
@@ -69,7 +69,7 @@ static inline void set_float_exception_flags(int val, float_status *status)
status->float_exception_flags = val;
}
-static inline void set_floatx80_rounding_precision(int val,
+static inline void set_floatx80_rounding_precision(FloatX80RoundPrec val,
float_status *status)
{
status->floatx80_rounding_precision = val;
@@ -95,6 +95,16 @@ static inline void set_snan_bit_is_one(bool val, float_status *status)
status->snan_bit_is_one = val;
}
+static inline void set_use_first_nan(bool val, float_status *status)
+{
+ status->use_first_nan = val;
+}
+
+static inline void set_no_signaling_nans(bool val, float_status *status)
+{
+ status->no_signaling_nans = val;
+}
+
static inline bool get_float_detect_tininess(float_status *status)
{
return status->tininess_before_rounding;
@@ -110,7 +120,8 @@ static inline int get_float_exception_flags(float_status *status)
return status->float_exception_flags;
}
-static inline int get_floatx80_rounding_precision(float_status *status)
+static inline FloatX80RoundPrec
+get_floatx80_rounding_precision(float_status *status)
{
return status->floatx80_rounding_precision;
}
@@ -130,4 +141,4 @@ static inline bool get_default_nan_mode(float_status *status)
return status->default_nan_mode;
}
-#endif /* _SOFTFLOAT_HELPERS_H_ */
+#endif /* SOFTFLOAT_HELPERS_H */
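A sketch of how target reset code might initialise its float_status with these setters; it uses only functions and enum values visible in this diff and builds only with fpu/softfloat-helpers.h available:

    static void ex_reset_fp_status(float_status *st)
    {
        set_float_exception_flags(0, st);
        set_floatx80_rounding_precision(floatx80_precision_x, st);
        set_use_first_nan(true, st);
    }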
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
index a35ec2893a..f35cdbfa63 100644
--- a/include/fpu/softfloat-macros.h
+++ b/include/fpu/softfloat-macros.h
@@ -8,7 +8,6 @@
* so some portions are provided under:
* the SoftFloat-2a license
* the BSD license
- * GPL-v2-or-later
*
* Any future contributions to this file after December 1st 2014 will be
* taken to be licensed under the Softfloat-2a license unless specifically
@@ -75,14 +74,47 @@ this code that are retained.
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* Portions of this work are licensed under the terms of the GNU GPL,
- * version 2 or later. See the COPYING file in the top-level directory.
- */
-
#ifndef FPU_SOFTFLOAT_MACROS_H
#define FPU_SOFTFLOAT_MACROS_H
#include "fpu/softfloat-types.h"
+#include "qemu/host-utils.h"
+
+/**
+ * shl_double: double-word merging left shift
+ * @l: left or most-significant word
+ * @r: right or least-significant word
+ * @c: shift count
+ *
+ * Shift @l left by @c bits, shifting in bits from @r.
+ */
+static inline uint64_t shl_double(uint64_t l, uint64_t r, int c)
+{
+#if defined(__x86_64__)
+ asm("shld %b2, %1, %0" : "+r"(l) : "r"(r), "ci"(c));
+ return l;
+#else
+ return c ? (l << c) | (r >> (64 - c)) : l;
+#endif
+}
+
+/**
+ * shr_double: double-word merging right shift
+ * @l: left or most-significant word
+ * @r: right or least-significant word
+ * @c: shift count
+ *
+ * Shift @r right by @c bits, shifting in bits from @l.
+ */
+static inline uint64_t shr_double(uint64_t l, uint64_t r, int c)
+{
+#if defined(__x86_64__)
+ asm("shrd %b2, %1, %0" : "+r"(r) : "r"(l), "ci"(c));
+ return r;
+#else
+ return c ? (r >> c) | (l << (64 - c)) : r;
+#endif
+}
/*----------------------------------------------------------------------------
| Shifts `a' right by the number of bits given in `count'. If any nonzero
@@ -403,16 +435,12 @@ static inline void
| are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- add128(
- uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr )
+static inline void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
+ uint64_t *z0Ptr, uint64_t *z1Ptr)
{
- uint64_t z1;
-
- z1 = a1 + b1;
- *z1Ptr = z1;
- *z0Ptr = a0 + b0 + ( z1 < a1 );
-
+ bool c = 0;
+ *z1Ptr = uadd64_carry(a1, b1, &c);
+ *z0Ptr = uadd64_carry(a0, b0, &c);
}
/*----------------------------------------------------------------------------
@@ -423,34 +451,14 @@ static inline void
| `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- add192(
- uint64_t a0,
- uint64_t a1,
- uint64_t a2,
- uint64_t b0,
- uint64_t b1,
- uint64_t b2,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr
- )
+static inline void add192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2,
+ uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr)
{
- uint64_t z0, z1, z2;
- int8_t carry0, carry1;
-
- z2 = a2 + b2;
- carry1 = ( z2 < a2 );
- z1 = a1 + b1;
- carry0 = ( z1 < a1 );
- z0 = a0 + b0;
- z1 += carry1;
- z0 += ( z1 < carry1 );
- z0 += carry0;
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
-
+ bool c = 0;
+ *z2Ptr = uadd64_carry(a2, b2, &c);
+ *z1Ptr = uadd64_carry(a1, b1, &c);
+ *z0Ptr = uadd64_carry(a0, b0, &c);
}
/*----------------------------------------------------------------------------
@@ -461,14 +469,12 @@ static inline void
| `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- sub128(
- uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr )
+static inline void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
+ uint64_t *z0Ptr, uint64_t *z1Ptr)
{
-
- *z1Ptr = a1 - b1;
- *z0Ptr = a0 - b0 - ( a1 < b1 );
-
+ bool c = 0;
+ *z1Ptr = usub64_borrow(a1, b1, &c);
+ *z0Ptr = usub64_borrow(a0, b0, &c);
}
/*----------------------------------------------------------------------------
@@ -479,34 +485,14 @@ static inline void
| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- sub192(
- uint64_t a0,
- uint64_t a1,
- uint64_t a2,
- uint64_t b0,
- uint64_t b1,
- uint64_t b2,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr
- )
+static inline void sub192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2,
+ uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr)
{
- uint64_t z0, z1, z2;
- int8_t borrow0, borrow1;
-
- z2 = a2 - b2;
- borrow1 = ( a2 < b2 );
- z1 = a1 - b1;
- borrow0 = ( a1 < b1 );
- z0 = a0 - b0;
- z0 -= ( z1 < borrow1 );
- z1 -= borrow1;
- z0 -= borrow0;
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
-
+ bool c = 0;
+ *z2Ptr = usub64_borrow(a2, b2, &c);
+ *z1Ptr = usub64_borrow(a1, b1, &c);
+ *z0Ptr = usub64_borrow(a0, b0, &c);
}
/*----------------------------------------------------------------------------
@@ -515,27 +501,10 @@ static inline void
| `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
-static inline void mul64To128( uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr )
+static inline void
+mul64To128(uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr)
{
- uint32_t aHigh, aLow, bHigh, bLow;
- uint64_t z0, zMiddleA, zMiddleB, z1;
-
- aLow = a;
- aHigh = a>>32;
- bLow = b;
- bHigh = b>>32;
- z1 = ( (uint64_t) aLow ) * bLow;
- zMiddleA = ( (uint64_t) aLow ) * bHigh;
- zMiddleB = ( (uint64_t) aHigh ) * bLow;
- z0 = ( (uint64_t) aHigh ) * bHigh;
- zMiddleA += zMiddleB;
- z0 += ( ( (uint64_t) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 );
- zMiddleA <<= 32;
- z1 += zMiddleA;
- z0 += ( z1 < zMiddleA );
- *z1Ptr = z1;
- *z0Ptr = z0;
-
+ mulu64(z1Ptr, z0Ptr, a, b);
}
/*----------------------------------------------------------------------------
@@ -546,24 +515,14 @@ static inline void mul64To128( uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t
*----------------------------------------------------------------------------*/
static inline void
- mul128By64To192(
- uint64_t a0,
- uint64_t a1,
- uint64_t b,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr
- )
+mul128By64To192(uint64_t a0, uint64_t a1, uint64_t b,
+ uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr)
{
- uint64_t z0, z1, z2, more1;
-
- mul64To128( a1, b, &z1, &z2 );
- mul64To128( a0, b, &z0, &more1 );
- add128( z0, more1, 0, z1, &z0, &z1 );
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
+ uint64_t z0, z1, m1;
+ mul64To128(a1, b, &m1, z2Ptr);
+ mul64To128(a0, b, &z0, &z1);
+ add128(z0, z1, 0, m1, z0Ptr, z1Ptr);
}
/*----------------------------------------------------------------------------
@@ -573,34 +532,21 @@ static inline void
| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
*----------------------------------------------------------------------------*/
-static inline void
- mul128To256(
- uint64_t a0,
- uint64_t a1,
- uint64_t b0,
- uint64_t b1,
- uint64_t *z0Ptr,
- uint64_t *z1Ptr,
- uint64_t *z2Ptr,
- uint64_t *z3Ptr
- )
+static inline void mul128To256(uint64_t a0, uint64_t a1,
+ uint64_t b0, uint64_t b1,
+ uint64_t *z0Ptr, uint64_t *z1Ptr,
+ uint64_t *z2Ptr, uint64_t *z3Ptr)
{
- uint64_t z0, z1, z2, z3;
- uint64_t more1, more2;
-
- mul64To128( a1, b1, &z2, &z3 );
- mul64To128( a1, b0, &z1, &more2 );
- add128( z1, more2, 0, z2, &z1, &z2 );
- mul64To128( a0, b0, &z0, &more1 );
- add128( z0, more1, 0, z1, &z0, &z1 );
- mul64To128( a0, b1, &more1, &more2 );
- add128( more1, more2, 0, z2, &more1, &z2 );
- add128( z0, z1, 0, more1, &z0, &z1 );
- *z3Ptr = z3;
- *z2Ptr = z2;
- *z1Ptr = z1;
- *z0Ptr = z0;
+ uint64_t z0, z1, z2;
+ uint64_t m0, m1, m2, n1, n2;
+ mul64To128(a1, b0, &m1, &m2);
+ mul64To128(a0, b1, &n1, &n2);
+ mul64To128(a1, b1, &z2, z3Ptr);
+ mul64To128(a0, b0, &z0, &z1);
+
+ add192( 0, m1, m2, 0, n1, n2, &m0, &m1, &m2);
+ add192(m0, m1, m2, z0, z1, z2, z0Ptr, z1Ptr, z2Ptr);
}
/*----------------------------------------------------------------------------
@@ -634,83 +580,6 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
}
-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
- *
- * Licensed under the GPLv2/LGPLv3
- */
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
- uint64_t n0, uint64_t d)
-{
-#if defined(__x86_64__)
- uint64_t q;
- asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
- return q;
-#elif defined(__s390x__) && !defined(__clang__)
- /* Need to use a TImode type to get an even register pair for DLGR. */
- unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
- asm("dlgr %0, %1" : "+r"(n) : "r"(d));
- *r = n >> 64;
- return n;
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
- /* From Power ISA 2.06, programming note for divdeu. */
- uint64_t q1, q2, Q, r1, r2, R;
- asm("divdeu %0,%2,%4; divdu %1,%3,%4"
- : "=&r"(q1), "=r"(q2)
- : "r"(n1), "r"(n0), "r"(d));
- r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
- r2 = n0 - (q2 * d);
- Q = q1 + q2;
- R = r1 + r2;
- if (R >= d || R < r2) { /* overflow implies R > d */
- Q += 1;
- R -= d;
- }
- *r = R;
- return Q;
-#else
- uint64_t d0, d1, q0, q1, r1, r0, m;
-
- d0 = (uint32_t)d;
- d1 = d >> 32;
-
- r1 = n1 % d1;
- q1 = n1 / d1;
- m = q1 * d0;
- r1 = (r1 << 32) | (n0 >> 32);
- if (r1 < m) {
- q1 -= 1;
- r1 += d;
- if (r1 >= d) {
- if (r1 < m) {
- q1 -= 1;
- r1 += d;
- }
- }
- }
- r1 -= m;
-
- r0 = r1 % d1;
- q0 = r1 / d1;
- m = q0 * d0;
- r0 = (r0 << 32) | (uint32_t)n0;
- if (r0 < m) {
- q0 -= 1;
- r0 += d;
- if (r0 >= d) {
- if (r0 < m) {
- q0 -= 1;
- r0 += d;
- }
- }
- }
- r0 -= m;
-
- *r = r0;
- return (q1 << 32) | q0;
-#endif
-}
-
/*----------------------------------------------------------------------------
| Returns an approximation to the square root of the 32-bit significand given
| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
@@ -794,4 +663,38 @@ static inline bool ne128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
return a0 != b0 || a1 != b1;
}
+/*
+ * Similarly, comparisons of 192-bit values.
+ */
+
+static inline bool eq192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2)
+{
+ return ((a0 ^ b0) | (a1 ^ b1) | (a2 ^ b2)) == 0;
+}
+
+static inline bool le192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2)
+{
+ if (a0 != b0) {
+ return a0 < b0;
+ }
+ if (a1 != b1) {
+ return a1 < b1;
+ }
+ return a2 <= b2;
+}
+
+static inline bool lt192(uint64_t a0, uint64_t a1, uint64_t a2,
+ uint64_t b0, uint64_t b1, uint64_t b2)
+{
+ if (a0 != b0) {
+ return a0 < b0;
+ }
+ if (a1 != b1) {
+ return a1 < b1;
+ }
+ return a2 < b2;
+}
+
#endif
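For comparison, a portable standalone equivalent of the carry-chained add128() above, written without the qemu/host-utils.h helpers:

    #include <stdint.h>
    #include <stdbool.h>

    static void ex_add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                          uint64_t *z0, uint64_t *z1)
    {
        uint64_t lo = a1 + b1;
        bool carry = lo < a1;         /* unsigned overflow of the low word */

        *z1 = lo;
        *z0 = a0 + b0 + carry;        /* propagate the carry into the high word */
    }

uadd64_carry()/usub64_borrow() express exactly this pattern, letting the compiler pick up add-with-carry instructions where available.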
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index 7680193ebc..0884ec4ef7 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -103,7 +103,7 @@ typedef struct {
#define make_floatx80(exp, mant) ((floatx80) { mant, exp })
#define make_floatx80_init(exp, mant) { .low = mant, .high = exp }
typedef struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint64_t high, low;
#else
uint64_t low, high;
@@ -113,6 +113,11 @@ typedef struct {
#define make_float128_init(high_, low_) { .high = high_, .low = low_ }
/*
+ * Software neural-network floating-point types.
+ */
+typedef uint16_t bfloat16;
+
+/*
* Software IEC/IEEE floating-point underflow tininess-detection mode.
*/
@@ -129,8 +134,10 @@ typedef enum __attribute__((__packed__)) {
float_round_up = 2,
float_round_to_zero = 3,
float_round_ties_away = 4,
- /* Not an IEEE rounding mode: round to the closest odd mantissa value */
+ /* Not an IEEE rounding mode: round to closest odd, overflow to max */
float_round_to_odd = 5,
+ /* Not an IEEE rounding mode: round to closest odd, overflow to inf */
+ float_round_to_odd_inf = 6,
} FloatRoundMode;
/*
@@ -138,15 +145,30 @@ typedef enum __attribute__((__packed__)) {
*/
enum {
- float_flag_invalid = 1,
- float_flag_divbyzero = 4,
- float_flag_overflow = 8,
- float_flag_underflow = 16,
- float_flag_inexact = 32,
- float_flag_input_denormal = 64,
- float_flag_output_denormal = 128
+ float_flag_invalid = 0x0001,
+ float_flag_divbyzero = 0x0002,
+ float_flag_overflow = 0x0004,
+ float_flag_underflow = 0x0008,
+ float_flag_inexact = 0x0010,
+ float_flag_input_denormal = 0x0020,
+ float_flag_output_denormal = 0x0040,
+ float_flag_invalid_isi = 0x0080, /* inf - inf */
+ float_flag_invalid_imz = 0x0100, /* inf * 0 */
+ float_flag_invalid_idi = 0x0200, /* inf / inf */
+ float_flag_invalid_zdz = 0x0400, /* 0 / 0 */
+ float_flag_invalid_sqrt = 0x0800, /* sqrt(-x) */
+ float_flag_invalid_cvti = 0x1000, /* non-nan to integer */
+ float_flag_invalid_snan = 0x2000, /* any operand was snan */
};
+/*
+ * Rounding precision for floatx80.
+ */
+typedef enum __attribute__((__packed__)) {
+ floatx80_precision_x,
+ floatx80_precision_d,
+ floatx80_precision_s,
+} FloatX80RoundPrec;
/*
* Floating Point Status. Individual architectures may maintain
@@ -156,17 +178,27 @@ enum {
*/
typedef struct float_status {
+ uint16_t float_exception_flags;
FloatRoundMode float_rounding_mode;
- uint8_t float_exception_flags;
- signed char floatx80_rounding_precision;
+ FloatX80RoundPrec floatx80_rounding_precision;
bool tininess_before_rounding;
/* should denormalised results go to zero and set the inexact flag? */
bool flush_to_zero;
/* should denormalised inputs go to zero and set the input_denormal flag? */
bool flush_inputs_to_zero;
bool default_nan_mode;
- /* not always used -- see snan_bit_is_one() in softfloat-specialize.h */
+ /*
+ * The flags below are not used on all specializations and may
+ * constant fold away (see snan_bit_is_one()/no_signalling_nans() in
+ * softfloat-specialize.inc.c)
+ */
bool snan_bit_is_one;
+ bool use_first_nan;
+ bool no_signaling_nans;
+ /* should overflowed results subtract re_bias to its exponent? */
+ bool rebias_overflow;
+ /* should underflowed results add re_bias to its exponent? */
+ bool rebias_underflow;
} float_status;
#endif /* SOFTFLOAT_TYPES_H */
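A small sketch of consuming the new detailed invalid-operation flags; it relies on fpu/softfloat.h declarations and therefore only builds inside the QEMU tree:

    static void ex_check_zero_div_zero(void)
    {
        float_status st = { .float_rounding_mode = float_round_nearest_even };

        (void)float64_div(float64_zero, float64_zero, &st);
        if (st.float_exception_flags & float_flag_invalid_zdz) {
            /* 0/0 reports a dedicated reason bit in addition to
             * float_flag_invalid, so targets can raise distinct traps. */
        }
    }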
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index 659218b5c7..eb64075b9c 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -95,12 +95,16 @@ typedef enum {
#include "fpu/softfloat-types.h"
#include "fpu/softfloat-helpers.h"
+#include "qemu/int128.h"
/*----------------------------------------------------------------------------
| Routine to raise any or all of the software IEC/IEEE floating-point
| exception flags.
*----------------------------------------------------------------------------*/
-void float_raise(uint8_t flags, float_status *status);
+static inline void float_raise(uint16_t flags, float_status *status)
+{
+ status->float_exception_flags |= flags;
+}
/*----------------------------------------------------------------------------
| If `a' is denormal and we are in flush-to-zero mode then set the
@@ -109,6 +113,7 @@ void float_raise(uint8_t flags, float_status *status);
float16 float16_squash_input_denormal(float16 a, float_status *status);
float32 float32_squash_input_denormal(float32 a, float_status *status);
float64 float64_squash_input_denormal(float64 a, float_status *status);
+bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
/*----------------------------------------------------------------------------
| Options to indicate which negations to perform in float*_muladd()
@@ -136,9 +141,11 @@ float16 uint16_to_float16_scalbn(uint16_t a, int, float_status *status);
float16 uint32_to_float16_scalbn(uint32_t a, int, float_status *status);
float16 uint64_to_float16_scalbn(uint64_t a, int, float_status *status);
+float16 int8_to_float16(int8_t a, float_status *status);
float16 int16_to_float16(int16_t a, float_status *status);
float16 int32_to_float16(int32_t a, float_status *status);
float16 int64_to_float16(int64_t a, float_status *status);
+float16 uint8_to_float16(uint8_t a, float_status *status);
float16 uint16_to_float16(uint16_t a, float_status *status);
float16 uint32_to_float16(uint32_t a, float_status *status);
float16 uint64_to_float16(uint64_t a, float_status *status);
@@ -176,7 +183,9 @@ floatx80 int64_to_floatx80(int64_t, float_status *status);
float128 int32_to_float128(int32_t, float_status *status);
float128 int64_to_float128(int64_t, float_status *status);
+float128 int128_to_float128(Int128, float_status *status);
float128 uint64_to_float128(uint64_t, float_status *status);
+float128 uint128_to_float128(Int128, float_status *status);
/*----------------------------------------------------------------------------
| Software half-precision conversion routines.
@@ -187,10 +196,13 @@ float32 float16_to_float32(float16, bool ieee, float_status *status);
float16 float64_to_float16(float64 a, bool ieee, float_status *status);
float64 float16_to_float64(float16 a, bool ieee, float_status *status);
+int8_t float16_to_int8_scalbn(float16, FloatRoundMode, int,
+ float_status *status);
int16_t float16_to_int16_scalbn(float16, FloatRoundMode, int, float_status *);
int32_t float16_to_int32_scalbn(float16, FloatRoundMode, int, float_status *);
int64_t float16_to_int64_scalbn(float16, FloatRoundMode, int, float_status *);
+int8_t float16_to_int8(float16, float_status *status);
int16_t float16_to_int16(float16, float_status *status);
int32_t float16_to_int32(float16, float_status *status);
int64_t float16_to_int64(float16, float_status *status);
@@ -199,6 +211,8 @@ int16_t float16_to_int16_round_to_zero(float16, float_status *status);
int32_t float16_to_int32_round_to_zero(float16, float_status *status);
int64_t float16_to_int64_round_to_zero(float16, float_status *status);
+uint8_t float16_to_uint8_scalbn(float16 a, FloatRoundMode,
+ int, float_status *status);
uint16_t float16_to_uint16_scalbn(float16 a, FloatRoundMode,
int, float_status *status);
uint32_t float16_to_uint32_scalbn(float16 a, FloatRoundMode,
@@ -206,6 +220,7 @@ uint32_t float16_to_uint32_scalbn(float16 a, FloatRoundMode,
uint64_t float16_to_uint64_scalbn(float16 a, FloatRoundMode,
int, float_status *status);
+uint8_t float16_to_uint8(float16 a, float_status *status);
uint16_t float16_to_uint16(float16 a, float_status *status);
uint32_t float16_to_uint32(float16 a, float_status *status);
uint64_t float16_to_uint64(float16 a, float_status *status);
@@ -231,6 +246,8 @@ float16 float16_minnum(float16, float16, float_status *status);
float16 float16_maxnum(float16, float16, float_status *status);
float16 float16_minnummag(float16, float16, float_status *status);
float16 float16_maxnummag(float16, float16, float_status *status);
+float16 float16_minimum_number(float16, float16, float_status *status);
+float16 float16_maximum_number(float16, float16, float_status *status);
float16 float16_sqrt(float16, float_status *status);
FloatRelation float16_compare(float16, float16, float_status *status);
FloatRelation float16_compare_quiet(float16, float16, float_status *status);
@@ -264,6 +281,11 @@ static inline bool float16_is_zero_or_denormal(float16 a)
return (float16_val(a) & 0x7c00) == 0;
}
+static inline bool float16_is_normal(float16 a)
+{
+ return (((float16_val(a) >> 10) + 1) & 0x1f) >= 2;
+}
+
static inline float16 float16_abs(float16 a)
{
/* Note that abs does *not* handle NaN specially, nor does
@@ -285,6 +307,47 @@ static inline float16 float16_set_sign(float16 a, int sign)
return make_float16((float16_val(a) & 0x7fff) | (sign << 15));
}
+static inline bool float16_eq(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool float16_le(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float16_lt(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool float16_unordered(float16 a, float16 b, float_status *s)
+{
+ return float16_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool float16_eq_quiet(float16 a, float16 b, float_status *s)
+{
+ return float16_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool float16_le_quiet(float16 a, float16 b, float_status *s)
+{
+ return float16_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool float16_lt_quiet(float16 a, float16 b, float_status *s)
+{
+ return float16_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool float16_unordered_quiet(float16 a, float16 b,
+ float_status *s)
+{
+ return float16_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
#define float16_zero make_float16(0)
#define float16_half make_float16(0x3800)
#define float16_one make_float16(0x3c00)
@@ -294,6 +357,200 @@ static inline float16 float16_set_sign(float16 a, int sign)
#define float16_infinity make_float16(0x7c00)
/*----------------------------------------------------------------------------
+| Software bfloat16 conversion routines.
+*----------------------------------------------------------------------------*/
+
+bfloat16 bfloat16_round_to_int(bfloat16, float_status *status);
+bfloat16 float32_to_bfloat16(float32, float_status *status);
+float32 bfloat16_to_float32(bfloat16, float_status *status);
+bfloat16 float64_to_bfloat16(float64 a, float_status *status);
+float64 bfloat16_to_float64(bfloat16 a, float_status *status);
+
+int8_t bfloat16_to_int8_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+int16_t bfloat16_to_int16_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+int64_t bfloat16_to_int64_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
+
+int8_t bfloat16_to_int8(bfloat16, float_status *status);
+int16_t bfloat16_to_int16(bfloat16, float_status *status);
+int32_t bfloat16_to_int32(bfloat16, float_status *status);
+int64_t bfloat16_to_int64(bfloat16, float_status *status);
+
+int8_t bfloat16_to_int8_round_to_zero(bfloat16, float_status *status);
+int16_t bfloat16_to_int16_round_to_zero(bfloat16, float_status *status);
+int32_t bfloat16_to_int32_round_to_zero(bfloat16, float_status *status);
+int64_t bfloat16_to_int64_round_to_zero(bfloat16, float_status *status);
+
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+uint64_t bfloat16_to_uint64_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
+
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *status);
+uint16_t bfloat16_to_uint16(bfloat16 a, float_status *status);
+uint32_t bfloat16_to_uint32(bfloat16 a, float_status *status);
+uint64_t bfloat16_to_uint64(bfloat16 a, float_status *status);
+
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *status);
+uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *status);
+uint32_t bfloat16_to_uint32_round_to_zero(bfloat16 a, float_status *status);
+uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *status);
+
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int, float_status *status);
+bfloat16 int16_to_bfloat16_scalbn(int16_t a, int, float_status *status);
+bfloat16 int32_to_bfloat16_scalbn(int32_t a, int, float_status *status);
+bfloat16 int64_to_bfloat16_scalbn(int64_t a, int, float_status *status);
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int, float_status *status);
+bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int, float_status *status);
+bfloat16 uint32_to_bfloat16_scalbn(uint32_t a, int, float_status *status);
+bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int, float_status *status);
+
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status);
+bfloat16 int16_to_bfloat16(int16_t a, float_status *status);
+bfloat16 int32_to_bfloat16(int32_t a, float_status *status);
+bfloat16 int64_to_bfloat16(int64_t a, float_status *status);
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status);
+bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status);
+bfloat16 uint32_to_bfloat16(uint32_t a, float_status *status);
+bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status);
+
+/*----------------------------------------------------------------------------
+| Software bfloat16 operations.
+*----------------------------------------------------------------------------*/
+
+bfloat16 bfloat16_add(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_sub(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_mul(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_div(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_muladd(bfloat16, bfloat16, bfloat16, int,
+ float_status *status);
+float16 bfloat16_scalbn(bfloat16, int, float_status *status);
+bfloat16 bfloat16_min(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_max(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_minnum(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_maxnum(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_minnummag(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_maxnummag(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_minimum_number(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_maximum_number(bfloat16, bfloat16, float_status *status);
+bfloat16 bfloat16_sqrt(bfloat16, float_status *status);
+FloatRelation bfloat16_compare(bfloat16, bfloat16, float_status *status);
+FloatRelation bfloat16_compare_quiet(bfloat16, bfloat16, float_status *status);
+
+bool bfloat16_is_quiet_nan(bfloat16, float_status *status);
+bool bfloat16_is_signaling_nan(bfloat16, float_status *status);
+bfloat16 bfloat16_silence_nan(bfloat16, float_status *status);
+bfloat16 bfloat16_default_nan(float_status *status);
+
+static inline bool bfloat16_is_any_nan(bfloat16 a)
+{
+ return ((a & ~0x8000) > 0x7F80);
+}
+
+static inline bool bfloat16_is_neg(bfloat16 a)
+{
+ return a >> 15;
+}
+
+static inline bool bfloat16_is_infinity(bfloat16 a)
+{
+ return (a & 0x7fff) == 0x7F80;
+}
+
+static inline bool bfloat16_is_zero(bfloat16 a)
+{
+ return (a & 0x7fff) == 0;
+}
+
+static inline bool bfloat16_is_zero_or_denormal(bfloat16 a)
+{
+ return (a & 0x7F80) == 0;
+}
+
+static inline bool bfloat16_is_normal(bfloat16 a)
+{
+ return (((a >> 7) + 1) & 0xff) >= 2;
+}
+
+static inline bfloat16 bfloat16_abs(bfloat16 a)
+{
+ /* Note that abs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return a & 0x7fff;
+}
+
+static inline bfloat16 bfloat16_chs(bfloat16 a)
+{
+ /* Note that chs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return a ^ 0x8000;
+}
+
+static inline bfloat16 bfloat16_set_sign(bfloat16 a, int sign)
+{
+ return (a & 0x7fff) | (sign << 15);
+}
+
+static inline bool bfloat16_eq(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) == float_relation_equal;
+}
+
+static inline bool bfloat16_le(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) <= float_relation_equal;
+}
+
+static inline bool bfloat16_lt(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) < float_relation_equal;
+}
+
+static inline bool bfloat16_unordered(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare(a, b, s) == float_relation_unordered;
+}
+
+static inline bool bfloat16_eq_quiet(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) == float_relation_equal;
+}
+
+static inline bool bfloat16_le_quiet(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) <= float_relation_equal;
+}
+
+static inline bool bfloat16_lt_quiet(bfloat16 a, bfloat16 b, float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) < float_relation_equal;
+}
+
+static inline bool bfloat16_unordered_quiet(bfloat16 a, bfloat16 b,
+ float_status *s)
+{
+ return bfloat16_compare_quiet(a, b, s) == float_relation_unordered;
+}
+
+#define bfloat16_zero 0
+#define bfloat16_half 0x3f00
+#define bfloat16_one 0x3f80
+#define bfloat16_one_point_five 0x3fc0
+#define bfloat16_two 0x4000
+#define bfloat16_three 0x4040
+#define bfloat16_infinity 0x7f80
+
+/*----------------------------------------------------------------------------
| The pattern for a default generated half-precision NaN.
*----------------------------------------------------------------------------*/
float16 float16_default_nan(float_status *status);
@@ -351,6 +608,8 @@ float32 float32_minnum(float32, float32, float_status *status);
float32 float32_maxnum(float32, float32, float_status *status);
float32 float32_minnummag(float32, float32, float_status *status);
float32 float32_maxnummag(float32, float32, float_status *status);
+float32 float32_minimum_number(float32, float32, float_status *status);
+float32 float32_maximum_number(float32, float32, float_status *status);
bool float32_is_quiet_nan(float32, float_status *status);
bool float32_is_signaling_nan(float32, float_status *status);
float32 float32_silence_nan(float32, float_status *status);
@@ -504,6 +763,9 @@ int16_t float64_to_int16_round_to_zero(float64, float_status *status);
int32_t float64_to_int32_round_to_zero(float64, float_status *status);
int64_t float64_to_int64_round_to_zero(float64, float_status *status);
+int32_t float64_to_int32_modulo(float64, FloatRoundMode, float_status *status);
+int64_t float64_to_int64_modulo(float64, FloatRoundMode, float_status *status);
+
uint16_t float64_to_uint16_scalbn(float64, FloatRoundMode, int, float_status *);
uint32_t float64_to_uint32_scalbn(float64, FloatRoundMode, int, float_status *);
uint64_t float64_to_uint64_scalbn(float64, FloatRoundMode, int, float_status *);
@@ -540,6 +802,8 @@ float64 float64_minnum(float64, float64, float_status *status);
float64 float64_maxnum(float64, float64, float_status *status);
float64 float64_minnummag(float64, float64, float_status *status);
float64 float64_maxnummag(float64, float64, float_status *status);
+float64 float64_minimum_number(float64, float64, float_status *status);
+float64 float64_maximum_number(float64, float64, float_status *status);
bool float64_is_quiet_nan(float64 a, float_status *status);
bool float64_is_signaling_nan(float64, float_status *status);
float64 float64_silence_nan(float64, float_status *status);
@@ -663,6 +927,18 @@ static inline bool float64_unordered_quiet(float64 a, float64 b,
float64 float64_default_nan(float_status *status);
/*----------------------------------------------------------------------------
+| Software IEC/IEEE double-precision operations, rounding to single precision,
+| returning a result in double precision, with only one rounding step.
+*----------------------------------------------------------------------------*/
+
+float64 float64r32_add(float64, float64, float_status *status);
+float64 float64r32_sub(float64, float64, float_status *status);
+float64 float64r32_mul(float64, float64, float_status *status);
+float64 float64r32_div(float64, float64, float_status *status);
+float64 float64r32_muladd(float64, float64, float64, int, float_status *status);
+float64 float64r32_sqrt(float64, float_status *status);
+
+/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision conversion routines.
*----------------------------------------------------------------------------*/
int32_t floatx80_to_int32(floatx80, float_status *status);
@@ -914,7 +1190,7 @@ floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status);
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/
-floatx80 roundAndPackFloatx80(int8_t roundingPrecision, bool zSign,
+floatx80 roundAndPackFloatx80(FloatX80RoundPrec roundingPrecision, bool zSign,
int32_t zExp, uint64_t zSig0, uint64_t zSig1,
float_status *status);
@@ -927,7 +1203,7 @@ floatx80 roundAndPackFloatx80(int8_t roundingPrecision, bool zSign,
| normalized.
*----------------------------------------------------------------------------*/
-floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision,
+floatx80 normalizeRoundAndPackFloatx80(FloatX80RoundPrec roundingPrecision,
bool zSign, int32_t zExp,
uint64_t zSig0, uint64_t zSig1,
float_status *status);
@@ -943,9 +1219,13 @@ floatx80 floatx80_default_nan(float_status *status);
int32_t float128_to_int32(float128, float_status *status);
int32_t float128_to_int32_round_to_zero(float128, float_status *status);
int64_t float128_to_int64(float128, float_status *status);
+Int128 float128_to_int128(float128, float_status *status);
int64_t float128_to_int64_round_to_zero(float128, float_status *status);
+Int128 float128_to_int128_round_to_zero(float128, float_status *status);
uint64_t float128_to_uint64(float128, float_status *status);
+Int128 float128_to_uint128(float128, float_status *status);
uint64_t float128_to_uint64_round_to_zero(float128, float_status *status);
+Int128 float128_to_uint128_round_to_zero(float128, float_status *status);
uint32_t float128_to_uint32(float128, float_status *status);
uint32_t float128_to_uint32_round_to_zero(float128, float_status *status);
float32 float128_to_float32(float128, float_status *status);
@@ -959,11 +1239,21 @@ float128 float128_round_to_int(float128, float_status *status);
float128 float128_add(float128, float128, float_status *status);
float128 float128_sub(float128, float128, float_status *status);
float128 float128_mul(float128, float128, float_status *status);
+float128 float128_muladd(float128, float128, float128, int,
+ float_status *status);
float128 float128_div(float128, float128, float_status *status);
float128 float128_rem(float128, float128, float_status *status);
float128 float128_sqrt(float128, float_status *status);
FloatRelation float128_compare(float128, float128, float_status *status);
FloatRelation float128_compare_quiet(float128, float128, float_status *status);
+float128 float128_min(float128, float128, float_status *status);
+float128 float128_max(float128, float128, float_status *status);
+float128 float128_minnum(float128, float128, float_status *status);
+float128 float128_maxnum(float128, float128, float_status *status);
+float128 float128_minnummag(float128, float128, float_status *status);
+float128 float128_maxnummag(float128, float128, float_status *status);
+float128 float128_minimum_number(float128, float128, float_status *status);
+float128 float128_maximum_number(float128, float128, float_status *status);
bool float128_is_quiet_nan(float128, float_status *status);
bool float128_is_signaling_nan(float128, float_status *status);
float128 float128_silence_nan(float128, float_status *status);
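A standalone sketch of the bfloat16 encoding that the new inline helpers rely on (1 sign bit, 8 exponent bits, 7 fraction bits); the ex_* names are illustrative only:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint16_t ex_bfloat16;

    static bool ex_bfloat16_is_infinity(ex_bfloat16 a)
    {
        return (a & 0x7fff) == 0x7f80;   /* max exponent, zero fraction */
    }

    static ex_bfloat16 ex_bfloat16_chs(ex_bfloat16 a)
    {
        return a ^ 0x8000;               /* flip only the sign bit */
    }

    /* 0x3f80 encodes 1.0, so ex_bfloat16_chs(0x3f80) == 0xbf80 (-1.0). */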
diff --git a/include/gdbstub/helpers.h b/include/gdbstub/helpers.h
new file mode 100644
index 0000000000..c573aef2dc
--- /dev/null
+++ b/include/gdbstub/helpers.h
@@ -0,0 +1,103 @@
+/*
+ * gdbstub helpers
+ *
+ * These are all used by the various frontends and have to be host
+ * aware to ensure things are store in target order.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _GDBSTUB_HELPERS_H_
+#define _GDBSTUB_HELPERS_H_
+
+#ifdef NEED_CPU_H
+#include "cpu.h"
+
+/*
+ * The GDB remote protocol transfers values in target byte order. As
+ * the gdbstub may be batching up several register values we always
+ * append to the array.
+ */
+
+static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
+{
+ g_byte_array_append(buf, &val, 1);
+ return 1;
+}
+
+static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
+{
+ uint16_t to_word = tswap16(val);
+ g_byte_array_append(buf, (uint8_t *) &to_word, 2);
+ return 2;
+}
+
+static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
+{
+ uint32_t to_long = tswap32(val);
+ g_byte_array_append(buf, (uint8_t *) &to_long, 4);
+ return 4;
+}
+
+static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
+{
+ uint64_t to_quad = tswap64(val);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ return 8;
+}
+
+static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
+ uint64_t val_lo)
+{
+ uint64_t to_quad;
+#if TARGET_BIG_ENDIAN
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#else
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#endif
+ return 16;
+}
+
+static inline int gdb_get_zeroes(GByteArray *array, size_t len)
+{
+ guint oldlen = array->len;
+ g_byte_array_set_size(array, oldlen + len);
+ memset(array->data + oldlen, 0, len);
+
+ return len;
+}
+
+/**
+ * gdb_get_reg_ptr: get pointer to start of last element
+ * @len: length of element
+ *
+ * This is a helper function to extract the pointer to the last
+ * element for additional processing. Some front-ends do additional
+ * dynamic swapping of the elements based on CPU state.
+ */
+static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len)
+{
+ return buf->data + buf->len - len;
+}
+
+#if TARGET_LONG_BITS == 64
+#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
+#define ldtul_p(addr) ldq_p(addr)
+#else
+#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
+#define ldtul_p(addr) ldl_p(addr)
+#endif
+
+#else
+#error "gdbstub helpers should only be included by target specific code"
+#endif
+
+#endif /* _GDBSTUB_HELPERS_H_ */
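A minimal usage sketch of the helpers above, not part of the patch: the CPU type, its register fields and the register numbering are hypothetical stand-ins for a real target frontend.

/* Hypothetical target hook serializing one register into the reply buffer. */
static int example_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
    ExampleCPUState *env = &EXAMPLE_CPU(cs)->env;    /* assumed target state */

    if (n < 16) {
        /* general-purpose registers, target_ulong sized */
        return gdb_get_regl(mem_buf, env->regs[n]);
    } else if (n == 16) {
        /* a 32-bit status register */
        return gdb_get_reg32(mem_buf, env->status);
    }
    /* pad unimplemented registers so the packet layout stays consistent */
    return gdb_get_zeroes(mem_buf, 4);
}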
diff --git a/include/gdbstub/syscalls.h b/include/gdbstub/syscalls.h
new file mode 100644
index 0000000000..54ff7245a1
--- /dev/null
+++ b/include/gdbstub/syscalls.h
@@ -0,0 +1,122 @@
+/*
+ * GDB Syscall support
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef _SYSCALLS_H_
+#define _SYSCALLS_H_
+
+/* For gdb file i/o remote protocol open flags. */
+#define GDB_O_RDONLY 0
+#define GDB_O_WRONLY 1
+#define GDB_O_RDWR 2
+#define GDB_O_APPEND 8
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_EXCL 0x800
+
+/* For gdb file i/o remote protocol errno values */
+#define GDB_EPERM 1
+#define GDB_ENOENT 2
+#define GDB_EINTR 4
+#define GDB_EBADF 9
+#define GDB_EACCES 13
+#define GDB_EFAULT 14
+#define GDB_EBUSY 16
+#define GDB_EEXIST 17
+#define GDB_ENODEV 19
+#define GDB_ENOTDIR 20
+#define GDB_EISDIR 21
+#define GDB_EINVAL 22
+#define GDB_ENFILE 23
+#define GDB_EMFILE 24
+#define GDB_EFBIG 27
+#define GDB_ENOSPC 28
+#define GDB_ESPIPE 29
+#define GDB_EROFS 30
+#define GDB_ENAMETOOLONG 91
+#define GDB_EUNKNOWN 9999
+
+/* For gdb file i/o remote protocol lseek whence. */
+#define GDB_SEEK_SET 0
+#define GDB_SEEK_CUR 1
+#define GDB_SEEK_END 2
+
+/* For gdb file i/o stat/fstat. */
+typedef uint32_t gdb_mode_t;
+typedef uint32_t gdb_time_t;
+
+struct gdb_stat {
+ uint32_t gdb_st_dev; /* device */
+ uint32_t gdb_st_ino; /* inode */
+ gdb_mode_t gdb_st_mode; /* protection */
+ uint32_t gdb_st_nlink; /* number of hard links */
+ uint32_t gdb_st_uid; /* user ID of owner */
+ uint32_t gdb_st_gid; /* group ID of owner */
+ uint32_t gdb_st_rdev; /* device type (if inode device) */
+ uint64_t gdb_st_size; /* total size, in bytes */
+ uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
+ uint64_t gdb_st_blocks; /* number of blocks allocated */
+ gdb_time_t gdb_st_atime; /* time of last access */
+ gdb_time_t gdb_st_mtime; /* time of last modification */
+ gdb_time_t gdb_st_ctime; /* time of last change */
+} QEMU_PACKED;
+
+struct gdb_timeval {
+ gdb_time_t tv_sec; /* second */
+ uint64_t tv_usec; /* microsecond */
+} QEMU_PACKED;
+
+typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err);
+
+/**
+ * gdb_do_syscall:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * ...: list of arguments to interpolate into @fmt
+ *
+ * Send a GDB syscall request. This function will return immediately;
+ * the callback function will be called later when the remote system
+ * call has completed.
+ *
+ * @fmt should be in the 'call-id,parameter,parameter...' format documented
+ * for the F request packet in the GDB remote protocol. A limited set of
+ * printf-style format specifiers is supported:
+ * %x - target_ulong argument printed in hex
+ * %lx - 64-bit argument printed in hex
+ * %s - string pointer (target_ulong) and length (int) pair
+ */
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+
+/**
+ * use_gdb_syscalls() - report if GDB should be used for syscalls
+ *
+ * This is mostly driven by the semihosting mode the user configures,
+ * but assuming that mode allows GDB, we report true if GDB is
+ * connected to the stub.
+ */
+int use_gdb_syscalls(void);
+
+/**
+ * gdb_exit: exit gdb session, reporting inferior status
+ * @code: exit code reported
+ *
+ * This closes the session and sends a final packet to GDB reporting
+ * the exit status of the program. It also cleans up any connection
+ * detritus before returning.
+ */
+void gdb_exit(int code);
+
+/**
+ * gdb_qemu_exit: ask qemu to exit
+ * @code: exit code reported
+ *
+ * This requests qemu to exit. This function is allowed to return as
+ * the exit request might be processed asynchronously by the qemu backend.
+ */
+void gdb_qemu_exit(int code);
+
+#endif /* _SYSCALLS_H_ */
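A hedged sketch of how a semihosting front-end might drive this API; the helper names, guest address handling and register write-back are assumptions, only gdb_do_syscall()/use_gdb_syscalls() and the GDB_* constants come from the header above.

/* Illustrative completion callback: a real target would write 'ret' (or a
 * GDB_E* errno translated for the guest) back into a guest register. */
static void example_open_cb(CPUState *cs, uint64_t ret, int err)
{
}

static void example_semihost_open(CPUState *cs, target_ulong fname, int fname_len)
{
    if (use_gdb_syscalls()) {
        /* "open,<path>,<flags>,<mode>" per the GDB file-I/O protocol;
         * %s consumes a (pointer, length) pair, %x a target_ulong. */
        gdb_do_syscall(example_open_cb, "open,%s,%x,%x",
                       fname, fname_len,
                       (target_ulong)GDB_O_RDONLY, (target_ulong)0);
    }
}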
diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h
new file mode 100644
index 0000000000..3b8358e3da
--- /dev/null
+++ b/include/gdbstub/user.h
@@ -0,0 +1,67 @@
+/*
+ * gdbstub user-mode only APIs
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef GDBSTUB_USER_H
+#define GDBSTUB_USER_H
+
+#define MAX_SIGINFO_LENGTH 128
+
+/**
+ * gdb_handlesig() - yield control to gdb
+ * @cpu: CPU
+ * @sig: if non-zero, the signal number which caused us to stop
+ * @reason: stop reason for stop reply packet or NULL
+ * @siginfo: target-specific siginfo struct
+ * @siginfo_len: target-specific siginfo struct length
+ *
+ * This function yields control to gdb when a user-mode-only target
+ * needs to stop execution. If @sig is non-zero, then we will send a
+ * stop packet to tell gdb that we have stopped because of this signal.
+ *
+ * This function will block (handling protocol requests from gdb)
+ * until gdb tells us to continue target execution. When it does
+ * return, the return value is a signal to deliver to the target,
+ * or 0 if no signal should be delivered, i.e. the signal that caused
+ * us to stop should be ignored.
+ */
+int gdb_handlesig(CPUState *, int, const char *, void *, int);
+
+/**
+ * gdb_signalled() - inform remote gdb of sig exit
+ * @as: current CPUArchState
+ * @sig: signal number
+ */
+void gdb_signalled(CPUArchState *as, int sig);
+
+/**
+ * gdbserver_fork_start() - inform gdb of the upcoming fork()
+ */
+void gdbserver_fork_start(void);
+
+/**
+ * gdbserver_fork_end() - inform gdb of the completed fork()
+ * @cs: CPU
+ * @pid: 0 if in child process, -1 if fork failed, child process pid otherwise
+ */
+void gdbserver_fork_end(CPUState *cs, pid_t pid);
+
+/**
+ * gdb_syscall_entry() - inform gdb of syscall entry and yield control to it
+ * @cs: CPU
+ * @num: syscall number
+ */
+void gdb_syscall_entry(CPUState *cs, int num);
+
+/**
+ * gdb_syscall_return() - inform gdb of syscall return and yield control to it
+ * @cs: CPU
+ * @num: syscall number
+ */
+void gdb_syscall_return(CPUState *cs, int num);
+
+#endif /* GDBSTUB_USER_H */
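A sketch, with assumed surrounding code, of how a user-mode signal path might consult gdb_handlesig() before queuing a signal for the guest.

/* Hypothetical user-mode delivery path: let gdb inspect the signal first. */
static void example_deliver_signal(CPUState *cs, int sig, void *siginfo, int len)
{
    int guest_sig = gdb_handlesig(cs, sig, NULL, siginfo, len);

    if (guest_sig == 0) {
        return;                 /* gdb asked for the signal to be ignored */
    }
    /* otherwise queue guest_sig to the guest as usual */
}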
diff --git a/include/glib-compat.h b/include/glib-compat.h
index 0b0ec76299..43a562974d 100644
--- a/include/glib-compat.h
+++ b/include/glib-compat.h
@@ -19,17 +19,22 @@
/* Ask for warnings for anything that was marked deprecated in
* the defined version, or before. It is a candidate for rewrite.
*/
-#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_48
+#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_56
/* Ask for warnings if code tries to use function that did not
* exist in the defined version. These risk breaking builds
*/
-#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_48
+#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_56
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include <glib.h>
+#if defined(G_OS_UNIX)
+#include <glib-unix.h>
+#include <sys/types.h>
+#include <pwd.h>
+#endif
/*
* Note that because of the GLIB_VERSION_MAX_ALLOWED constant above, allowing
@@ -41,9 +46,9 @@
* int g_foo(const char *wibble)
*
* We must define a static inline function with the same signature that does
- * what we need, but with a "_qemu" suffix e.g.
+ * what we need, but with a "_compat" suffix e.g.
*
- * static inline void g_foo_qemu(const char *wibble)
+ * static inline void g_foo_compat(const char *wibble)
* {
* #if GLIB_CHECK_VERSION(X, Y, 0)
* g_foo(wibble)
@@ -56,22 +61,94 @@
* ensuring this wrapper function impl doesn't trigger the compiler warning
* about using too new glib APIs. Finally we can do
*
- * #define g_foo(a) g_foo_qemu(a)
+ * #define g_foo(a) g_foo_compat(a)
*
* So now the code elsewhere in QEMU, which *does* have the
* -Wdeprecated-declarations warning active, can call g_foo(...) as normal,
* without generating warnings.
*/
-#if defined(_WIN32) && !GLIB_CHECK_VERSION(2, 50, 0)
/*
- * g_poll has a problem on Windows when using
- * timeouts < 10ms, so use wrapper.
+ * g_memdup2_qemu:
+ * @mem: (nullable): the memory to copy.
+ * @byte_size: the number of bytes to copy.
+ *
+ * Allocates @byte_size bytes of memory, and copies @byte_size bytes into it
+ * from @mem. If @mem is %NULL it returns %NULL.
+ *
+ * This replaces g_memdup(), which was prone to integer overflows when
+ * converting the argument from a #gsize to a #guint.
+ *
+ * This static inline version is a backport of the new public API from
+ * GLib 2.68, kept internal to GLib for backport to older stable releases.
+ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2319.
+ *
+ * Returns: (nullable): a pointer to the newly-allocated copy of the memory,
+ * or %NULL if @mem is %NULL.
*/
-#define g_poll(fds, nfds, timeout) g_poll_fixed(fds, nfds, timeout)
-gint g_poll_fixed(GPollFD *fds, guint nfds, gint timeout);
+static inline gpointer g_memdup2_qemu(gconstpointer mem, gsize byte_size)
+{
+#if GLIB_CHECK_VERSION(2, 68, 0)
+ return g_memdup2(mem, byte_size);
+#else
+ gpointer new_mem;
+
+ if (mem && byte_size != 0) {
+ new_mem = g_malloc(byte_size);
+ memcpy(new_mem, mem, byte_size);
+ } else {
+ new_mem = NULL;
+ }
+
+ return new_mem;
#endif
+}
+#define g_memdup2(m, s) g_memdup2_qemu(m, s)
+
+#if defined(G_OS_UNIX)
+/*
+ * Note: The fallback implementation is not MT-safe, and it returns a copy of
+ * the libc passwd struct (which must be g_free()'d after use) but not of its
+ * contents. Because of these important differences, which the caller must be
+ * aware of, it is not #defined as a GLib API substitution.
+ */
+static inline struct passwd *
+g_unix_get_passwd_entry_qemu(const gchar *user_name, GError **error)
+{
+#if GLIB_CHECK_VERSION(2, 64, 0)
+ return g_unix_get_passwd_entry(user_name, error);
+#else
+ struct passwd *p = getpwnam(user_name);
+ if (!p) {
+ g_set_error_literal(error, G_UNIX_ERROR, 0, g_strerror(errno));
+ return NULL;
+ }
+ return (struct passwd *)g_memdup(p, sizeof(*p));
+#endif
+}
+#endif /* G_OS_UNIX */
+
+static inline bool
+qemu_g_test_slow(void)
+{
+ static int cached = -1;
+ if (cached == -1) {
+ cached = g_test_slow() || getenv("G_TEST_SLOW") != NULL;
+ }
+ return cached;
+}
+
+#undef g_test_slow
+#undef g_test_thorough
+#undef g_test_quick
+#define g_test_slow() qemu_g_test_slow()
+#define g_test_thorough() qemu_g_test_slow()
+#define g_test_quick() (!qemu_g_test_slow())
#pragma GCC diagnostic pop
+#ifndef G_NORETURN
+#define G_NORETURN G_GNUC_NORETURN
+#endif
+
#endif
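A minimal sketch of the wrapper pattern the header describes, using the same hypothetical g_foo() placeholder as the comment; the version number and fallback body are illustrative only.

/* Hypothetical compat wrapper: new API when available, open-coded fallback
 * otherwise, then a #define so callers keep using the plain name. */
static inline void g_foo_compat(const char *wibble)
{
#if GLIB_CHECK_VERSION(2, 60, 0)
    g_foo(wibble);
#else
    /* open-coded equivalent for older glib releases */
    example_do_foo_slow_path(wibble);
#endif
}
#define g_foo(a) g_foo_compat(a)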
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index 38a42f409a..0e6e82b339 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -41,38 +41,13 @@ enum {
};
typedef struct AcpiRsdpData {
- uint8_t oem_id[6] QEMU_NONSTRING; /* OEM identification */
+ char *oem_id; /* OEM identification */
uint8_t revision; /* Must be 0 for 1.0, 2 for 2.0 */
unsigned *rsdt_tbl_offset;
unsigned *xsdt_tbl_offset;
} AcpiRsdpData;
-/* Table structure from Linux kernel (the ACPI tables are under the
- BSD license) */
-
-
-#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \
- uint32_t signature; /* ACPI signature (4 ASCII characters) */ \
- uint32_t length; /* Length of table, in bytes, including header */ \
- uint8_t revision; /* ACPI Specification minor version # */ \
- uint8_t checksum; /* To make sum of entire table == 0 */ \
- uint8_t oem_id[6] \
- QEMU_NONSTRING; /* OEM identification */ \
- uint8_t oem_table_id[8] \
- QEMU_NONSTRING; /* OEM table identification */ \
- uint32_t oem_revision; /* OEM revision number */ \
- uint8_t asl_compiler_id[4] \
- QEMU_NONSTRING; /* ASL compiler vendor ID */ \
- uint32_t asl_compiler_revision; /* ASL compiler revision number */
-
-
-/* ACPI common table header */
-struct AcpiTableHeader {
- ACPI_TABLE_HEADER_DEF
-} QEMU_PACKED;
-typedef struct AcpiTableHeader AcpiTableHeader;
-
struct AcpiGenericAddress {
uint8_t space_id; /* Address space where struct or register exists */
uint8_t bit_width; /* Size in bits of given register */
@@ -80,7 +55,7 @@ struct AcpiGenericAddress {
uint8_t access_width; /* ACPI 3.0: Minimum Access size (ACPI 3.0),
ACPI 2.0: Reserved, Table 5-1 */
uint64_t address; /* 64-bit address of struct or register */
-} QEMU_PACKED;
+};
typedef struct AcpiFadtData {
struct AcpiGenericAddress pm1a_cnt; /* PM1a_CNT_BLK */
@@ -102,6 +77,7 @@ typedef struct AcpiFadtData {
uint16_t plvl2_lat; /* P_LVL2_LAT */
uint16_t plvl3_lat; /* P_LVL3_LAT */
uint16_t arm_boot_arch; /* ARM_BOOT_ARCH */
+ uint16_t iapc_boot_arch; /* IAPC_BOOT_ARCH */
uint8_t minor_ver; /* FADT Minor Version */
/*
@@ -114,508 +90,40 @@ typedef struct AcpiFadtData {
unsigned *xdsdt_tbl_offset;
} AcpiFadtData;
-#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
-#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1)
-
-/*
- * Serial Port Console Redirection Table (SPCR), Rev. 1.02
- *
- * For .interface_type see Debug Port Table 2 (DBG2) serial port
- * subtypes in Table 3, Rev. May 22, 2012
- */
-struct AcpiSerialPortConsoleRedirection {
- ACPI_TABLE_HEADER_DEF
- uint8_t interface_type;
- uint8_t reserved1[3];
- struct AcpiGenericAddress base_address;
- uint8_t interrupt_types;
- uint8_t irq;
- uint32_t gsi;
- uint8_t baud;
- uint8_t parity;
- uint8_t stopbits;
- uint8_t flowctrl;
- uint8_t term_type;
- uint8_t reserved2;
- uint16_t pci_device_id;
- uint16_t pci_vendor_id;
- uint8_t pci_bus;
- uint8_t pci_slot;
- uint8_t pci_func;
+typedef struct AcpiGas {
+ uint8_t id; /* Address space ID */
+ uint8_t width; /* Register bit width */
+ uint8_t offset; /* Register bit offset */
+ uint8_t size; /* Access size */
+ uint64_t addr; /* Address */
+} AcpiGas;
+
+/* SPCR (Serial Port Console Redirection table) */
+typedef struct AcpiSpcrData {
+ uint8_t interface_type;
+ uint8_t reserved[3];
+ struct AcpiGas base_addr;
+ uint8_t interrupt_type;
+ uint8_t pc_interrupt;
+ uint32_t interrupt; /* Global system interrupt */
+ uint8_t baud_rate;
+ uint8_t parity;
+ uint8_t stop_bits;
+ uint8_t flow_control;
+ uint8_t terminal_type;
+ uint8_t language;
+ uint8_t reserved1;
+ uint16_t pci_device_id; /* Must be 0xffff if not PCI device */
+ uint16_t pci_vendor_id; /* Must be 0xffff if not PCI device */
+ uint8_t pci_bus;
+ uint8_t pci_device;
+ uint8_t pci_function;
uint32_t pci_flags;
- uint8_t pci_seg;
- uint32_t reserved3;
-} QEMU_PACKED;
-typedef struct AcpiSerialPortConsoleRedirection
- AcpiSerialPortConsoleRedirection;
-
-/*
- * ACPI 1.0 Root System Description Table (RSDT)
- */
-struct AcpiRsdtDescriptorRev1 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t table_offset_entry[]; /* Array of pointers to other */
- /* ACPI tables */
-} QEMU_PACKED;
-typedef struct AcpiRsdtDescriptorRev1 AcpiRsdtDescriptorRev1;
-
-/*
- * ACPI 2.0 eXtended System Description Table (XSDT)
- */
-struct AcpiXsdtDescriptorRev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint64_t table_offset_entry[]; /* Array of pointers to other */
- /* ACPI tables */
-} QEMU_PACKED;
-typedef struct AcpiXsdtDescriptorRev2 AcpiXsdtDescriptorRev2;
-
-/*
- * ACPI 1.0 Firmware ACPI Control Structure (FACS)
- */
-struct AcpiFacsDescriptorRev1 {
- uint32_t signature; /* ACPI Signature */
- uint32_t length; /* Length of structure, in bytes */
- uint32_t hardware_signature; /* Hardware configuration signature */
- uint32_t firmware_waking_vector; /* ACPI OS waking vector */
- uint32_t global_lock; /* Global Lock */
- uint32_t flags;
- uint8_t resverved3 [40]; /* Reserved - must be zero */
-} QEMU_PACKED;
-typedef struct AcpiFacsDescriptorRev1 AcpiFacsDescriptorRev1;
-
-/*
- * Differentiated System Description Table (DSDT)
- */
-
-/*
- * MADT values and structures
- */
-
-/* Values for MADT PCATCompat */
-
-#define ACPI_DUAL_PIC 0
-#define ACPI_MULTIPLE_APIC 1
-
-/* Master MADT */
-
-struct AcpiMultipleApicTable {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t local_apic_address; /* Physical address of local APIC */
- uint32_t flags;
-} QEMU_PACKED;
-typedef struct AcpiMultipleApicTable AcpiMultipleApicTable;
-
-/* Values for Type in APIC sub-headers */
-
-#define ACPI_APIC_PROCESSOR 0
-#define ACPI_APIC_IO 1
-#define ACPI_APIC_XRUPT_OVERRIDE 2
-#define ACPI_APIC_NMI 3
-#define ACPI_APIC_LOCAL_NMI 4
-#define ACPI_APIC_ADDRESS_OVERRIDE 5
-#define ACPI_APIC_IO_SAPIC 6
-#define ACPI_APIC_LOCAL_SAPIC 7
-#define ACPI_APIC_XRUPT_SOURCE 8
-#define ACPI_APIC_LOCAL_X2APIC 9
-#define ACPI_APIC_LOCAL_X2APIC_NMI 10
-#define ACPI_APIC_GENERIC_CPU_INTERFACE 11
-#define ACPI_APIC_GENERIC_DISTRIBUTOR 12
-#define ACPI_APIC_GENERIC_MSI_FRAME 13
-#define ACPI_APIC_GENERIC_REDISTRIBUTOR 14
-#define ACPI_APIC_GENERIC_TRANSLATOR 15
-#define ACPI_APIC_RESERVED 16 /* 16 and greater are reserved */
-
-/*
- * MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE)
- */
-#define ACPI_SUB_HEADER_DEF /* Common ACPI sub-structure header */\
- uint8_t type; \
- uint8_t length;
-
-/* Sub-structures for MADT */
-
-struct AcpiMadtProcessorApic {
- ACPI_SUB_HEADER_DEF
- uint8_t processor_id; /* ACPI processor id */
- uint8_t local_apic_id; /* Processor's local APIC id */
- uint32_t flags;
-} QEMU_PACKED;
-typedef struct AcpiMadtProcessorApic AcpiMadtProcessorApic;
-
-struct AcpiMadtIoApic {
- ACPI_SUB_HEADER_DEF
- uint8_t io_apic_id; /* I/O APIC ID */
- uint8_t reserved; /* Reserved - must be zero */
- uint32_t address; /* APIC physical address */
- uint32_t interrupt; /* Global system interrupt where INTI
- * lines start */
-} QEMU_PACKED;
-typedef struct AcpiMadtIoApic AcpiMadtIoApic;
-
-struct AcpiMadtIntsrcovr {
- ACPI_SUB_HEADER_DEF
- uint8_t bus;
- uint8_t source;
- uint32_t gsi;
- uint16_t flags;
-} QEMU_PACKED;
-typedef struct AcpiMadtIntsrcovr AcpiMadtIntsrcovr;
-
-struct AcpiMadtLocalNmi {
- ACPI_SUB_HEADER_DEF
- uint8_t processor_id; /* ACPI processor id */
- uint16_t flags; /* MPS INTI flags */
- uint8_t lint; /* Local APIC LINT# */
-} QEMU_PACKED;
-typedef struct AcpiMadtLocalNmi AcpiMadtLocalNmi;
-
-struct AcpiMadtProcessorX2Apic {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t x2apic_id; /* Processor's local x2APIC ID */
- uint32_t flags;
- uint32_t uid; /* Processor object _UID */
-} QEMU_PACKED;
-typedef struct AcpiMadtProcessorX2Apic AcpiMadtProcessorX2Apic;
-
-struct AcpiMadtLocalX2ApicNmi {
- ACPI_SUB_HEADER_DEF
- uint16_t flags; /* MPS INTI flags */
- uint32_t uid; /* Processor object _UID */
- uint8_t lint; /* Local APIC LINT# */
- uint8_t reserved[3]; /* Local APIC LINT# */
-} QEMU_PACKED;
-typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi;
-
-struct AcpiMadtGenericCpuInterface {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t cpu_interface_number;
- uint32_t uid;
- uint32_t flags;
- uint32_t parking_version;
- uint32_t performance_interrupt;
- uint64_t parked_address;
- uint64_t base_address;
- uint64_t gicv_base_address;
- uint64_t gich_base_address;
- uint32_t vgic_interrupt;
- uint64_t gicr_base_address;
- uint64_t arm_mpidr;
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericCpuInterface AcpiMadtGenericCpuInterface;
-
-/* GICC CPU Interface Flags */
-#define ACPI_MADT_GICC_ENABLED 1
-
-struct AcpiMadtGenericDistributor {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t gic_id;
- uint64_t base_address;
- uint32_t global_irq_base;
- /* ACPI 5.1 Errata 1228 Present GIC version in MADT table */
- uint8_t version;
- uint8_t reserved2[3];
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericDistributor AcpiMadtGenericDistributor;
-
-struct AcpiMadtGenericMsiFrame {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t gic_msi_frame_id;
- uint64_t base_address;
- uint32_t flags;
- uint16_t spi_count;
- uint16_t spi_base;
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericMsiFrame AcpiMadtGenericMsiFrame;
-
-struct AcpiMadtGenericRedistributor {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint64_t base_address;
- uint32_t range_length;
-} QEMU_PACKED;
-
-typedef struct AcpiMadtGenericRedistributor AcpiMadtGenericRedistributor;
-
-struct AcpiMadtGenericTranslator {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t translation_id;
- uint64_t base_address;
+ uint8_t pci_segment;
uint32_t reserved2;
-} QEMU_PACKED;
+} AcpiSpcrData;
-typedef struct AcpiMadtGenericTranslator AcpiMadtGenericTranslator;
-
-/*
- * Generic Timer Description Table (GTDT)
- */
-#define ACPI_GTDT_INTERRUPT_MODE_LEVEL (0 << 0)
-#define ACPI_GTDT_INTERRUPT_MODE_EDGE (1 << 0)
-#define ACPI_GTDT_CAP_ALWAYS_ON (1 << 2)
-
-struct AcpiGenericTimerTable {
- ACPI_TABLE_HEADER_DEF
- uint64_t counter_block_addresss;
- uint32_t reserved;
- uint32_t secure_el1_interrupt;
- uint32_t secure_el1_flags;
- uint32_t non_secure_el1_interrupt;
- uint32_t non_secure_el1_flags;
- uint32_t virtual_timer_interrupt;
- uint32_t virtual_timer_flags;
- uint32_t non_secure_el2_interrupt;
- uint32_t non_secure_el2_flags;
- uint64_t counter_read_block_address;
- uint32_t platform_timer_count;
- uint32_t platform_timer_offset;
-} QEMU_PACKED;
-typedef struct AcpiGenericTimerTable AcpiGenericTimerTable;
-
-/*
- * HPET Description Table
- */
-struct Acpi20Hpet {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t timer_block_id;
- struct AcpiGenericAddress addr;
- uint8_t hpet_number;
- uint16_t min_tick;
- uint8_t page_protect;
-} QEMU_PACKED;
-typedef struct Acpi20Hpet Acpi20Hpet;
-
-/*
- * SRAT (NUMA topology description) table
- */
-
-struct AcpiSystemResourceAffinityTable {
- ACPI_TABLE_HEADER_DEF
- uint32_t reserved1;
- uint32_t reserved2[2];
-} QEMU_PACKED;
-typedef struct AcpiSystemResourceAffinityTable AcpiSystemResourceAffinityTable;
-
-#define ACPI_SRAT_PROCESSOR_APIC 0
-#define ACPI_SRAT_MEMORY 1
-#define ACPI_SRAT_PROCESSOR_x2APIC 2
-#define ACPI_SRAT_PROCESSOR_GICC 3
-
-struct AcpiSratProcessorAffinity {
- ACPI_SUB_HEADER_DEF
- uint8_t proximity_lo;
- uint8_t local_apic_id;
- uint32_t flags;
- uint8_t local_sapic_eid;
- uint8_t proximity_hi[3];
- uint32_t reserved;
-} QEMU_PACKED;
-typedef struct AcpiSratProcessorAffinity AcpiSratProcessorAffinity;
-
-struct AcpiSratProcessorX2ApicAffinity {
- ACPI_SUB_HEADER_DEF
- uint16_t reserved;
- uint32_t proximity_domain;
- uint32_t x2apic_id;
- uint32_t flags;
- uint32_t clk_domain;
- uint32_t reserved2;
-} QEMU_PACKED;
-typedef struct AcpiSratProcessorX2ApicAffinity AcpiSratProcessorX2ApicAffinity;
-
-struct AcpiSratMemoryAffinity {
- ACPI_SUB_HEADER_DEF
- uint32_t proximity;
- uint16_t reserved1;
- uint64_t base_addr;
- uint64_t range_length;
- uint32_t reserved2;
- uint32_t flags;
- uint32_t reserved3[2];
-} QEMU_PACKED;
-typedef struct AcpiSratMemoryAffinity AcpiSratMemoryAffinity;
-
-struct AcpiSratProcessorGiccAffinity {
- ACPI_SUB_HEADER_DEF
- uint32_t proximity;
- uint32_t acpi_processor_uid;
- uint32_t flags;
- uint32_t clock_domain;
-} QEMU_PACKED;
-
-typedef struct AcpiSratProcessorGiccAffinity AcpiSratProcessorGiccAffinity;
-
-/*
- * TCPA Description Table
- *
- * Following Level 00, Rev 00.37 of specs:
- * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification
- */
-struct Acpi20Tcpa {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint16_t platform_class;
- uint32_t log_area_minimum_length;
- uint64_t log_area_start_address;
-} QEMU_PACKED;
-typedef struct Acpi20Tcpa Acpi20Tcpa;
-
-/* DMAR - DMA Remapping table r2.2 */
-struct AcpiTableDmar {
- ACPI_TABLE_HEADER_DEF
- uint8_t host_address_width; /* Maximum DMA physical addressability */
- uint8_t flags;
- uint8_t reserved[10];
-} QEMU_PACKED;
-typedef struct AcpiTableDmar AcpiTableDmar;
-
-/* Masks for Flags field above */
-#define ACPI_DMAR_INTR_REMAP 1
-#define ACPI_DMAR_X2APIC_OPT_OUT (1 << 1)
-
-/* Values for sub-structure type for DMAR */
-enum {
- ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, /* DRHD */
- ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, /* RMRR */
- ACPI_DMAR_TYPE_ATSR = 2, /* ATSR */
- ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, /* RHSR */
- ACPI_DMAR_TYPE_ANDD = 4, /* ANDD */
- ACPI_DMAR_TYPE_RESERVED = 5 /* Reserved for furture use */
-};
-
-/*
- * Sub-structures for DMAR
- */
-
-/* Device scope structure for DRHD. */
-struct AcpiDmarDeviceScope {
- uint8_t entry_type;
- uint8_t length;
- uint16_t reserved;
- uint8_t enumeration_id;
- uint8_t bus;
- struct {
- uint8_t device;
- uint8_t function;
- } path[];
-} QEMU_PACKED;
-typedef struct AcpiDmarDeviceScope AcpiDmarDeviceScope;
-
-/* Type 0: Hardware Unit Definition */
-struct AcpiDmarHardwareUnit {
- uint16_t type;
- uint16_t length;
- uint8_t flags;
- uint8_t reserved;
- uint16_t pci_segment; /* The PCI Segment associated with this unit */
- uint64_t address; /* Base address of remapping hardware register-set */
- AcpiDmarDeviceScope scope[];
-} QEMU_PACKED;
-typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit;
-
-/* Type 2: Root Port ATS Capability Reporting Structure */
-struct AcpiDmarRootPortATS {
- uint16_t type;
- uint16_t length;
- uint8_t flags;
- uint8_t reserved;
- uint16_t pci_segment;
- AcpiDmarDeviceScope scope[];
-} QEMU_PACKED;
-typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS;
-
-/* Masks for Flags field above */
-#define ACPI_DMAR_INCLUDE_PCI_ALL 1
-#define ACPI_DMAR_ATSR_ALL_PORTS 1
-
-/*
- * Input Output Remapping Table (IORT)
- * Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049B, October 2015
- */
-
-struct AcpiIortTable {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- uint32_t node_count;
- uint32_t node_offset;
- uint32_t reserved;
-} QEMU_PACKED;
-typedef struct AcpiIortTable AcpiIortTable;
-
-/*
- * IORT node types
- */
-
-#define ACPI_IORT_NODE_HEADER_DEF /* Node format common fields */ \
- uint8_t type; \
- uint16_t length; \
- uint8_t revision; \
- uint32_t reserved; \
- uint32_t mapping_count; \
- uint32_t mapping_offset;
-
-/* Values for node Type above */
-enum {
- ACPI_IORT_NODE_ITS_GROUP = 0x00,
- ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
- ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
- ACPI_IORT_NODE_SMMU = 0x03,
- ACPI_IORT_NODE_SMMU_V3 = 0x04
-};
-
-struct AcpiIortIdMapping {
- uint32_t input_base;
- uint32_t id_count;
- uint32_t output_base;
- uint32_t output_reference;
- uint32_t flags;
-} QEMU_PACKED;
-typedef struct AcpiIortIdMapping AcpiIortIdMapping;
-
-struct AcpiIortMemoryAccess {
- uint32_t cache_coherency;
- uint8_t hints;
- uint16_t reserved;
- uint8_t memory_flags;
-} QEMU_PACKED;
-typedef struct AcpiIortMemoryAccess AcpiIortMemoryAccess;
-
-struct AcpiIortItsGroup {
- ACPI_IORT_NODE_HEADER_DEF
- uint32_t its_count;
- uint32_t identifiers[];
-} QEMU_PACKED;
-typedef struct AcpiIortItsGroup AcpiIortItsGroup;
-
-#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE 1
-
-struct AcpiIortSmmu3 {
- ACPI_IORT_NODE_HEADER_DEF
- uint64_t base_address;
- uint32_t flags;
- uint32_t reserved2;
- uint64_t vatos_address;
- uint32_t model;
- uint32_t event_gsiv;
- uint32_t pri_gsiv;
- uint32_t gerr_gsiv;
- uint32_t sync_gsiv;
- AcpiIortIdMapping id_mapping_array[];
-} QEMU_PACKED;
-typedef struct AcpiIortSmmu3 AcpiIortSmmu3;
-
-struct AcpiIortRC {
- ACPI_IORT_NODE_HEADER_DEF
- AcpiIortMemoryAccess memory_properties;
- uint32_t ats_attribute;
- uint32_t pci_segment_number;
- AcpiIortIdMapping id_mapping_array[];
-} QEMU_PACKED;
-typedef struct AcpiIortRC AcpiIortRC;
+#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
+#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1)
#endif
diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h
index 4bef575312..e0e51e85b4 100644
--- a/include/hw/acpi/acpi.h
+++ b/include/hw/acpi/acpi.h
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -47,6 +47,8 @@
#define ACPI_PM_PROP_PM_IO_BASE "pm_io_base"
#define ACPI_PM_PROP_GPE0_BLK "gpe0_blk"
#define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len"
+#define ACPI_PM_PROP_ACPI_PCIHP_BRIDGE "acpi-pci-hotplug-with-bridge-support"
+#define ACPI_PM_PROP_ACPI_PCI_ROOTHP "acpi-root-pci-hotplug"
/* PM Timer ticks per second (HZ) */
#define PM_TIMER_FREQUENCY 3579545
@@ -64,7 +66,7 @@
#define ACPI_BITMASK_POWER_BUTTON_STATUS 0x0100
#define ACPI_BITMASK_SLEEP_BUTTON_STATUS 0x0200
#define ACPI_BITMASK_RT_CLOCK_STATUS 0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_WAKE_STATUS 0x8000
#define ACPI_BITMASK_ALL_FIXED_STATUS (\
@@ -82,7 +84,7 @@
#define ACPI_BITMASK_POWER_BUTTON_ENABLE 0x0100
#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE 0x0200
#define ACPI_BITMASK_RT_CLOCK_ENABLE 0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_PM1_COMMON_ENABLED ( \
ACPI_BITMASK_RT_CLOCK_ENABLE | \
@@ -128,6 +130,7 @@ struct ACPIPM1CNT {
MemoryRegion io;
uint16_t cnt;
uint8_t s4_val;
+ bool acpi_only;
};
struct ACPIGPE {
@@ -163,7 +166,8 @@ void acpi_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
/* PM1a_CNT: piix and ich9 don't implement PM1b CNT. */
void acpi_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent,
- bool disable_s3, bool disable_s4, uint8_t s4_val);
+ bool disable_s3, bool disable_s4, uint8_t s4_val,
+ bool acpi_only);
void acpi_pm1_cnt_update(ACPIREGS *ar,
bool sci_enable, bool sci_disable);
void acpi_pm1_cnt_reset(ACPIREGS *ar);
diff --git a/include/hw/acpi/acpi_aml_interface.h b/include/hw/acpi/acpi_aml_interface.h
new file mode 100644
index 0000000000..11748a8866
--- /dev/null
+++ b/include/hw/acpi/acpi_aml_interface.h
@@ -0,0 +1,52 @@
+#ifndef ACPI_AML_INTERFACE_H
+#define ACPI_AML_INTERFACE_H
+
+#include "qom/object.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_ACPI_DEV_AML_IF "acpi-dev-aml-interface"
+typedef struct AcpiDevAmlIfClass AcpiDevAmlIfClass;
+DECLARE_CLASS_CHECKERS(AcpiDevAmlIfClass, ACPI_DEV_AML_IF, TYPE_ACPI_DEV_AML_IF)
+#define ACPI_DEV_AML_IF(obj) \
+ INTERFACE_CHECK(AcpiDevAmlIf, (obj), TYPE_ACPI_DEV_AML_IF)
+
+typedef struct AcpiDevAmlIf AcpiDevAmlIf;
+typedef void (*dev_aml_fn)(AcpiDevAmlIf *adev, Aml *scope);
+
+/**
+ * AcpiDevAmlIfClass:
+ *
+ * build_dev_aml: adds a device-specific AML blob to the provided scope
+ *
+ * The interface is designed to provide a generic callback that builds a
+ * device-specific AML blob.
+ */
+struct AcpiDevAmlIfClass {
+ /* <private> */
+ InterfaceClass parent_class;
+
+ /* <public> */
+ dev_aml_fn build_dev_aml;
+};
+
+static inline dev_aml_fn get_dev_aml_func(DeviceState *dev)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_ACPI_DEV_AML_IF)) {
+ AcpiDevAmlIfClass *klass = ACPI_DEV_AML_IF_GET_CLASS(dev);
+ return klass->build_dev_aml;
+ }
+ return NULL;
+}
+
+static inline void call_dev_aml_func(DeviceState *dev, Aml *scope)
+{
+ dev_aml_fn fn = get_dev_aml_func(dev);
+ if (fn) {
+ fn(ACPI_DEV_AML_IF(dev), scope);
+ }
+}
+
+void qbus_build_aml(BusState *bus, Aml *scope);
+
+#endif
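A sketch of a device implementing this interface; the device name, _HID value and class_init wiring are assumptions, not taken from the patch.

/* Hypothetical device providing TYPE_ACPI_DEV_AML_IF. */
static void example_dev_build_aml(AcpiDevAmlIf *adev, Aml *scope)
{
    Aml *dev = aml_device("EXD0");                       /* assumed node name */

    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C02")));
    aml_append(scope, dev);
}

static void example_dev_class_init(ObjectClass *oc, void *data)
{
    AcpiDevAmlIfClass *amlc = ACPI_DEV_AML_IF_CLASS(oc);

    amlc->build_dev_aml = example_dev_build_aml;
}

The generic DSDT code can then pull the blob in via call_dev_aml_func() or qbus_build_aml() without knowing the device type.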
diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h
index a2a12af9b9..68d9d15f50 100644
--- a/include/hw/acpi/acpi_dev_interface.h
+++ b/include/hw/acpi/acpi_dev_interface.h
@@ -1,9 +1,8 @@
#ifndef ACPI_DEV_INTERFACE_H
#define ACPI_DEV_INTERFACE_H
-#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-acpi.h"
#include "qom/object.h"
-#include "hw/boards.h"
#include "hw/qdev-core.h"
/* These values are part of guest ABI, and can not be changed */
@@ -18,12 +17,9 @@ typedef enum {
#define TYPE_ACPI_DEVICE_IF "acpi-device-interface"
-#define ACPI_DEVICE_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(AcpiDeviceIfClass, (klass), \
- TYPE_ACPI_DEVICE_IF)
-#define ACPI_DEVICE_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AcpiDeviceIfClass, (obj), \
- TYPE_ACPI_DEVICE_IF)
+typedef struct AcpiDeviceIfClass AcpiDeviceIfClass;
+DECLARE_CLASS_CHECKERS(AcpiDeviceIfClass, ACPI_DEVICE_IF,
+ TYPE_ACPI_DEVICE_IF)
#define ACPI_DEVICE_IF(obj) \
INTERFACE_CHECK(AcpiDeviceIf, (obj), \
TYPE_ACPI_DEVICE_IF)
@@ -48,14 +44,12 @@ void acpi_send_event(DeviceState *dev, AcpiEventStatusBits event);
* knowledge about internals of actual device that implements
* ACPI interface.
*/
-typedef struct AcpiDeviceIfClass {
+struct AcpiDeviceIfClass {
/* <private> */
InterfaceClass parent_class;
/* <public> */
void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev);
- void (*madt_cpu)(AcpiDeviceIf *adev, int uid,
- const CPUArchIdList *apic_ids, GArray *entry);
-} AcpiDeviceIfClass;
+};
#endif
diff --git a/include/hw/acpi/acpi_generic_initiator.h b/include/hw/acpi/acpi_generic_initiator.h
new file mode 100644
index 0000000000..a304bad73e
--- /dev/null
+++ b/include/hw/acpi/acpi_generic_initiator.h
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef ACPI_GENERIC_INITIATOR_H
+#define ACPI_GENERIC_INITIATOR_H
+
+#include "qom/object_interfaces.h"
+
+#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator"
+
+typedef struct AcpiGenericInitiator {
+ /* private */
+ Object parent;
+
+ /* public */
+ char *pci_dev;
+ uint16_t node;
+} AcpiGenericInitiator;
+
+/*
+ * ACPI 6.3:
+ * Table 5-81 Flags – Generic Initiator Affinity Structure
+ */
+typedef enum {
+ /*
+ * If clear, the OSPM ignores the contents of the Generic
+ * Initiator/Port Affinity Structure. This allows system firmware
+ * to populate the SRAT with a static number of structures, but only
+ * enable them as necessary.
+ */
+ GEN_AFFINITY_ENABLED = (1 << 0),
+} GenericAffinityFlags;
+
+/*
+ * ACPI 6.3:
+ * Table 5-80 Device Handle - PCI
+ */
+typedef struct PCIDeviceHandle {
+ uint16_t segment;
+ uint16_t bdf;
+} PCIDeviceHandle;
+
+void build_srat_generic_pci_initiator(GArray *table_data);
+
+#endif
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index d27da03d64..a3784155cb 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -4,11 +4,8 @@
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/bios-linker-loader.h"
-/* Reserve RAM space for tables: add another order of magnitude. */
-#define ACPI_BUILD_TABLE_MAX_SIZE 0x200000
-
#define ACPI_BUILD_APPNAME6 "BOCHS "
-#define ACPI_BUILD_APPNAME4 "BXPC"
+#define ACPI_BUILD_APPNAME8 "BXPC "
#define ACPI_BUILD_TABLE_FILE "etc/acpi/tables"
#define ACPI_BUILD_RSDP_FILE "etc/acpi/rsdp"
@@ -224,6 +221,20 @@ struct AcpiBuildTables {
BIOSLinker *linker;
} AcpiBuildTables;
+typedef
+struct CrsRangeEntry {
+ uint64_t base;
+ uint64_t limit;
+} CrsRangeEntry;
+
+typedef
+struct CrsRangeSet {
+ GPtrArray *io_ranges;
+ GPtrArray *mem_ranges;
+ GPtrArray *mem_64bit_ranges;
+} CrsRangeSet;
+
+
/*
* ACPI 5.0: 6.4.3.8.2 Serial Bus Connection Descriptors
* Serial Bus Type
@@ -266,7 +277,7 @@ void free_aml_allocator(void);
* @child: element that is copied into @parent_ctx context
*
* Joins Aml elements together and helps to construct AML tables
- * Examle of usage:
+ * Example of usage:
* Aml *table = aml_def_block("SSDT", ...);
* Aml *sb = aml_scope("\\_SB");
* Aml *dev = aml_device("PCI0");
@@ -278,7 +289,7 @@ void free_aml_allocator(void);
void aml_append(Aml *parent_ctx, Aml *child);
/* non block AML object primitives */
-Aml *aml_name(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
+Aml *aml_name(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
Aml *aml_name_decl(const char *name, Aml *val);
Aml *aml_debug(void);
Aml *aml_return(Aml *val);
@@ -287,9 +298,11 @@ Aml *aml_arg(int pos);
Aml *aml_to_integer(Aml *arg);
Aml *aml_to_hexstring(Aml *src, Aml *dst);
Aml *aml_to_buffer(Aml *src, Aml *dst);
+Aml *aml_to_decimalstring(Aml *src, Aml *dst);
Aml *aml_store(Aml *val, Aml *target);
Aml *aml_and(Aml *arg1, Aml *arg2, Aml *dst);
Aml *aml_or(Aml *arg1, Aml *arg2, Aml *dst);
+Aml *aml_land(Aml *arg1, Aml *arg2);
Aml *aml_lor(Aml *arg1, Aml *arg2);
Aml *aml_shiftleft(Aml *arg1, Aml *count);
Aml *aml_shiftright(Aml *arg1, Aml *count, Aml *dst);
@@ -300,6 +313,7 @@ Aml *aml_increment(Aml *arg);
Aml *aml_decrement(Aml *arg);
Aml *aml_index(Aml *arg1, Aml *idx);
Aml *aml_notify(Aml *arg1, Aml *arg2);
+Aml *aml_break(void);
Aml *aml_call0(const char *method);
Aml *aml_call1(const char *method, Aml *arg1);
Aml *aml_call2(const char *method, Aml *arg1, Aml *arg2);
@@ -307,6 +321,8 @@ Aml *aml_call3(const char *method, Aml *arg1, Aml *arg2, Aml *arg3);
Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4);
Aml *aml_call5(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
Aml *arg5);
+Aml *aml_call6(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+ Aml *arg5, Aml *arg6);
Aml *aml_gpio_int(AmlConsumerAndProducer con_and_pro,
AmlLevelAndEdge edge_level,
AmlActiveHighAndLow active_level, AmlShared shared,
@@ -328,13 +344,13 @@ Aml *aml_irq_no_flags(uint8_t irq);
Aml *aml_named_field(const char *name, unsigned length);
Aml *aml_reserved_field(unsigned length);
Aml *aml_local(int num);
-Aml *aml_string(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
+Aml *aml_string(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
Aml *aml_lnot(Aml *arg);
Aml *aml_equal(Aml *arg1, Aml *arg2);
Aml *aml_lgreater(Aml *arg1, Aml *arg2);
Aml *aml_lgreater_equal(Aml *arg1, Aml *arg2);
Aml *aml_processor(uint8_t proc_id, uint32_t pblk_addr, uint8_t pblk_len,
- const char *name_format, ...) GCC_FMT_ATTR(4, 5);
+ const char *name_format, ...) G_GNUC_PRINTF(4, 5);
Aml *aml_eisaid(const char *str);
Aml *aml_word_bus_number(AmlMinFixed min_fixed, AmlMaxFixed max_fixed,
AmlDecode dec, uint16_t addr_gran,
@@ -368,8 +384,8 @@ Aml *aml_sleep(uint64_t msec);
Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source);
/* Block AML object primitives */
-Aml *aml_scope(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
-Aml *aml_device(const char *name_format, ...) GCC_FMT_ATTR(1, 2);
+Aml *aml_scope(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
+Aml *aml_device(const char *name_format, ...) G_GNUC_PRINTF(1, 2);
Aml *aml_method(const char *name, int arg_count, AmlSerializeFlag sflag);
Aml *aml_if(Aml *predicate);
Aml *aml_else(void);
@@ -397,10 +413,37 @@ Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
Aml *aml_object_type(Aml *object);
void build_append_int_noprefix(GArray *table, uint64_t value, int size);
-void
-build_header(BIOSLinker *linker, GArray *table_data,
- AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
- const char *oem_id, const char *oem_table_id);
+
+typedef struct AcpiTable {
+ const char *sig;
+ const uint8_t rev;
+ const char *oem_id;
+ const char *oem_table_id;
+ /* private vars tracking table state */
+ GArray *array;
+ unsigned table_offset;
+} AcpiTable;
+
+/**
+ * acpi_table_begin:
+ * initializes the table header and keeps track of
+ * the table data/offsets
+ * @desc: ACPI table descriptor
+ * @array: blob where the ACPI table will be composed/stored.
+ */
+void acpi_table_begin(AcpiTable *desc, GArray *array);
+
+/**
+ * acpi_table_end:
+ * sets the actual table length and tells the BIOS loader
+ * where the table is, for later initialization on the
+ * guest side.
+ * @linker: reference to the BIOSLinker object to use for the table
+ * @table: ACPI table descriptor that was used with its @acpi_table_begin
+ * counterpart
+ */
+void acpi_table_end(BIOSLinker *linker, AcpiTable *table);
+
void *acpi_data_push(GArray *table_data, unsigned size);
unsigned acpi_data_len(GArray *table);
void acpi_add_table(GArray *table_offsets, GArray *table_data);
@@ -417,7 +460,7 @@ build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
int
build_append_named_dword(GArray *array, const char *name_format, ...)
-GCC_FMT_ATTR(2, 3);
+G_GNUC_PRINTF(2, 3);
void build_append_gas(GArray *table, AmlAddressSpace as,
uint8_t bit_width, uint8_t bit_offset,
@@ -430,13 +473,32 @@ build_append_gas_from_struct(GArray *table, const struct AcpiGenericAddress *s)
s->access_width, s->address);
}
-void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
+void crs_range_insert(GPtrArray *ranges, uint64_t base, uint64_t limit);
+void crs_replace_with_free_ranges(GPtrArray *ranges,
+ uint64_t start, uint64_t end);
+void crs_range_set_init(CrsRangeSet *range_set);
+void crs_range_set_free(CrsRangeSet *range_set);
+
+Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset,
+ uint32_t mmio32_offset, uint64_t mmio64_offset,
+ uint16_t bus_nr_offset);
+
+void build_srat_memory(GArray *table_data, uint64_t base,
uint64_t len, int node, MemoryAffinityFlags flags);
-void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms);
+void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+ const char *oem_id, const char *oem_table_id);
+
+void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+ const char *oem_id, const char *oem_table_id);
void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
const char *oem_id, const char *oem_table_id);
-void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog);
+void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog,
+ const char *oem_id, const char *oem_table_id);
+
+void build_spcr(GArray *table_data, BIOSLinker *linker,
+ const AcpiSpcrData *f, const uint8_t rev,
+ const char *oem_id, const char *oem_table_id);
#endif
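A sketch of the construction flow implied by acpi_table_begin()/acpi_table_end(); the signature, revision and body of the table are placeholders.

/* Hypothetical table builder using the begin/end helpers declared above. */
static void build_example_table(GArray *table_data, BIOSLinker *linker,
                                const char *oem_id, const char *oem_table_id)
{
    AcpiTable table = { .sig = "EXMP", .rev = 1,     /* placeholder signature */
                        .oem_id = oem_id, .oem_table_id = oem_table_id };

    acpi_table_begin(&table, table_data);
    build_append_int_noprefix(table_data, 0 /* reserved */, 4);
    acpi_table_end(linker, &table);
}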
diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
index 62f0278ba2..e6e1a9ef59 100644
--- a/include/hw/acpi/cpu.h
+++ b/include/hw/acpi/cpu.h
@@ -12,16 +12,19 @@
#ifndef ACPI_CPU_H
#define ACPI_CPU_H
+#include "qapi/qapi-types-acpi.h"
#include "hw/qdev-core.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
+#include "hw/boards.h"
#include "hw/hotplug.h"
typedef struct AcpiCpuStatus {
- struct CPUState *cpu;
+ CPUState *cpu;
uint64_t arch_id;
bool is_inserting;
bool is_removing;
+ bool fw_remove;
uint32_t ost_event;
uint32_t ost_status;
} AcpiCpuStatus;
@@ -50,10 +53,15 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
typedef struct CPUHotplugFeatures {
bool acpi_1_compatible;
bool has_legacy_cphp;
+ bool fw_unplugs_cpu;
+ const char *smi_path;
} CPUHotplugFeatures;
+typedef void (*build_madt_cpu_fn)(int uid, const CPUArchIdList *apic_ids,
+ GArray *entry, bool force_enabled);
+
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
- hwaddr io_base,
+ build_madt_cpu_fn build_madt_cpu, hwaddr io_base,
const char *res_root,
const char *event_handler_method);
diff --git a/include/hw/acpi/cxl.h b/include/hw/acpi/cxl.h
new file mode 100644
index 0000000000..8f22c71530
--- /dev/null
+++ b/include/hw/acpi/cxl.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ACPI_CXL_H
+#define HW_ACPI_CXL_H
+
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/cxl/cxl.h"
+
+void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
+ BIOSLinker *linker, const char *oem_id,
+ const char *oem_table_id, CXLState *cxl_state);
+void build_cxl_osc_method(Aml *dev);
+void build_cxl_dsm_method(Aml *dev);
+
+#endif
diff --git a/include/hw/acpi/erst.h b/include/hw/acpi/erst.h
new file mode 100644
index 0000000000..b2ff663ddc
--- /dev/null
+++ b/include/hw/acpi/erst.h
@@ -0,0 +1,27 @@
+/*
+ * ACPI Error Record Serialization Table, ERST, Implementation
+ *
+ * ACPI ERST introduced in ACPI 4.0, June 16, 2009.
+ * ACPI Platform Error Interfaces : Error Serialization
+ *
+ * Copyright (c) 2021 Oracle and/or its affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_ACPI_ERST_H
+#define HW_ACPI_ERST_H
+
+#include "hw/acpi/bios-linker-loader.h"
+#include "qom/object.h"
+
+void build_erst(GArray *table_data, BIOSLinker *linker, Object *erst_dev,
+ const char *oem_id, const char *oem_table_id);
+
+#define TYPE_ACPI_ERST "acpi-erst"
+
+/* returns NULL unless there is exactly one device */
+static inline Object *find_erst_dev(void)
+{
+ return object_resolve_path_type("", TYPE_ACPI_ERST, NULL);
+}
+#endif
diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h
index 90a9180db5..ba84ce0214 100644
--- a/include/hw/acpi/generic_event_device.h
+++ b/include/hw/acpi/generic_event_device.h
@@ -56,22 +56,33 @@
*
*/
-#ifndef HW_ACPI_GED_H
-#define HW_ACPI_GED_H
+#ifndef HW_ACPI_GENERIC_EVENT_DEVICE_H
+#define HW_ACPI_GENERIC_EVENT_DEVICE_H
#include "hw/sysbus.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/ghes.h"
+#include "qom/object.h"
#define ACPI_POWER_BUTTON_DEVICE "PWRB"
#define TYPE_ACPI_GED "acpi-ged"
-#define ACPI_GED(obj) \
- OBJECT_CHECK(AcpiGedState, (obj), TYPE_ACPI_GED)
+OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)
#define ACPI_GED_EVT_SEL_OFFSET 0x0
#define ACPI_GED_EVT_SEL_LEN 0x4
+#define ACPI_GED_REG_SLEEP_CTL 0x00
+#define ACPI_GED_REG_SLEEP_STS 0x01
+#define ACPI_GED_REG_RESET 0x02
+#define ACPI_GED_REG_COUNT 0x03
+
+/* ACPI_GED_REG_RESET value for reset */
+#define ACPI_GED_RESET_VALUE 0x42
+
+/* ACPI_GED_REG_SLEEP_CTL.SLP_TYP value for S5 (aka poweroff) */
+#define ACPI_GED_SLP_TYP_S5 0x05
+
#define GED_DEVICE "GED"
#define AML_GED_EVT_REG "EREG"
#define AML_GED_EVT_SEL "ESEL"
@@ -87,10 +98,11 @@
typedef struct GEDState {
MemoryRegion evt;
+ MemoryRegion regs;
uint32_t sel;
} GEDState;
-typedef struct AcpiGedState {
+struct AcpiGedState {
SysBusDevice parent_obj;
MemHotplugState memhp_state;
MemoryRegion container_memhp;
@@ -98,9 +110,10 @@ typedef struct AcpiGedState {
uint32_t ged_event_bitmap;
qemu_irq irq;
AcpiGhesState ghes_state;
-} AcpiGedState;
+};
void build_ged_aml(Aml *table, const char* name, HotplugHandler *hotplug_dev,
uint32_t ged_irq, AmlRegionSpace rs, hwaddr ged_base);
+void acpi_dsdt_add_power_button(Aml *scope);
#endif
diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h
index 4ad025e09a..674f6958e9 100644
--- a/include/hw/acpi/ghes.h
+++ b/include/hw/acpi/ghes.h
@@ -64,11 +64,21 @@ enum {
typedef struct AcpiGhesState {
uint64_t ghes_addr_le;
+ bool present; /* True if GHES is present at all on this board */
} AcpiGhesState;
void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker);
-void acpi_build_hest(GArray *table_data, BIOSLinker *linker);
+void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
+ const char *oem_id, const char *oem_table_id);
void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
GArray *hardware_errors);
int acpi_ghes_record_errors(uint8_t notify, uint64_t error_physical_addr);
+
+/**
+ * acpi_ghes_present: Report whether ACPI GHES table is present
+ *
+ * Returns: true if the system has an ACPI GHES table and it is
+ * safe to call acpi_ghes_record_errors() to record a memory error.
+ */
+bool acpi_ghes_present(void);
#endif
diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h
index 28a53181cb..2faf7f0cae 100644
--- a/include/hw/acpi/ich9.h
+++ b/include/hw/acpi/ich9.h
@@ -7,7 +7,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,9 +24,12 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/cpu_hotplug.h"
#include "hw/acpi/cpu.h"
+#include "hw/acpi/pcihp.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/acpi_dev_interface.h"
-#include "hw/acpi/tco.h"
+#include "hw/acpi/ich9_tco.h"
+
+#define ACPI_PCIHP_ADDR_ICH9 0x0cc0
typedef struct ICH9LPCPMRegs {
/*
@@ -53,21 +56,23 @@ typedef struct ICH9LPCPMRegs {
AcpiCpuHotplug gpe_cpu;
CPUHotplugState cpuhp_state;
+ bool keep_pci_slot_hpc;
+ bool use_acpi_hotplug_bridge;
+ AcpiPciHpState acpi_pci_hotplug;
MemHotplugState acpi_memory_hotplug;
uint8_t disable_s3;
uint8_t disable_s4;
uint8_t s4_val;
- uint8_t smm_enabled;
+ bool smm_enabled;
+ bool smm_compat;
bool enable_tco;
TCOIORegs tco_regs;
} ICH9LPCPMRegs;
#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
-void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
- bool smm_enabled,
- qemu_irq sci_irq);
+void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq);
void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base);
extern const VMStateDescription vmstate_ich9_pm;
@@ -82,6 +87,7 @@ void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
+bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus);
void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
#endif /* HW_ACPI_ICH9_H */
diff --git a/include/hw/acpi/tco.h b/include/hw/acpi/ich9_tco.h
index a1e0da8213..2562a7cf39 100644
--- a/include/hw/acpi/tco.h
+++ b/include/hw/acpi/ich9_tco.h
@@ -1,5 +1,5 @@
/*
- * QEMU ICH9 TCO emulation
+ * QEMU ICH9 TCO emulation (total cost of ownership)
*
* Copyright (c) 2015 Paulo Alcantara <pcacjr@zytor.com>
*
@@ -11,6 +11,7 @@
#define HW_ACPI_TCO_H
#include "exec/memory.h"
+#include "migration/vmstate.h"
/* As per ICH9 spec, the internal timer has an error of ~0.6s on every tick */
#define TCO_TICK_NSEC 600000000LL
diff --git a/include/hw/acpi/ipmi.h b/include/hw/acpi/ipmi.h
index c14ad682ac..6c8079c97a 100644
--- a/include/hw/acpi/ipmi.h
+++ b/include/hw/acpi/ipmi.h
@@ -9,13 +9,8 @@
#ifndef HW_ACPI_IPMI_H
#define HW_ACPI_IPMI_H
-#include "hw/acpi/aml-build.h"
+#include "hw/acpi/acpi_aml_interface.h"
-/*
- * Add ACPI IPMI entries for all registered IPMI devices whose parent
- * bus matches the given bus. The resource is the ACPI resource that
- * contains the IPMI device, this is required for the I2C CRS.
- */
-void build_acpi_ipmi_devices(Aml *table, BusState *bus, const char *resource);
+void build_ipmi_dev_aml(AcpiDevAmlIf *adev, Aml *scope);
#endif /* HW_ACPI_IPMI_H */
diff --git a/include/hw/acpi/memory_hotplug.h b/include/hw/acpi/memory_hotplug.h
index dfe9cf3fde..38841d7b06 100644
--- a/include/hw/acpi/memory_hotplug.h
+++ b/include/hw/acpi/memory_hotplug.h
@@ -1,6 +1,7 @@
#ifndef QEMU_HW_ACPI_MEMORY_HOTPLUG_H
#define QEMU_HW_ACPI_MEMORY_HOTPLUG_H
+#include "qapi/qapi-types-acpi.h"
#include "hw/qdev-core.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
diff --git a/include/hw/acpi/pc-hotplug.h b/include/hw/acpi/pc-hotplug.h
index 31bc9191c3..8a654248e9 100644
--- a/include/hw/acpi/pc-hotplug.h
+++ b/include/hw/acpi/pc-hotplug.h
@@ -13,7 +13,7 @@
#define PC_HOTPLUG_H
/*
- * ONLY DEFINEs are permited in this file since it's shared
+ * ONLY DEFINEs are permitted in this file since it's shared
* between C and ASL code.
*/
diff --git a/include/hw/acpi/pci.h b/include/hw/acpi/pci.h
index bf2a3ed0ba..467a99461c 100644
--- a/include/hw/acpi/pci.h
+++ b/include/hw/acpi/pci.h
@@ -27,11 +27,17 @@
#define HW_ACPI_PCI_H
#include "hw/acpi/bios-linker-loader.h"
+#include "hw/acpi/acpi_aml_interface.h"
typedef struct AcpiMcfgInfo {
uint64_t base;
uint32_t size;
} AcpiMcfgInfo;
-void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info);
+void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info,
+ const char *oem_id, const char *oem_table_id);
+Aml *aml_pci_device_dsm(void);
+
+void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus);
+void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope);
#endif
diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h
index 8bc4a4c01d..ac21a95913 100644
--- a/include/hw/acpi/pcihp.h
+++ b/include/hw/acpi/pcihp.h
@@ -10,7 +10,7 @@
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
- * License version 2 as published by the Free Software Foundation.
+ * License version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -46,16 +46,19 @@ typedef struct AcpiPciHpPciStatus {
typedef struct AcpiPciHpState {
AcpiPciHpPciStatus acpi_pcihp_pci_status[ACPI_PCIHP_MAX_HOTPLUG_BUS];
uint32_t hotplug_select;
+ uint32_t acpi_index;
PCIBus *root;
MemoryRegion io;
- bool legacy_piix;
uint16_t io_base;
uint16_t io_len;
+ bool use_acpi_hotplug_bridge;
+ bool use_acpi_root_pci_hotplug;
} AcpiPciHpState;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root,
- MemoryRegion *address_space_io, bool bridges_enabled);
+ MemoryRegion *io, uint16_t io_base);
+bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus);
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
@@ -69,15 +72,19 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
/* Called on reset */
void acpi_pcihp_reset(AcpiPciHpState *s);
+void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus);
+
extern const VMStateDescription vmstate_acpi_pcihp_pci_status;
-#define VMSTATE_PCI_HOTPLUG(pcihp, state, test_pcihp) \
+#define VMSTATE_PCI_HOTPLUG(pcihp, state, test_pcihp, test_acpi_index) \
VMSTATE_UINT32_TEST(pcihp.hotplug_select, state, \
test_pcihp), \
VMSTATE_STRUCT_ARRAY_TEST(pcihp.acpi_pcihp_pci_status, state, \
ACPI_PCIHP_MAX_HOTPLUG_BUS, \
test_pcihp, 1, \
vmstate_acpi_pcihp_pci_status, \
- AcpiPciHpPciStatus)
+ AcpiPciHpPciStatus), \
+ VMSTATE_UINT32_TEST(pcihp.acpi_index, state, \
+ test_acpi_index)
#endif
diff --git a/include/hw/acpi/piix4.h b/include/hw/acpi/piix4.h
new file mode 100644
index 0000000000..eb1c122d80
--- /dev/null
+++ b/include/hw/acpi/piix4.h
@@ -0,0 +1,73 @@
+/*
+ * ACPI implementation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef HW_ACPI_PIIX4_H
+#define HW_ACPI_PIIX4_H
+
+#include "hw/pci/pci_device.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/cpu_hotplug.h"
+#include "hw/acpi/memory_hotplug.h"
+#include "hw/acpi/pcihp.h"
+#include "hw/i2c/pm_smbus.h"
+#include "hw/isa/apm.h"
+
+#define TYPE_PIIX4_PM "PIIX4_PM"
+OBJECT_DECLARE_SIMPLE_TYPE(PIIX4PMState, PIIX4_PM)
+
+struct PIIX4PMState {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion io;
+ uint32_t io_base;
+
+ MemoryRegion io_gpe;
+ ACPIREGS ar;
+
+ APMState apm;
+
+ PMSMBus smb;
+ uint32_t smb_io_base;
+
+ qemu_irq irq;
+ qemu_irq smi_irq;
+ bool smm_enabled;
+ bool smm_compat;
+ Notifier machine_ready;
+ Notifier powerdown_notifier;
+
+ AcpiPciHpState acpi_pci_hotplug;
+ bool not_migrate_acpi_index;
+
+ uint8_t disable_s3;
+ uint8_t disable_s4;
+ uint8_t s4_val;
+
+ bool cpu_hotplug_legacy;
+ AcpiCpuHotplug gpe_cpu;
+ CPUHotplugState cpuhp_state;
+
+ MemHotplugState acpi_memory_hotplug;
+};
+
+#endif
diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h
index 1a2a57a21f..579c45f5ba 100644
--- a/include/hw/acpi/tpm.h
+++ b/include/hw/acpi/tpm.h
@@ -21,6 +21,8 @@
#include "hw/acpi/aml-build.h"
#include "sysemu/tpm.h"
+#ifdef CONFIG_TPM
+
#define TPM_TIS_ADDR_BASE 0xFED40000
#define TPM_TIS_ADDR_SIZE 0x5000
@@ -91,6 +93,7 @@
#define TPM_TIS_CAP_DATA_TRANSFER_64B (3 << 9)
#define TPM_TIS_CAP_DATA_TRANSFER_LEGACY (0 << 9)
#define TPM_TIS_CAP_BURST_COUNT_DYNAMIC (0 << 8)
+#define TPM_TIS_CAP_BURST_COUNT_STATIC (1 << 8)
#define TPM_TIS_CAP_INTERRUPT_LOW_LEVEL (1 << 4) /* support is mandatory */
#define TPM_TIS_CAPABILITIES_SUPPORTED1_3 \
(TPM_TIS_CAP_INTERRUPT_LOW_LEVEL | \
@@ -207,6 +210,48 @@ REG32(CRB_DATA_BUFFER, 0x80)
#define TPM_PPI_FUNC_ALLOWED_USR_NOT_REQ (4 << 0)
#define TPM_PPI_FUNC_MASK (7 << 0)
+/* TPM TIS I2C registers */
+#define TPM_I2C_REG_LOC_SEL 0x00
+#define TPM_I2C_REG_ACCESS 0x04
+#define TPM_I2C_REG_INT_ENABLE 0x08
+#define TPM_I2C_REG_INT_CAPABILITY 0x14
+#define TPM_I2C_REG_STS 0x18
+#define TPM_I2C_REG_DATA_FIFO 0x24
+#define TPM_I2C_REG_INTF_CAPABILITY 0x30
+#define TPM_I2C_REG_I2C_DEV_ADDRESS 0x38
+#define TPM_I2C_REG_DATA_CSUM_ENABLE 0x40
+#define TPM_I2C_REG_DATA_CSUM_GET 0x44
+#define TPM_I2C_REG_DID_VID 0x48
+#define TPM_I2C_REG_RID 0x4c
+#define TPM_I2C_REG_UNKNOWN 0xff
+
+/* I2C specific interface capabilities */
+#define TPM_I2C_CAP_INTERFACE_TYPE (0x2 << 0) /* FIFO interface */
+#define TPM_I2C_CAP_INTERFACE_VER (0x0 << 4) /* TCG I2C intf 1.0 */
+#define TPM_I2C_CAP_TPM2_FAMILY (0x1 << 7) /* TPM 2.0 family. */
+#define TPM_I2C_CAP_DEV_ADDR_CHANGE (0x0 << 27) /* No dev addr chng */
+#define TPM_I2C_CAP_BURST_COUNT_STATIC (0x1 << 29) /* Burst count static */
+#define TPM_I2C_CAP_LOCALITY_CAP (0x1 << 25) /* 0-5 locality */
+#define TPM_I2C_CAP_BUS_SPEED (3 << 21) /* std and fast mode */
+
+/*
+ * TPM_I2C_STS masks for read/writing bits from/to TIS
+ * TPM_STS mask for read bits 31:26 must be zero
+ */
+#define TPM_I2C_STS_READ_MASK 0x00ffffdd
+#define TPM_I2C_STS_WRITE_MASK 0x03000062
+
+/* Checksum enabled. */
+#define TPM_DATA_CSUM_ENABLED 0x1
+
+/*
+ * TPM_I2C_INT_ENABLE mask. The Linux kernel does not support
+ * interrupts, hence the mask is set to 0.

+ */
+#define TPM_I2C_INT_ENABLE_MASK 0x0
+
void tpm_build_ppi_acpi(TPMIf *tpm, Aml *dev);
+#endif /* CONFIG_TPM */
+
#endif /* HW_ACPI_TPM_H */
diff --git a/include/hw/acpi/utils.h b/include/hw/acpi/utils.h
index 140b4de603..0022df027d 100644
--- a/include/hw/acpi/utils.h
+++ b/include/hw/acpi/utils.h
@@ -4,6 +4,5 @@
#include "hw/nvram/fw_cfg.h"
MemoryRegion *acpi_add_rom_blob(FWCfgCallback update, void *opaque,
- GArray *blob, const char *name,
- uint64_t max_size);
+ GArray *blob, const char *name);
#endif
diff --git a/include/hw/acpi/vmgenid.h b/include/hw/acpi/vmgenid.h
index c49d913f3e..fb135d5bcb 100644
--- a/include/hw/acpi/vmgenid.h
+++ b/include/hw/acpi/vmgenid.h
@@ -4,33 +4,34 @@
#include "hw/acpi/bios-linker-loader.h"
#include "hw/qdev-core.h"
#include "qemu/uuid.h"
+#include "qom/object.h"
-#define VMGENID_DEVICE "vmgenid"
+#define TYPE_VMGENID "vmgenid"
#define VMGENID_GUID "guid"
#define VMGENID_GUID_FW_CFG_FILE "etc/vmgenid_guid"
#define VMGENID_ADDR_FW_CFG_FILE "etc/vmgenid_addr"
#define VMGENID_FW_CFG_SIZE 4096 /* Occupy a page of memory */
#define VMGENID_GUID_OFFSET 40 /* allow space for
- * OVMF SDT Header Probe Supressor
+ * OVMF SDT Header Probe Suppressor
*/
-#define VMGENID(obj) OBJECT_CHECK(VmGenIdState, (obj), VMGENID_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(VmGenIdState, VMGENID)
-typedef struct VmGenIdState {
- DeviceClass parent_obj;
+struct VmGenIdState {
+ DeviceState parent_obj;
QemuUUID guid; /* The 128-bit GUID seen by the guest */
uint8_t vmgenid_addr_le[8]; /* Address of the GUID (little-endian) */
-} VmGenIdState;
+};
/* returns NULL unless there is exactly one device */
static inline Object *find_vmgenid_dev(void)
{
- return object_resolve_path_type("", VMGENID_DEVICE, NULL);
+ return object_resolve_path_type("", TYPE_VMGENID, NULL);
}
void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid,
- BIOSLinker *linker);
+ BIOSLinker *linker, const char *oem_id);
void vmgenid_add_fw_cfg(VmGenIdState *vms, FWCfgState *s, GArray *guid);
#endif
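
The OBJECT_CHECK()-based cast is replaced by OBJECT_DECLARE_SIMPLE_TYPE(), which generates both the VmGenIdState typedef and the VMGENID() checked-cast macro; a short usage sketch:

    Object *obj = find_vmgenid_dev();

    if (obj) {
        VmGenIdState *vms = VMGENID(obj);   /* checked cast generated by the macro */
        /* ... read or refresh vms->guid ... */
    }
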
diff --git a/include/hw/adc/aspeed_adc.h b/include/hw/adc/aspeed_adc.h
new file mode 100644
index 0000000000..ff1d06ea91
--- /dev/null
+++ b/include/hw/adc/aspeed_adc.h
@@ -0,0 +1,56 @@
+/*
+ * Aspeed ADC
+ *
+ * Copyright 2017-2021 IBM Corp.
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_ADC_ASPEED_ADC_H
+#define HW_ADC_ASPEED_ADC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_ADC "aspeed.adc"
+#define TYPE_ASPEED_2400_ADC TYPE_ASPEED_ADC "-ast2400"
+#define TYPE_ASPEED_2500_ADC TYPE_ASPEED_ADC "-ast2500"
+#define TYPE_ASPEED_2600_ADC TYPE_ASPEED_ADC "-ast2600"
+#define TYPE_ASPEED_1030_ADC TYPE_ASPEED_ADC "-ast1030"
+OBJECT_DECLARE_TYPE(AspeedADCState, AspeedADCClass, ASPEED_ADC)
+
+#define TYPE_ASPEED_ADC_ENGINE "aspeed.adc.engine"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedADCEngineState, ASPEED_ADC_ENGINE)
+
+#define ASPEED_ADC_NR_CHANNELS 16
+#define ASPEED_ADC_NR_REGS (0xD0 >> 2)
+
+struct AspeedADCEngineState {
+ /* <private> */
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+ uint32_t engine_id;
+ uint32_t nr_channels;
+ uint32_t regs[ASPEED_ADC_NR_REGS];
+};
+
+struct AspeedADCState {
+ /* <private> */
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ AspeedADCEngineState engines[2];
+};
+
+struct AspeedADCClass {
+ SysBusDeviceClass parent_class;
+
+ uint32_t nr_engines;
+};
+
+#endif /* HW_ADC_ASPEED_ADC_H */
diff --git a/include/hw/misc/max111x.h b/include/hw/adc/max111x.h
index af7f1017ef..beff59c815 100644
--- a/include/hw/misc/max111x.h
+++ b/include/hw/adc/max111x.h
@@ -14,6 +14,7 @@
#define HW_MISC_MAX111X_H
#include "hw/ssi/ssi.h"
+#include "qom/object.h"
/*
* This is a model of the Maxim MAX1110/1111 ADC chip, which for QEMU
@@ -31,8 +32,8 @@
* + the interrupt line is not correctly implemented, and will never
* be lowered once it has been asserted.
*/
-typedef struct {
- SSISlave parent_obj;
+struct MAX111xState {
+ SSIPeripheral parent_obj;
qemu_irq interrupt;
/* Values of inputs at system reset (settable by QOM property) */
@@ -43,12 +44,11 @@ typedef struct {
uint8_t input[8];
int inputs, com;
-} MAX111xState;
+};
#define TYPE_MAX_111X "max111x"
-#define MAX_111X(obj) \
- OBJECT_CHECK(MAX111xState, (obj), TYPE_MAX_111X)
+OBJECT_DECLARE_SIMPLE_TYPE(MAX111xState, MAX_111X)
#define TYPE_MAX_1110 "max1110"
#define TYPE_MAX_1111 "max1111"
diff --git a/include/hw/adc/npcm7xx_adc.h b/include/hw/adc/npcm7xx_adc.h
new file mode 100644
index 0000000000..93330a408d
--- /dev/null
+++ b/include/hw/adc/npcm7xx_adc.h
@@ -0,0 +1,68 @@
+/*
+ * Nuvoton NPCM7xx ADC Module
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_ADC_H
+#define NPCM7XX_ADC_H
+
+#include "hw/clock.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+
+#define NPCM7XX_ADC_NUM_INPUTS 8
+/**
+ * This value should not be changed unless the write_adc_calibration function
+ * in hw/arm/npcm7xx.c is also changed.
+ */
+#define NPCM7XX_ADC_NUM_CALIB 2
+
+/**
+ * struct NPCM7xxADCState - Analog to Digital Converter Module device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @conv_timer: The timer counts down remaining cycles for the conversion.
+ * @irq: GIC interrupt line to fire on expiration (if enabled).
+ * @con: The Control Register.
+ * @data: The Data Buffer.
+ * @clock: The ADC Clock.
+ * @adci: The input voltage in units of uV. 1 uV = 1e-6 V.
+ * @vref: The external reference voltage.
+ * @iref: The internal reference voltage, initialized at launch time.
+ * @rv: The calibrated output values of 0.5V and 1.5V for the ADC.
+ */
+struct NPCM7xxADCState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ QEMUTimer conv_timer;
+
+ qemu_irq irq;
+ uint32_t con;
+ uint32_t data;
+ Clock *clock;
+
+ /* Voltages are in units of uV. 1 V = 1000000 uV. */
+ uint32_t adci[NPCM7XX_ADC_NUM_INPUTS];
+ uint32_t vref;
+ uint32_t iref;
+
+ uint16_t calibration_r_values[NPCM7XX_ADC_NUM_CALIB];
+};
+
+#define TYPE_NPCM7XX_ADC "npcm7xx-adc"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxADCState, NPCM7XX_ADC)
+
+#endif /* NPCM7XX_ADC_H */
diff --git a/include/hw/adc/stm32f2xx_adc.h b/include/hw/adc/stm32f2xx_adc.h
index 663b79f4f3..42b48981f2 100644
--- a/include/hw/adc/stm32f2xx_adc.h
+++ b/include/hw/adc/stm32f2xx_adc.h
@@ -26,6 +26,7 @@
#define HW_STM32F2XX_ADC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define ADC_SR 0x00
#define ADC_CR1 0x04
@@ -58,10 +59,9 @@
#define ADC_COMMON_ADDRESS 0x100
#define TYPE_STM32F2XX_ADC "stm32f2xx-adc"
-#define STM32F2XX_ADC(obj) \
- OBJECT_CHECK(STM32F2XXADCState, (obj), TYPE_STM32F2XX_ADC)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXADCState, STM32F2XX_ADC)
-typedef struct {
+struct STM32F2XXADCState {
/* <private> */
SysBusDevice parent_obj;
@@ -84,6 +84,6 @@ typedef struct {
uint32_t adc_dr;
qemu_irq irq;
-} STM32F2XXADCState;
+};
#endif /* HW_STM32F2XX_ADC_H */
diff --git a/include/hw/misc/zynq-xadc.h b/include/hw/adc/zynq-xadc.h
index f1a410a376..c10cc4c379 100644
--- a/include/hw/misc/zynq-xadc.h
+++ b/include/hw/adc/zynq-xadc.h
@@ -16,17 +16,17 @@
#define ZYNQ_XADC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define ZYNQ_XADC_MMIO_SIZE 0x0020
#define ZYNQ_XADC_NUM_IO_REGS (ZYNQ_XADC_MMIO_SIZE / 4)
#define ZYNQ_XADC_NUM_ADC_REGS 128
#define ZYNQ_XADC_FIFO_DEPTH 15
-#define TYPE_ZYNQ_XADC "xlnx,zynq-xadc"
-#define ZYNQ_XADC(obj) \
- OBJECT_CHECK(ZynqXADCState, (obj), TYPE_ZYNQ_XADC)
+#define TYPE_ZYNQ_XADC "xlnx-zynq-xadc"
+OBJECT_DECLARE_SIMPLE_TYPE(ZynqXADCState, ZYNQ_XADC)
-typedef struct ZynqXADCState {
+struct ZynqXADCState {
/*< private >*/
SysBusDevice parent_obj;
@@ -39,8 +39,7 @@ typedef struct ZynqXADCState {
uint16_t xadc_dfifo[ZYNQ_XADC_FIFO_DEPTH];
uint16_t xadc_dfifo_entries;
- struct IRQState *qemu_irq;
-
-} ZynqXADCState;
+ qemu_irq irq;
+};
#endif /* ZYNQ_XADC_H */
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
index 77c82a9982..67a9a17b86 100644
--- a/include/hw/arm/allwinner-a10.h
+++ b/include/hw/arm/allwinner-a10.h
@@ -1,19 +1,22 @@
#ifndef HW_ARM_ALLWINNER_A10_H
#define HW_ARM_ALLWINNER_A10_H
-#include "qemu/error-report.h"
-#include "hw/char/serial.h"
-#include "hw/arm/boot.h"
#include "hw/timer/allwinner-a10-pit.h"
#include "hw/intc/allwinner-a10-pic.h"
#include "hw/net/allwinner_emac.h"
#include "hw/sd/allwinner-sdhost.h"
-#include "hw/ide/ahci.h"
+#include "hw/ide/ahci-sysbus.h"
#include "hw/usb/hcd-ohci.h"
#include "hw/usb/hcd-ehci.h"
#include "hw/rtc/allwinner-rtc.h"
+#include "hw/misc/allwinner-a10-ccm.h"
+#include "hw/misc/allwinner-a10-dramc.h"
+#include "hw/i2c/allwinner-i2c.h"
+#include "hw/watchdog/allwinner-wdt.h"
+#include "sysemu/block-backend.h"
#include "target/arm/cpu.h"
+#include "qom/object.h"
#define AW_A10_SDRAM_BASE 0x40000000
@@ -21,23 +24,47 @@
#define AW_A10_NUM_USB 2
#define TYPE_AW_A10 "allwinner-a10"
-#define AW_A10(obj) OBJECT_CHECK(AwA10State, (obj), TYPE_AW_A10)
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10State, AW_A10)
-typedef struct AwA10State {
+struct AwA10State {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
ARMCPU cpu;
+ AwA10ClockCtlState ccm;
+ AwA10DramControllerState dramc;
AwA10PITState timer;
AwA10PICState intc;
AwEmacState emac;
AllwinnerAHCIState sata;
AwSdHostState mmc0;
+ AWI2CState i2c0;
AwRtcState rtc;
+ AwWdtState wdt;
MemoryRegion sram_a;
EHCISysBusState ehci[AW_A10_NUM_USB];
OHCISysBusState ohci[AW_A10_NUM_USB];
-} AwA10State;
+};
+
+/**
+ * Emulate Boot ROM firmware setup functionality.
+ *
+ * A real Allwinner A10 SoC contains a Boot ROM
+ * which is the first code that runs right after
+ * the SoC is powered on. The Boot ROM is responsible
+ * for loading user code (e.g. a bootloader) from any
+ * of the supported external devices and writing the
+ * downloaded code to internal SRAM. After loading, the SoC
+ * begins executing the code written to SRAM.
+ *
+ * This function emulates the Boot ROM by copying 32 KiB
+ * of data at offset 8 KiB from the given block device and writing it to
+ * the start of the first internal SRAM memory.
+ *
+ * @s: Allwinner A10 state object pointer
+ * @blk: Block backend device object pointer
+ */
+void allwinner_a10_bootrom_setup(AwA10State *s, BlockBackend *blk);
#endif
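
A sketch of how a board model might invoke the new Boot ROM helper when an SD image is attached (the drive lookup and the s->a10 field name are assumptions, mirroring other Allwinner boards):

    DriveInfo *dinfo = drive_get(IF_SD, 0, 0);
    BlockBackend *blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;

    if (blk && blk_is_available(blk)) {
        /* copies 32 KiB from offset 8 KiB on the card into SRAM A */
        allwinner_a10_bootrom_setup(&s->a10, blk);
    }
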
diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h
index 82e4e59216..24ba4e1bf4 100644
--- a/include/hw/arm/allwinner-h3.h
+++ b/include/hw/arm/allwinner-h3.h
@@ -18,7 +18,7 @@
*/
/*
- * The Allwinner H3 is a System on Chip containing four ARM Cortex A7
+ * The Allwinner H3 is a System on Chip containing four ARM Cortex-A7
* processor cores. Features and specifications include DDR2/DDR3 memory,
* SD/MMC storage cards, 10/100/1000Mbit Ethernet, USB 2.0, HDMI and
* various I/O modules.
@@ -36,7 +36,6 @@
#define HW_ARM_ALLWINNER_H3_H
#include "qom/object.h"
-#include "hw/arm/boot.h"
#include "hw/timer/allwinner-a10-pit.h"
#include "hw/intc/arm_gic.h"
#include "hw/misc/allwinner-h3-ccu.h"
@@ -47,6 +46,8 @@
#include "hw/sd/allwinner-sdhost.h"
#include "hw/net/allwinner-sun8i-emac.h"
#include "hw/rtc/allwinner-rtc.h"
+#include "hw/i2c/allwinner-i2c.h"
+#include "hw/watchdog/allwinner-wdt.h"
#include "target/arm/cpu.h"
#include "sysemu/block-backend.h"
@@ -61,37 +62,42 @@
* @see AwH3State
*/
enum {
- AW_H3_SRAM_A1,
- AW_H3_SRAM_A2,
- AW_H3_SRAM_C,
- AW_H3_SYSCTRL,
- AW_H3_MMC0,
- AW_H3_SID,
- AW_H3_EHCI0,
- AW_H3_OHCI0,
- AW_H3_EHCI1,
- AW_H3_OHCI1,
- AW_H3_EHCI2,
- AW_H3_OHCI2,
- AW_H3_EHCI3,
- AW_H3_OHCI3,
- AW_H3_CCU,
- AW_H3_PIT,
- AW_H3_UART0,
- AW_H3_UART1,
- AW_H3_UART2,
- AW_H3_UART3,
- AW_H3_EMAC,
- AW_H3_DRAMCOM,
- AW_H3_DRAMCTL,
- AW_H3_DRAMPHY,
- AW_H3_GIC_DIST,
- AW_H3_GIC_CPU,
- AW_H3_GIC_HYP,
- AW_H3_GIC_VCPU,
- AW_H3_RTC,
- AW_H3_CPUCFG,
- AW_H3_SDRAM
+ AW_H3_DEV_SRAM_A1,
+ AW_H3_DEV_SRAM_A2,
+ AW_H3_DEV_SRAM_C,
+ AW_H3_DEV_SYSCTRL,
+ AW_H3_DEV_MMC0,
+ AW_H3_DEV_SID,
+ AW_H3_DEV_EHCI0,
+ AW_H3_DEV_OHCI0,
+ AW_H3_DEV_EHCI1,
+ AW_H3_DEV_OHCI1,
+ AW_H3_DEV_EHCI2,
+ AW_H3_DEV_OHCI2,
+ AW_H3_DEV_EHCI3,
+ AW_H3_DEV_OHCI3,
+ AW_H3_DEV_CCU,
+ AW_H3_DEV_PIT,
+ AW_H3_DEV_UART0,
+ AW_H3_DEV_UART1,
+ AW_H3_DEV_UART2,
+ AW_H3_DEV_UART3,
+ AW_H3_DEV_EMAC,
+ AW_H3_DEV_TWI0,
+ AW_H3_DEV_TWI1,
+ AW_H3_DEV_TWI2,
+ AW_H3_DEV_DRAMCOM,
+ AW_H3_DEV_DRAMCTL,
+ AW_H3_DEV_DRAMPHY,
+ AW_H3_DEV_GIC_DIST,
+ AW_H3_DEV_GIC_CPU,
+ AW_H3_DEV_GIC_HYP,
+ AW_H3_DEV_GIC_VCPU,
+ AW_H3_DEV_RTC,
+ AW_H3_DEV_CPUCFG,
+ AW_H3_DEV_R_TWI,
+ AW_H3_DEV_SDRAM,
+ AW_H3_DEV_WDT
};
/** Total number of CPU cores in the H3 SoC */
@@ -106,7 +112,7 @@ enum {
#define TYPE_AW_H3 "allwinner-h3"
/** Convert input object to Allwinner H3 state object */
-#define AW_H3(obj) OBJECT_CHECK(AwH3State, (obj), TYPE_AW_H3)
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3State, AW_H3)
/** @} */
@@ -116,7 +122,7 @@ enum {
* This struct contains the state of all the devices
* which are currently emulated by the H3 SoC code.
*/
-typedef struct AwH3State {
+struct AwH3State {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
@@ -130,13 +136,18 @@ typedef struct AwH3State {
AwH3SysCtrlState sysctrl;
AwSidState sid;
AwSdHostState mmc0;
+ AWI2CState i2c0;
+ AWI2CState i2c1;
+ AWI2CState i2c2;
+ AWI2CState r_twi;
AwSun8iEmacState emac;
AwRtcState rtc;
+ AwWdtState wdt;
GICState gic;
MemoryRegion sram_a1;
MemoryRegion sram_a2;
MemoryRegion sram_c;
-} AwH3State;
+};
/**
* Emulate Boot ROM firmware setup functionality.
diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h
new file mode 100644
index 0000000000..614e74b7ed
--- /dev/null
+++ b/include/hw/arm/allwinner-r40.h
@@ -0,0 +1,157 @@
+/*
+ * Allwinner R40/A40i/T3 System on Chip emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_ALLWINNER_R40_H
+#define HW_ARM_ALLWINNER_R40_H
+
+#include "qom/object.h"
+#include "hw/timer/allwinner-a10-pit.h"
+#include "hw/ide/ahci-sysbus.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/sd/allwinner-sdhost.h"
+#include "hw/misc/allwinner-r40-ccu.h"
+#include "hw/misc/allwinner-r40-dramc.h"
+#include "hw/misc/allwinner-sramc.h"
+#include "hw/i2c/allwinner-i2c.h"
+#include "hw/net/allwinner_emac.h"
+#include "hw/net/allwinner-sun8i-emac.h"
+#include "hw/usb/hcd-ohci.h"
+#include "hw/usb/hcd-ehci.h"
+#include "hw/watchdog/allwinner-wdt.h"
+#include "target/arm/cpu.h"
+#include "sysemu/block-backend.h"
+
+enum {
+ AW_R40_DEV_SRAM_A1,
+ AW_R40_DEV_SRAM_A2,
+ AW_R40_DEV_SRAM_A3,
+ AW_R40_DEV_SRAM_A4,
+ AW_R40_DEV_SRAMC,
+ AW_R40_DEV_EMAC,
+ AW_R40_DEV_MMC0,
+ AW_R40_DEV_MMC1,
+ AW_R40_DEV_MMC2,
+ AW_R40_DEV_MMC3,
+ AW_R40_DEV_AHCI,
+ AW_R40_DEV_EHCI1,
+ AW_R40_DEV_OHCI1,
+ AW_R40_DEV_EHCI2,
+ AW_R40_DEV_OHCI2,
+ AW_R40_DEV_CCU,
+ AW_R40_DEV_PIT,
+ AW_R40_DEV_WDT,
+ AW_R40_DEV_UART0,
+ AW_R40_DEV_UART1,
+ AW_R40_DEV_UART2,
+ AW_R40_DEV_UART3,
+ AW_R40_DEV_UART4,
+ AW_R40_DEV_UART5,
+ AW_R40_DEV_UART6,
+ AW_R40_DEV_UART7,
+ AW_R40_DEV_TWI0,
+ AW_R40_DEV_GMAC,
+ AW_R40_DEV_GIC_DIST,
+ AW_R40_DEV_GIC_CPU,
+ AW_R40_DEV_GIC_HYP,
+ AW_R40_DEV_GIC_VCPU,
+ AW_R40_DEV_SDRAM,
+ AW_R40_DEV_DRAMCOM,
+ AW_R40_DEV_DRAMCTL,
+ AW_R40_DEV_DRAMPHY,
+};
+
+#define AW_R40_NUM_CPUS (4)
+
+/**
+ * Allwinner R40 object model
+ * @{
+ */
+
+/** Object type for the Allwinner R40 SoC */
+#define TYPE_AW_R40 "allwinner-r40"
+
+/** Convert input object to Allwinner R40 state object */
+OBJECT_DECLARE_SIMPLE_TYPE(AwR40State, AW_R40)
+
+/** @} */
+
+/**
+ * Allwinner R40 object
+ *
+ * This struct contains the state of all the devices
+ * which are currently emulated by the R40 SoC code.
+ */
+#define AW_R40_NUM_MMCS 4
+#define AW_R40_NUM_USB 2
+#define AW_R40_NUM_UARTS 8
+
+struct AwR40State {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ /** Physical base address for start of RAM */
+ hwaddr ram_addr;
+
+ /** Total RAM size in megabytes */
+ uint32_t ram_size;
+
+ ARMCPU cpus[AW_R40_NUM_CPUS];
+ const hwaddr *memmap;
+ AwSRAMCState sramc;
+ AwA10PITState timer;
+ AwWdtState wdt;
+ AllwinnerAHCIState sata;
+ AwSdHostState mmc[AW_R40_NUM_MMCS];
+ EHCISysBusState ehci[AW_R40_NUM_USB];
+ OHCISysBusState ohci[AW_R40_NUM_USB];
+ AwR40ClockCtlState ccu;
+ AwR40DramCtlState dramc;
+ AWI2CState i2c0;
+ AwEmacState emac;
+ AwSun8iEmacState gmac;
+ GICState gic;
+ MemoryRegion sram_a1;
+ MemoryRegion sram_a2;
+ MemoryRegion sram_a3;
+ MemoryRegion sram_a4;
+};
+
+/**
+ * Emulate Boot ROM firmware setup functionality.
+ *
+ * A real Allwinner R40 SoC contains a Boot ROM
+ * which is the first code that runs right after
+ * the SoC is powered on. The Boot ROM is responsible
+ * for loading user code (e.g. a bootloader) from any
+ * of the supported external devices and writing the
+ * downloaded code to internal SRAM. After loading, the SoC
+ * begins executing the code written to SRAM.
+ *
+ * This function emulates the Boot ROM by copying 32 KiB
+ * of data from the given block device and writing it to
+ * the start of the first internal SRAM memory.
+ *
+ * @s: Allwinner R40 state object pointer
+ * @blk: Block backend device object pointer
+ * @unit: the MMC controller unit number to boot from
+ */
+bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit);
+
+#endif /* HW_ARM_ALLWINNER_R40_H */
diff --git a/include/hw/arm/armsse-version.h b/include/hw/arm/armsse-version.h
new file mode 100644
index 0000000000..60780fa984
--- /dev/null
+++ b/include/hw/arm/armsse-version.h
@@ -0,0 +1,42 @@
+/*
+ * ARM SSE (Subsystems for Embedded): IoTKit, SSE-200
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+#ifndef ARMSSE_VERSION_H
+#define ARMSSE_VERSION_H
+
+
+/*
+ * Define an enumeration of the possible values of the sse-version
+ * property implemented by various sub-devices of the SSE, and
+ * a validation function that checks that a valid value has been passed.
+ * These are arbitrary QEMU-internal values (nobody should be creating
+ * the sub-devices of the SSE except for the SSE object itself), but
+ * we pick obvious numbers for the benefit of people debugging with gdb.
+ */
+enum {
+ ARMSSE_IOTKIT = 0,
+ ARMSSE_SSE200 = 200,
+ ARMSSE_SSE300 = 300,
+};
+
+static inline bool armsse_version_valid(uint32_t sse_version)
+{
+ switch (sse_version) {
+ case ARMSSE_IOTKIT:
+ case ARMSSE_SSE200:
+ case ARMSSE_SSE300:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#endif
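
A sketch of the intended use in a sub-device's property check at realize time (the sse_version field name and the error handling are assumptions):

    if (!armsse_version_valid(s->sse_version)) {
        error_setg(errp, "invalid sse-version value %d", s->sse_version);
        return;
    }
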
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
index 84080c2299..88b3b759c5 100644
--- a/include/hw/arm/armsse.h
+++ b/include/hw/arm/armsse.h
@@ -14,9 +14,9 @@
* hardware, which include the IoT Kit and the SSE-050, SSE-100 and
* SSE-200. Currently we model:
* - the Arm IoT Kit which is documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
* - the SSE-200 which is documented in
- * http://infocenter.arm.com/help/topic/com.arm.doc.101104_0100_00_en/corelink_sse200_subsystem_for_embedded_technical_reference_manual_101104_0100_00_en.pdf
+ * https://developer.arm.com/documentation/101104/latest/
*
* The IoTKit contains:
* a Cortex-M33
@@ -37,9 +37,10 @@
* per-CPU identity and control register blocks
*
* QEMU interface:
+ * + Clock input "MAINCLK": clock for CPUs and most peripherals
+ * + Clock input "S32KCLK": slow 32KHz clock used for a few peripherals
* + QOM property "memory" is a MemoryRegion containing the devices provided
* by the board model.
- * + QOM property "MAINCLK" is the frequency of the main system clock
* + QOM property "EXP_NUMIRQ" sets the number of expansion interrupts.
* (In hardware, the SSE-200 permits the number of expansion interrupts
* for the two CPUs to be configured separately, but we restrict it to
@@ -55,6 +56,9 @@
* (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an
* SSE-200 both are present; CPU0 in an SSE-200 has neither.
* Since the IoTKit has only one CPU, it does not have the CPU1_* properties.
+ * + QOM properties "CPU0_MPU_NS", "CPU0_MPU_S", "CPU1_MPU_NS" and "CPU1_MPU_S"
+ * which set the number of MPU regions on the CPUs. If there is only one
+ * CPU the CPU1 properties are not present.
* + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0,
* which are wired to its NVIC lines 32 .. n+32
* + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for
@@ -96,18 +100,24 @@
#include "hw/misc/tz-mpc.h"
#include "hw/timer/cmsdk-apb-timer.h"
#include "hw/timer/cmsdk-apb-dualtimer.h"
+#include "hw/timer/sse-counter.h"
+#include "hw/timer/sse-timer.h"
#include "hw/watchdog/cmsdk-apb-watchdog.h"
#include "hw/misc/iotkit-sysctl.h"
#include "hw/misc/iotkit-sysinfo.h"
#include "hw/misc/armsse-cpuid.h"
#include "hw/misc/armsse-mhu.h"
+#include "hw/misc/armsse-cpu-pwrctrl.h"
#include "hw/misc/unimp.h"
#include "hw/or-irq.h"
+#include "hw/clock.h"
#include "hw/core/split-irq.h"
#include "hw/cpu/cluster.h"
+#include "qom/object.h"
-#define TYPE_ARMSSE "arm-sse"
-#define ARMSSE(obj) OBJECT_CHECK(ARMSSE, (obj), TYPE_ARMSSE)
+#define TYPE_ARM_SSE "arm-sse"
+OBJECT_DECLARE_TYPE(ARMSSE, ARMSSEClass,
+ ARM_SSE)
/*
* These type names are for specific IoTKit subsystems; other than
@@ -116,12 +126,14 @@
*/
#define TYPE_IOTKIT "iotkit"
#define TYPE_SSE200 "sse-200"
+#define TYPE_SSE300 "sse-300"
/* We have an IRQ splitter and an OR gate input for each external PPC
* and the 2 internal PPCs
*/
+#define NUM_INTERNAL_PPCS 2
#define NUM_EXTERNAL_PPCS (IOTS_NUM_AHB_EXP_PPC + IOTS_NUM_APB_EXP_PPC)
-#define NUM_PPCS (NUM_EXTERNAL_PPCS + 2)
+#define NUM_PPCS (NUM_EXTERNAL_PPCS + NUM_INTERNAL_PPCS)
#define MAX_SRAM_BANKS 4
#if MAX_SRAM_BANKS > IOTS_NUM_MPC
@@ -130,17 +142,12 @@
#define SSE_MAX_CPUS 2
-/* These define what each PPU in the ppu[] index is for */
-#define CPU0CORE_PPU 0
-#define CPU1CORE_PPU 1
-#define DBG_PPU 2
-#define RAM0_PPU 3
-#define RAM1_PPU 4
-#define RAM2_PPU 5
-#define RAM3_PPU 6
-#define NUM_PPUS 7
-
-typedef struct ARMSSE {
+#define NUM_PPUS 8
+
+/* Number of CPU IRQs used by the SSE itself */
+#define NUM_SSE_IRQS 32
+
+struct ARMSSE {
/*< private >*/
SysBusDevice parent_obj;
@@ -148,37 +155,37 @@ typedef struct ARMSSE {
ARMv7MState armv7m[SSE_MAX_CPUS];
CPUClusterState cluster[SSE_MAX_CPUS];
IoTKitSecCtl secctl;
- TZPPC apb_ppc0;
- TZPPC apb_ppc1;
+ TZPPC apb_ppc[NUM_INTERNAL_PPCS];
TZMPC mpc[IOTS_NUM_MPC];
- CMSDKAPBTIMER timer0;
- CMSDKAPBTIMER timer1;
- CMSDKAPBTIMER s32ktimer;
- qemu_or_irq ppc_irq_orgate;
+ CMSDKAPBTimer timer[3];
+ OrIRQState ppc_irq_orgate;
SplitIRQ sec_resp_splitter;
SplitIRQ ppc_irq_splitter[NUM_PPCS];
SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
- qemu_or_irq mpc_irq_orgate;
- qemu_or_irq nmi_orgate;
+ OrIRQState mpc_irq_orgate;
+ OrIRQState nmi_orgate;
- SplitIRQ cpu_irq_splitter[32];
+ SplitIRQ cpu_irq_splitter[NUM_SSE_IRQS];
CMSDKAPBDualTimer dualtimer;
- CMSDKAPBWatchdog s32kwatchdog;
- CMSDKAPBWatchdog nswatchdog;
- CMSDKAPBWatchdog swatchdog;
+ CMSDKAPBWatchdog cmsdk_watchdog[3];
+
+ SSECounter sse_counter;
+ SSETimer sse_timer[4];
IoTKitSysCtl sysctl;
IoTKitSysCtl sysinfo;
ARMSSEMHU mhu[2];
- UnimplementedDeviceState ppu[NUM_PPUS];
+ UnimplementedDeviceState unimp[NUM_PPUS];
UnimplementedDeviceState cachectrl[SSE_MAX_CPUS];
UnimplementedDeviceState cpusecctrl[SSE_MAX_CPUS];
ARMSSECPUID cpuid[SSE_MAX_CPUS];
+ ARMSSECPUPwrCtrl cpu_pwrctrl[SSE_MAX_CPUS];
+
/*
* 'container' holds all devices seen by all CPUs.
* 'cpu_container[i]' is the view that CPU i has: this has the
@@ -194,6 +201,8 @@ typedef struct ARMSSE {
MemoryRegion alias2;
MemoryRegion alias3[SSE_MAX_CPUS];
MemoryRegion sram[MAX_SRAM_BANKS];
+ MemoryRegion itcm;
+ MemoryRegion dtcm;
qemu_irq *exp_irqs[SSE_MAX_CPUS];
qemu_irq ppc0_irq;
@@ -207,26 +216,26 @@ typedef struct ARMSSE {
uint32_t nsccfg;
+ Clock *mainclk;
+ Clock *s32kclk;
+
/* Properties */
MemoryRegion *board_memory;
uint32_t exp_numirq;
- uint32_t mainclk_frq;
uint32_t sram_addr_width;
uint32_t init_svtor;
+ uint32_t cpu_mpu_ns[SSE_MAX_CPUS];
+ uint32_t cpu_mpu_s[SSE_MAX_CPUS];
bool cpu_fpu[SSE_MAX_CPUS];
bool cpu_dsp[SSE_MAX_CPUS];
-} ARMSSE;
+};
typedef struct ARMSSEInfo ARMSSEInfo;
-typedef struct ARMSSEClass {
- DeviceClass parent_class;
+struct ARMSSEClass {
+ SysBusDeviceClass parent_class;
const ARMSSEInfo *info;
-} ARMSSEClass;
+};
-#define ARMSSE_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMSSEClass, (klass), TYPE_ARMSSE)
-#define ARMSSE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMSSEClass, (obj), TYPE_ARMSSE)
#endif
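
Since the MAINCLK frequency property is replaced by Clock inputs, board code is now expected to create and wire clock sources before realizing the SSE; a hedged sketch (mms, iotkitdev and the 25 MHz rate are assumptions):

    mms->sysclk = clock_new(OBJECT(machine), "SYSCLK");
    clock_set_hz(mms->sysclk, 25 * 1000 * 1000);

    qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk);
    qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk);
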
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
index a30e3c6471..5c057ab2ec 100644
--- a/include/hw/arm/armv7m.h
+++ b/include/hw/arm/armv7m.h
@@ -12,12 +12,15 @@
#include "hw/sysbus.h"
#include "hw/intc/armv7m_nvic.h"
+#include "hw/misc/armv7m_ras.h"
#include "target/arm/idau.h"
+#include "qom/object.h"
+#include "hw/clock.h"
-#define TYPE_BITBAND "ARM,bitband-memory"
-#define BITBAND(obj) OBJECT_CHECK(BitBandState, (obj), TYPE_BITBAND)
+#define TYPE_BITBAND "ARM-bitband-memory"
+OBJECT_DECLARE_SIMPLE_TYPE(BitBandState, BITBAND)
-typedef struct {
+struct BitBandState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -26,10 +29,10 @@ typedef struct {
MemoryRegion iomem;
uint32_t base;
MemoryRegion *source_memory;
-} BitBandState;
+};
#define TYPE_ARMV7M "armv7m"
-#define ARMV7M(obj) OBJECT_CHECK(ARMv7MState, (obj), TYPE_ARMV7M)
+OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M)
#define ARMV7M_NUM_BITBANDS 2
@@ -40,27 +43,57 @@ typedef struct {
* a qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET).
* + Property "cpu-type": CPU type to instantiate
* + Property "num-irq": number of external IRQ lines
+ * + Property "num-prio-bits": number of priority bits in the NVIC
* + Property "memory": MemoryRegion defining the physical address space
* that CPU accesses see. (The NVIC, bitbanding and other CPU-internal
* devices will be automatically layered on top of this view.)
* + Property "idau": IDAU interface (forwarded to CPU object)
* + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
+ * + Property "init-nsvtor": non-secure VTOR reset value (forwarded to CPU object)
* + Property "vfp": enable VFP (forwarded to CPU object)
* + Property "dsp": enable DSP (forwarded to CPU object)
* + Property "enable-bitband": expose bitbanded IO
+ * + Property "mpu-ns-regions": number of Non-Secure MPU regions (forwarded
+ * to CPU object pmsav7-dregion property; default is whatever the default
+ * for the CPU is)
+ * + Property "mpu-s-regions": number of Secure MPU regions (default is
+ * whatever the default for the CPU is; must currently be set to the same
+ * value as mpu-ns-regions if the CPU implements the Security Extension)
+ * + Clock input "refclk" is the external reference clock for the systick timers
+ * + Clock input "cpuclk" is the main CPU clock
*/
-typedef struct ARMv7MState {
+struct ARMv7MState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
NVICState nvic;
BitBandState bitband[ARMV7M_NUM_BITBANDS];
ARMCPU *cpu;
+ ARMv7MRAS ras;
+ SysTickState systick[M_REG_NUM_BANKS];
/* MemoryRegion we pass to the CPU, with our devices layered on
* top of the ones the board provides in board_memory.
*/
MemoryRegion container;
+ /*
+ * MemoryRegion which passes the transaction to either the S or the
+ * NS systick device depending on the transaction attributes
+ */
+ MemoryRegion systickmem;
+ /*
+ * MemoryRegion which enforces the S/NS handling of the systick
+ * device NS alias region and passes the transaction to the
+ * NS systick device if appropriate.
+ */
+ MemoryRegion systick_ns_mem;
+ /* Ditto, for the sysregs region provided by the NVIC */
+ MemoryRegion sysreg_ns_mem;
+ /* MR providing default PPB behaviour */
+ MemoryRegion defaultmem;
+
+ Clock *refclk;
+ Clock *cpuclk;
/* Properties */
char *cpu_type;
@@ -68,10 +101,13 @@ typedef struct ARMv7MState {
MemoryRegion *board_memory;
Object *idau;
uint32_t init_svtor;
+ uint32_t init_nsvtor;
+ uint32_t mpu_ns_regions;
+ uint32_t mpu_s_regions;
bool enable_bitband;
bool start_powered_off;
bool vfp;
bool dsp;
-} ARMv7MState;
+};
#endif
diff --git a/include/hw/arm/aspeed.h b/include/hw/arm/aspeed.h
index 09da9d9acc..cbeacb214c 100644
--- a/include/hw/arm/aspeed.h
+++ b/include/hw/arm/aspeed.h
@@ -10,24 +10,22 @@
#define ARM_ASPEED_H
#include "hw/boards.h"
+#include "qom/object.h"
typedef struct AspeedMachineState AspeedMachineState;
#define TYPE_ASPEED_MACHINE MACHINE_TYPE_NAME("aspeed")
-#define ASPEED_MACHINE(obj) \
- OBJECT_CHECK(AspeedMachineState, (obj), TYPE_ASPEED_MACHINE)
+typedef struct AspeedMachineClass AspeedMachineClass;
+DECLARE_OBJ_CHECKERS(AspeedMachineState, AspeedMachineClass,
+ ASPEED_MACHINE, TYPE_ASPEED_MACHINE)
#define ASPEED_MAC0_ON (1 << 0)
#define ASPEED_MAC1_ON (1 << 1)
#define ASPEED_MAC2_ON (1 << 2)
#define ASPEED_MAC3_ON (1 << 3)
-#define ASPEED_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedMachineClass, (klass), TYPE_ASPEED_MACHINE)
-#define ASPEED_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedMachineClass, (obj), TYPE_ASPEED_MACHINE)
-typedef struct AspeedMachineClass {
+struct AspeedMachineClass {
MachineClass parent_obj;
const char *name;
@@ -40,7 +38,8 @@ typedef struct AspeedMachineClass {
uint32_t num_cs;
uint32_t macs_mask;
void (*i2c_init)(AspeedMachineState *bmc);
-} AspeedMachineClass;
+ uint32_t uart_default;
+};
#endif
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 914115f3ef..c60fac900a 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -13,45 +13,63 @@
#define ASPEED_SOC_H
#include "hw/cpu/a15mpcore.h"
+#include "hw/arm/armv7m.h"
#include "hw/intc/aspeed_vic.h"
#include "hw/misc/aspeed_scu.h"
+#include "hw/adc/aspeed_adc.h"
#include "hw/misc/aspeed_sdmc.h"
#include "hw/misc/aspeed_xdma.h"
#include "hw/timer/aspeed_timer.h"
#include "hw/rtc/aspeed_rtc.h"
#include "hw/i2c/aspeed_i2c.h"
+#include "hw/misc/aspeed_i3c.h"
#include "hw/ssi/aspeed_smc.h"
+#include "hw/misc/aspeed_hace.h"
+#include "hw/misc/aspeed_sbc.h"
#include "hw/watchdog/wdt_aspeed.h"
#include "hw/net/ftgmac100.h"
#include "target/arm/cpu.h"
#include "hw/gpio/aspeed_gpio.h"
#include "hw/sd/aspeed_sdhci.h"
#include "hw/usb/hcd-ehci.h"
+#include "qom/object.h"
+#include "hw/misc/aspeed_lpc.h"
+#include "hw/misc/unimp.h"
+#include "hw/misc/aspeed_peci.h"
+#include "hw/fsi/aspeed_apb2opb.h"
+#include "hw/char/serial.h"
#define ASPEED_SPIS_NUM 2
#define ASPEED_EHCIS_NUM 2
#define ASPEED_WDTS_NUM 4
#define ASPEED_CPUS_NUM 2
#define ASPEED_MACS_NUM 4
+#define ASPEED_UARTS_NUM 13
+#define ASPEED_JTAG_NUM 2
-typedef struct AspeedSoCState {
- /*< private >*/
+struct AspeedSoCState {
DeviceState parent;
- /*< public >*/
- ARMCPU cpu[ASPEED_CPUS_NUM];
- A15MPPrivState a7mpcore;
+ MemoryRegion *memory;
MemoryRegion *dram_mr;
+ MemoryRegion dram_container;
MemoryRegion sram;
- AspeedVICState vic;
+ MemoryRegion spi_boot_container;
+ MemoryRegion spi_boot;
AspeedRtcState rtc;
AspeedTimerCtrlState timerctrl;
AspeedI2CState i2c;
+ AspeedI3CState i3c;
AspeedSCUState scu;
+ AspeedHACEState hace;
AspeedXDMAState xdma;
+ AspeedADCState adc;
AspeedSMCState fmc;
AspeedSMCState spi[ASPEED_SPIS_NUM];
EHCISysBusState ehci[ASPEED_EHCIS_NUM];
+ AspeedSBCState sbc;
+ MemoryRegion secsram;
+ UnimplementedDeviceState sbc_unimplemented;
AspeedSDMCState sdmc;
AspeedWDTState wdt[ASPEED_WDTS_NUM];
FTGMAC100State ftgmac100[ASPEED_MACS_NUM];
@@ -60,79 +78,176 @@ typedef struct AspeedSoCState {
AspeedGPIOState gpio_1_8v;
AspeedSDHCIState sdhci;
AspeedSDHCIState emmc;
-} AspeedSoCState;
+ AspeedLPCState lpc;
+ AspeedPECIState peci;
+ SerialMM uart[ASPEED_UARTS_NUM];
+ Clock *sysclk;
+ UnimplementedDeviceState iomem;
+ UnimplementedDeviceState video;
+ UnimplementedDeviceState emmc_boot_controller;
+ UnimplementedDeviceState dpmcu;
+ UnimplementedDeviceState pwm;
+ UnimplementedDeviceState espi;
+ UnimplementedDeviceState udc;
+ UnimplementedDeviceState sgpiom;
+ UnimplementedDeviceState jtag[ASPEED_JTAG_NUM];
+ AspeedAPB2OPBState fsi[2];
+};
#define TYPE_ASPEED_SOC "aspeed-soc"
-#define ASPEED_SOC(obj) OBJECT_CHECK(AspeedSoCState, (obj), TYPE_ASPEED_SOC)
+OBJECT_DECLARE_TYPE(AspeedSoCState, AspeedSoCClass, ASPEED_SOC)
+
+struct Aspeed2400SoCState {
+ AspeedSoCState parent;
+
+ ARMCPU cpu[ASPEED_CPUS_NUM];
+ AspeedVICState vic;
+};
+
+#define TYPE_ASPEED2400_SOC "aspeed2400-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2400SoCState, ASPEED2400_SOC)
+
+struct Aspeed2600SoCState {
+ AspeedSoCState parent;
+
+ A15MPPrivState a7mpcore;
+ ARMCPU cpu[ASPEED_CPUS_NUM]; /* XXX belong to a7mpcore */
+};
+
+#define TYPE_ASPEED2600_SOC "aspeed2600-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2600SoCState, ASPEED2600_SOC)
+
+struct Aspeed10x0SoCState {
+ AspeedSoCState parent;
+
+ ARMv7MState armv7m;
+};
+
+#define TYPE_ASPEED10X0_SOC "aspeed10x0-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC)
-typedef struct AspeedSoCClass {
+struct AspeedSoCClass {
DeviceClass parent_class;
const char *name;
- const char *cpu_type;
+ /** valid_cpu_types: NULL terminated array of a single CPU type. */
+ const char * const *valid_cpu_types;
uint32_t silicon_rev;
uint64_t sram_size;
+ uint64_t secsram_size;
int spis_num;
int ehcis_num;
int wdts_num;
int macs_num;
+ int uarts_num;
+ int uarts_base;
const int *irqmap;
const hwaddr *memmap;
uint32_t num_cpus;
-} AspeedSoCClass;
+ qemu_irq (*get_irq)(AspeedSoCState *s, int dev);
+};
-#define ASPEED_SOC_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedSoCClass, (klass), TYPE_ASPEED_SOC)
-#define ASPEED_SOC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedSoCClass, (obj), TYPE_ASPEED_SOC)
+const char *aspeed_soc_cpu_type(AspeedSoCClass *sc);
enum {
- ASPEED_IOMEM,
- ASPEED_UART1,
- ASPEED_UART2,
- ASPEED_UART3,
- ASPEED_UART4,
- ASPEED_UART5,
- ASPEED_VUART,
- ASPEED_FMC,
- ASPEED_SPI1,
- ASPEED_SPI2,
- ASPEED_EHCI1,
- ASPEED_EHCI2,
- ASPEED_VIC,
- ASPEED_SDMC,
- ASPEED_SCU,
- ASPEED_ADC,
- ASPEED_VIDEO,
- ASPEED_SRAM,
- ASPEED_SDHCI,
- ASPEED_GPIO,
- ASPEED_GPIO_1_8V,
- ASPEED_RTC,
- ASPEED_TIMER1,
- ASPEED_TIMER2,
- ASPEED_TIMER3,
- ASPEED_TIMER4,
- ASPEED_TIMER5,
- ASPEED_TIMER6,
- ASPEED_TIMER7,
- ASPEED_TIMER8,
- ASPEED_WDT,
- ASPEED_PWM,
- ASPEED_LPC,
- ASPEED_IBT,
- ASPEED_I2C,
- ASPEED_ETH1,
- ASPEED_ETH2,
- ASPEED_ETH3,
- ASPEED_ETH4,
- ASPEED_MII1,
- ASPEED_MII2,
- ASPEED_MII3,
- ASPEED_MII4,
- ASPEED_SDRAM,
- ASPEED_XDMA,
- ASPEED_EMMC,
+ ASPEED_DEV_SPI_BOOT,
+ ASPEED_DEV_IOMEM,
+ ASPEED_DEV_UART0,
+ ASPEED_DEV_UART1,
+ ASPEED_DEV_UART2,
+ ASPEED_DEV_UART3,
+ ASPEED_DEV_UART4,
+ ASPEED_DEV_UART5,
+ ASPEED_DEV_UART6,
+ ASPEED_DEV_UART7,
+ ASPEED_DEV_UART8,
+ ASPEED_DEV_UART9,
+ ASPEED_DEV_UART10,
+ ASPEED_DEV_UART11,
+ ASPEED_DEV_UART12,
+ ASPEED_DEV_UART13,
+ ASPEED_DEV_VUART,
+ ASPEED_DEV_FMC,
+ ASPEED_DEV_SPI1,
+ ASPEED_DEV_SPI2,
+ ASPEED_DEV_EHCI1,
+ ASPEED_DEV_EHCI2,
+ ASPEED_DEV_VIC,
+ ASPEED_DEV_SDMC,
+ ASPEED_DEV_SCU,
+ ASPEED_DEV_ADC,
+ ASPEED_DEV_SBC,
+ ASPEED_DEV_SECSRAM,
+ ASPEED_DEV_EMMC_BC,
+ ASPEED_DEV_VIDEO,
+ ASPEED_DEV_SRAM,
+ ASPEED_DEV_SDHCI,
+ ASPEED_DEV_GPIO,
+ ASPEED_DEV_GPIO_1_8V,
+ ASPEED_DEV_RTC,
+ ASPEED_DEV_TIMER1,
+ ASPEED_DEV_TIMER2,
+ ASPEED_DEV_TIMER3,
+ ASPEED_DEV_TIMER4,
+ ASPEED_DEV_TIMER5,
+ ASPEED_DEV_TIMER6,
+ ASPEED_DEV_TIMER7,
+ ASPEED_DEV_TIMER8,
+ ASPEED_DEV_WDT,
+ ASPEED_DEV_PWM,
+ ASPEED_DEV_LPC,
+ ASPEED_DEV_IBT,
+ ASPEED_DEV_I2C,
+ ASPEED_DEV_PECI,
+ ASPEED_DEV_ETH1,
+ ASPEED_DEV_ETH2,
+ ASPEED_DEV_ETH3,
+ ASPEED_DEV_ETH4,
+ ASPEED_DEV_MII1,
+ ASPEED_DEV_MII2,
+ ASPEED_DEV_MII3,
+ ASPEED_DEV_MII4,
+ ASPEED_DEV_SDRAM,
+ ASPEED_DEV_XDMA,
+ ASPEED_DEV_EMMC,
+ ASPEED_DEV_KCS,
+ ASPEED_DEV_HACE,
+ ASPEED_DEV_DPMCU,
+ ASPEED_DEV_DP,
+ ASPEED_DEV_I3C,
+ ASPEED_DEV_ESPI,
+ ASPEED_DEV_UDC,
+ ASPEED_DEV_SGPIOM,
+ ASPEED_DEV_JTAG0,
+ ASPEED_DEV_JTAG1,
+ ASPEED_DEV_FSI1,
+ ASPEED_DEV_FSI2,
};
+qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev);
+bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp);
+void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr);
+bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp);
+void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr);
+void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev,
+ const char *name, hwaddr addr,
+ uint64_t size);
+void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
+ unsigned int count, int unit0);
+
+static inline int aspeed_uart_index(int uart_dev)
+{
+ return uart_dev - ASPEED_DEV_UART0;
+}
+
+static inline int aspeed_uart_first(AspeedSoCClass *sc)
+{
+ return aspeed_uart_index(sc->uarts_base);
+}
+
+static inline int aspeed_uart_last(AspeedSoCClass *sc)
+{
+ return aspeed_uart_first(sc) + sc->uarts_num - 1;
+}
+
#endif /* ASPEED_SOC_H */
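
The new uarts_base/uarts_num class fields plus the index helpers above let callers iterate only the UARTs a given SoC actually provides; a sketch (the loop body is left to the caller):

    AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
    int i;

    for (i = aspeed_uart_first(sc); i <= aspeed_uart_last(sc); i++) {
        SerialMM *uart = &s->uart[i];
        /* ... attach a chardev, map the MMIO window, wire the IRQ ... */
    }
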
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index 48a0ad1633..636203baa5 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -17,24 +17,32 @@
#include "hw/char/bcm2835_aux.h"
#include "hw/display/bcm2835_fb.h"
#include "hw/dma/bcm2835_dma.h"
+#include "hw/or-irq.h"
#include "hw/intc/bcm2835_ic.h"
#include "hw/misc/bcm2835_property.h"
#include "hw/misc/bcm2835_rng.h"
#include "hw/misc/bcm2835_mbox.h"
#include "hw/misc/bcm2835_mphi.h"
#include "hw/misc/bcm2835_thermal.h"
+#include "hw/misc/bcm2835_cprman.h"
+#include "hw/misc/bcm2835_powermgt.h"
#include "hw/sd/sdhci.h"
#include "hw/sd/bcm2835_sdhost.h"
#include "hw/gpio/bcm2835_gpio.h"
#include "hw/timer/bcm2835_systmr.h"
#include "hw/usb/hcd-dwc2.h"
+#include "hw/ssi/bcm2835_spi.h"
+#include "hw/i2c/bcm2835_i2c.h"
#include "hw/misc/unimp.h"
+#include "qom/object.h"
+#define TYPE_BCM_SOC_PERIPHERALS_BASE "bcm-soc-peripherals-base"
+OBJECT_DECLARE_TYPE(BCMSocPeripheralBaseState, BCMSocPeripheralBaseClass,
+ BCM_SOC_PERIPHERALS_BASE)
#define TYPE_BCM2835_PERIPHERALS "bcm2835-peripherals"
-#define BCM2835_PERIPHERALS(obj) \
- OBJECT_CHECK(BCM2835PeripheralState, (obj), TYPE_BCM2835_PERIPHERALS)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PeripheralState, BCM2835_PERIPHERALS)
-typedef struct BCM2835PeripheralState {
+struct BCMSocPeripheralBaseState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -45,31 +53,53 @@ typedef struct BCM2835PeripheralState {
BCM2835SystemTimerState systmr;
BCM2835MphiState mphi;
+ UnimplementedDeviceState txp;
UnimplementedDeviceState armtmr;
- UnimplementedDeviceState cprman;
- UnimplementedDeviceState a2w;
+ BCM2835PowerMgtState powermgt;
+ BCM2835CprmanState cprman;
PL011State uart0;
BCM2835AuxState aux;
BCM2835FBState fb;
BCM2835DMAState dma;
+ OrIRQState orgated_dma_irq;
BCM2835ICState ic;
BCM2835PropertyState property;
- BCM2835RngState rng;
BCM2835MboxState mboxes;
SDHCIState sdhci;
BCM2835SDHostState sdhost;
- BCM2835GpioState gpio;
- Bcm2835ThermalState thermal;
UnimplementedDeviceState i2s;
- UnimplementedDeviceState spi[1];
- UnimplementedDeviceState i2c[3];
+ BCM2835SPIState spi[1];
+ BCM2835I2CState i2c[3];
+ OrIRQState orgated_i2c_irq;
UnimplementedDeviceState otp;
UnimplementedDeviceState dbus;
UnimplementedDeviceState ave0;
+ UnimplementedDeviceState v3d;
UnimplementedDeviceState bscsl;
UnimplementedDeviceState smi;
DWC2State dwc2;
UnimplementedDeviceState sdramc;
-} BCM2835PeripheralState;
+};
+
+struct BCMSocPeripheralBaseClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+ uint64_t peri_size; /* Peripheral range size */
+};
+
+struct BCM2835PeripheralState {
+ /*< private >*/
+ BCMSocPeripheralBaseState parent_obj;
+ /*< public >*/
+ BCM2835RngState rng;
+ Bcm2835ThermalState thermal;
+ BCM2835GpioState gpio;
+};
+
+void create_unimp(BCMSocPeripheralBaseState *ps,
+ UnimplementedDeviceState *uds,
+ const char *name, hwaddr ofs, hwaddr size);
+void bcm_soc_peripherals_common_realize(DeviceState *dev, Error **errp);
#endif /* BCM2835_PERIPHERALS_H */
diff --git a/include/hw/arm/bcm2836.h b/include/hw/arm/bcm2836.h
index 79dfff9d73..918fb3bf14 100644
--- a/include/hw/arm/bcm2836.h
+++ b/include/hw/arm/bcm2836.h
@@ -15,9 +15,12 @@
#include "hw/arm/bcm2835_peripherals.h"
#include "hw/intc/bcm2836_control.h"
#include "target/arm/cpu.h"
+#include "qom/object.h"
+#define TYPE_BCM283X_BASE "bcm283x-base"
+OBJECT_DECLARE_TYPE(BCM283XBaseState, BCM283XBaseClass, BCM283X_BASE)
#define TYPE_BCM283X "bcm283x"
-#define BCM283X(obj) OBJECT_CHECK(BCM283XState, (obj), TYPE_BCM283X)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM283XState, BCM283X)
#define BCM283X_NCPUS 4
@@ -25,10 +28,11 @@
* them, code using these devices should always handle them via the
* BCM283x base class, so they have no BCM2836(obj) etc macros.
*/
+#define TYPE_BCM2835 "bcm2835"
#define TYPE_BCM2836 "bcm2836"
#define TYPE_BCM2837 "bcm2837"
-typedef struct BCM283XState {
+struct BCM283XBaseState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
@@ -39,19 +43,28 @@ typedef struct BCM283XState {
ARMCPU core;
} cpu[BCM283X_NCPUS];
BCM2836ControlState control;
- BCM2835PeripheralState peripherals;
-} BCM283XState;
-
-typedef struct BCM283XInfo BCM283XInfo;
+};
-typedef struct BCM283XClass {
+struct BCM283XBaseClass {
+ /*< private >*/
DeviceClass parent_class;
- const BCM283XInfo *info;
-} BCM283XClass;
+ /*< public >*/
+ const char *name;
+ const char *cpu_type;
+ unsigned core_count;
+ hwaddr peri_base; /* Peripheral base address seen by the CPU */
+ hwaddr ctrl_base; /* Interrupt controller and mailboxes etc. */
+ int clusterid;
+};
+
+struct BCM283XState {
+ /*< private >*/
+ BCM283XBaseState parent_obj;
+ /*< public >*/
+ BCM2835PeripheralState peripherals;
+};
-#define BCM283X_CLASS(klass) \
- OBJECT_CLASS_CHECK(BCM283XClass, (klass), TYPE_BCM283X)
-#define BCM283X_GET_CLASS(obj) \
- OBJECT_GET_CLASS(BCM283XClass, (obj), TYPE_BCM283X)
+bool bcm283x_common_realize(DeviceState *dev, BCMSocPeripheralBaseState *ps,
+ Error **errp);
#endif /* BCM2836_H */
diff --git a/include/hw/arm/bcm2838.h b/include/hw/arm/bcm2838.h
new file mode 100644
index 0000000000..e53c7bedf9
--- /dev/null
+++ b/include/hw/arm/bcm2838.h
@@ -0,0 +1,31 @@
+/*
+ * BCM2838 SoC emulation
+ *
+ * Copyright (C) 2022 Ovchinnikov Vitalii <vitalii.ovchinnikov@auriga.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BCM2838_H
+#define BCM2838_H
+
+#include "hw/arm/bcm2836.h"
+#include "hw/intc/arm_gic.h"
+#include "hw/arm/bcm2838_peripherals.h"
+
+#define BCM2838_PERI_LOW_BASE 0xfc000000
+#define BCM2838_GIC_BASE 0x40000
+
+#define TYPE_BCM2838 "bcm2838"
+
+OBJECT_DECLARE_TYPE(BCM2838State, BCM2838Class, BCM2838)
+
+struct BCM2838State {
+ /*< private >*/
+ BCM283XBaseState parent_obj;
+ /*< public >*/
+ BCM2838PeripheralState peripherals;
+ GICState gic;
+};
+
+#endif /* BCM2838_H */
diff --git a/include/hw/arm/bcm2838_peripherals.h b/include/hw/arm/bcm2838_peripherals.h
new file mode 100644
index 0000000000..7ee1bd066f
--- /dev/null
+++ b/include/hw/arm/bcm2838_peripherals.h
@@ -0,0 +1,84 @@
+/*
+ * BCM2838 peripherals emulation
+ *
+ * Copyright (C) 2022 Ovchinnikov Vitalii <vitalii.ovchinnikov@auriga.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BCM2838_PERIPHERALS_H
+#define BCM2838_PERIPHERALS_H
+
+#include "hw/arm/bcm2835_peripherals.h"
+#include "hw/sd/sdhci.h"
+#include "hw/gpio/bcm2838_gpio.h"
+
+/* SPI */
+#define GIC_SPI_INTERRUPT_MBOX 33
+#define GIC_SPI_INTERRUPT_MPHI 40
+#define GIC_SPI_INTERRUPT_DWC2 73
+#define GIC_SPI_INTERRUPT_DMA_0 80
+#define GIC_SPI_INTERRUPT_DMA_6 86
+#define GIC_SPI_INTERRUPT_DMA_7_8 87
+#define GIC_SPI_INTERRUPT_DMA_9_10 88
+#define GIC_SPI_INTERRUPT_AUX_UART1 93
+#define GIC_SPI_INTERRUPT_SDHOST 120
+#define GIC_SPI_INTERRUPT_UART0 121
+#define GIC_SPI_INTERRUPT_RNG200 125
+#define GIC_SPI_INTERRUPT_EMMC_EMMC2 126
+#define GIC_SPI_INTERRUPT_PCI_INT_A 143
+#define GIC_SPI_INTERRUPT_GENET_A 157
+#define GIC_SPI_INTERRUPT_GENET_B 158
+
+
+/* GPU (legacy) DMA interrupts */
+#define GPU_INTERRUPT_DMA0 16
+#define GPU_INTERRUPT_DMA1 17
+#define GPU_INTERRUPT_DMA2 18
+#define GPU_INTERRUPT_DMA3 19
+#define GPU_INTERRUPT_DMA4 20
+#define GPU_INTERRUPT_DMA5 21
+#define GPU_INTERRUPT_DMA6 22
+#define GPU_INTERRUPT_DMA7_8 23
+#define GPU_INTERRUPT_DMA9_10 24
+#define GPU_INTERRUPT_DMA11 25
+#define GPU_INTERRUPT_DMA12 26
+#define GPU_INTERRUPT_DMA13 27
+#define GPU_INTERRUPT_DMA14 28
+#define GPU_INTERRUPT_DMA15 31
+
+#define BCM2838_MPHI_OFFSET 0xb200
+#define BCM2838_MPHI_SIZE 0x200
+
+#define TYPE_BCM2838_PERIPHERALS "bcm2838-peripherals"
+OBJECT_DECLARE_TYPE(BCM2838PeripheralState, BCM2838PeripheralClass,
+ BCM2838_PERIPHERALS)
+
+struct BCM2838PeripheralState {
+ /*< private >*/
+ BCMSocPeripheralBaseState parent_obj;
+
+ /*< public >*/
+ MemoryRegion peri_low_mr;
+ MemoryRegion peri_low_mr_alias;
+ MemoryRegion mphi_mr_alias;
+
+ SDHCIState emmc2;
+ BCM2838GpioState gpio;
+
+ OrIRQState mmc_irq_orgate;
+ OrIRQState dma_7_8_irq_orgate;
+ OrIRQState dma_9_10_irq_orgate;
+
+ UnimplementedDeviceState asb;
+ UnimplementedDeviceState clkisp;
+};
+
+struct BCM2838PeripheralClass {
+ /*< private >*/
+ BCMSocPeripheralBaseClass parent_class;
+ /*< public >*/
+ uint64_t peri_low_size; /* Peripheral lower range size */
+};
+
+#endif /* BCM2838_PERIPHERALS_H */
diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h
index ce2b48b88b..80c492d742 100644
--- a/include/hw/arm/boot.h
+++ b/include/hw/arm/boot.h
@@ -25,13 +25,16 @@ typedef enum {
* armv7m_load_kernel:
* @cpu: CPU
* @kernel_filename: file to load
+ * @mem_base: base address to load image at (should be where the
+ * CPU expects to find its vector table on reset)
* @mem_size: mem_size: maximum image size to load
*
* Load the guest image for an ARMv7M system. This must be called by
* any ARMv7M board. (This is necessary to ensure that the CPU resets
* correctly on system reset, as well as for kernel loading.)
*/
-void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);
+void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename,
+ hwaddr mem_base, int mem_size);
/* arm_boot.c */
struct arm_boot_info {
@@ -56,7 +59,6 @@ struct arm_boot_info {
hwaddr smp_loader_start;
hwaddr smp_bootreg_addr;
hwaddr gic_cpu_if_addr;
- int nb_cpus;
int board_id;
/* ARM machines that support the ARM Security Extensions use this field to
* control whether Linux is booted as secure(true) or non-secure(false).
@@ -70,6 +72,9 @@ struct arm_boot_info {
* boot loader/boot ROM code, and secondary_cpu_reset_hook() should
* perform any necessary CPU reset handling and set the PC for the
* secondary CPUs to point at this boot blob.
+ *
+ * These hooks won't be called if secondary CPUs are booting via
+ * emulated PSCI (see psci_conduit below).
*/
void (*write_secondary_boot)(ARMCPU *cpu,
const struct arm_boot_info *info);
@@ -86,6 +91,16 @@ struct arm_boot_info {
* the user it should implement this hook.
*/
void (*modify_dtb)(const struct arm_boot_info *info, void *fdt);
+ /*
+ * If a board wants to use the QEMU emulated-firmware PSCI support,
+ * it should set this to QEMU_PSCI_CONDUIT_HVC or QEMU_PSCI_CONDUIT_SMC
+ * as appropriate. arm_load_kernel() will set the psci-conduit and
+ * start-powered-off properties on the CPUs accordingly.
+ * Note that if the guest image is started at the same exception level
+ * as the conduit specifies calls should go to (eg guest firmware booted
+ * to EL3) then PSCI will not be enabled.
+ */
+ int psci_conduit;
/* Used internally by arm_boot.c */
int is_linux;
hwaddr initrd_start;
@@ -168,4 +183,53 @@ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
const struct arm_boot_info *info,
hwaddr mvbar_addr);
+typedef enum {
+ FIXUP_NONE = 0, /* do nothing */
+ FIXUP_TERMINATOR, /* end of insns */
+ FIXUP_BOARDID, /* overwrite with board ID number */
+ FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */
+ FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */
+ FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */
+ FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */
+ FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */
+ FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */
+ FIXUP_BOOTREG, /* overwrite with boot register address */
+ FIXUP_DSB, /* overwrite with correct DSB insn for cpu */
+ FIXUP_MAX,
+} FixupType;
+
+typedef struct ARMInsnFixup {
+ uint32_t insn;
+ FixupType fixup;
+} ARMInsnFixup;
+
+/**
+ * arm_write_bootloader - write a bootloader to guest memory
+ * @name: name of the bootloader blob
+ * @as: AddressSpace to write the bootloader
+ * @addr: guest address to write it
+ * @insns: the blob to be loaded
+ * @fixupcontext: context to be used for any fixups in @insns
+ *
+ * Write a bootloader to guest memory at address @addr in the address
+ * space @as. @name is the name to use for the resulting ROM blob, so
+ * it should be unique in the system and reasonably identifiable for debugging.
+ *
+ * @insns must be an array of ARMInsnFixup structs, each of which has
+ * one 32-bit value to be written to the guest memory, and a fixup to be
+ * applied to the value. FIXUP_NONE (do nothing) is value 0, so effectively
+ * the fixup is optional when writing a struct initializer.
+ * The final entry in the array must be { 0, FIXUP_TERMINATOR }.
+ *
+ * All other supported fixup types have the semantics "ignore insn
+ * and instead use the value from the array element @fixupcontext[fixup]".
+ * The caller should therefore provide @fixupcontext as an array of
+ * size FIXUP_MAX whose elements have been initialized for at least
+ * the entries that @insns refers to.
+ */
+void arm_write_bootloader(const char *name,
+ AddressSpace *as, hwaddr addr,
+ const ARMInsnFixup *insns,
+ const uint32_t *fixupcontext);
+
#endif /* HW_ARM_BOOT_H */
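
A sketch of the calling convention documented above: a FIXUP_TERMINATOR-terminated instruction array plus a fixupcontext[] supplying values for the fixup slots it references (the instruction word and the blob name are placeholders):

    static const ARMInsnFixup smpboot[] = {
        { 0xe59f0000 },               /* plain insn, implicit FIXUP_NONE */
        { 0, FIXUP_BOOTREG },         /* overwritten with the boot register address */
        { 0, FIXUP_TERMINATOR },
    };
    uint32_t fixupcontext[FIXUP_MAX] = { 0 };

    fixupcontext[FIXUP_BOOTREG] = info->smp_bootreg_addr;
    arm_write_bootloader("smpboot", as, info->smp_loader_start,
                         smpboot, fixupcontext);
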
diff --git a/include/hw/arm/bsa.h b/include/hw/arm/bsa.h
new file mode 100644
index 0000000000..8eaab603c0
--- /dev/null
+++ b/include/hw/arm/bsa.h
@@ -0,0 +1,35 @@
+/*
+ * Common definitions for Arm Base System Architecture (BSA) platforms.
+ *
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_ARM_BSA_H
+#define QEMU_ARM_BSA_H
+
+/* These are architectural INTID values */
+#define VIRTUAL_PMU_IRQ 23
+#define ARCH_GIC_MAINT_IRQ 25
+#define ARCH_TIMER_NS_EL2_IRQ 26
+#define ARCH_TIMER_VIRT_IRQ 27
+#define ARCH_TIMER_NS_EL2_VIRT_IRQ 28
+#define ARCH_TIMER_S_EL1_IRQ 29
+#define ARCH_TIMER_NS_EL1_IRQ 30
+
+#define INTID_TO_PPI(irq) ((irq) - 16)
+
+#endif /* QEMU_ARM_BSA_H */
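
INTID_TO_PPI() simply strips the 16 SGI INTIDs to obtain the per-CPU PPI index, e.g.:

    int ppi = INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ);   /* 30 - 16 == 14 */
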
diff --git a/include/hw/arm/digic.h b/include/hw/arm/digic.h
index 63785baaa8..8f2735c284 100644
--- a/include/hw/arm/digic.h
+++ b/include/hw/arm/digic.h
@@ -21,14 +21,15 @@
#include "cpu.h"
#include "hw/timer/digic-timer.h"
#include "hw/char/digic-uart.h"
+#include "qom/object.h"
#define TYPE_DIGIC "digic"
-#define DIGIC(obj) OBJECT_CHECK(DigicState, (obj), TYPE_DIGIC)
+OBJECT_DECLARE_SIMPLE_TYPE(DigicState, DIGIC)
#define DIGIC4_NB_TIMERS 3
-typedef struct DigicState {
+struct DigicState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
@@ -37,6 +38,6 @@ typedef struct DigicState {
DigicTimerState timer[DIGIC4_NB_TIMERS];
DigicUartState uart;
-} DigicState;
+};
#endif /* HW_ARM_DIGIC_H */
diff --git a/include/hw/arm/exynos4210.h b/include/hw/arm/exynos4210.h
index 55260394af..d33fe38586 100644
--- a/include/hw/arm/exynos4210.h
+++ b/include/hw/arm/exynos4210.h
@@ -26,7 +26,12 @@
#include "hw/or-irq.h"
#include "hw/sysbus.h"
-#include "target/arm/cpu-qom.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/intc/exynos4210_gic.h"
+#include "hw/intc/exynos4210_combiner.h"
+#include "hw/core/split-irq.h"
+#include "hw/arm/boot.h"
+#include "qom/object.h"
#define EXYNOS4210_NCPUS 2
@@ -64,34 +69,25 @@
#define EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ \
(EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ * 8)
-#define EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit) ((grp)*8 + (bit))
-#define EXYNOS4210_COMBINER_GET_GRP_NUM(irq) ((irq) / 8)
-#define EXYNOS4210_COMBINER_GET_BIT_NUM(irq) \
- ((irq) - 8 * EXYNOS4210_COMBINER_GET_GRP_NUM(irq))
-
-/* IRQs number for external and internal GIC */
-#define EXYNOS4210_EXT_GIC_NIRQ (160-32)
-#define EXYNOS4210_INT_GIC_NIRQ 64
-
#define EXYNOS4210_I2C_NUMBER 9
#define EXYNOS4210_NUM_DMA 3
-typedef struct Exynos4210Irq {
- qemu_irq int_combiner_irq[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
- qemu_irq ext_combiner_irq[EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ];
- qemu_irq int_gic_irq[EXYNOS4210_INT_GIC_NIRQ];
- qemu_irq ext_gic_irq[EXYNOS4210_EXT_GIC_NIRQ];
- qemu_irq board_irqs[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
-} Exynos4210Irq;
+/*
+ * We need one splitter for every external combiner input, plus
+ * one for every non-zero entry in combiner_grp_to_gic_id[],
+ * minus one for every external combiner ID in second or later
+ * places in a combinermap[] line.
+ * We'll assert in exynos4210_init_board_irqs() if this is wrong.
+ */
+#define EXYNOS4210_NUM_SPLITTERS (EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ + 38)
-typedef struct Exynos4210State {
+struct Exynos4210State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
ARMCPU *cpu[EXYNOS4210_NCPUS];
- Exynos4210Irq irqs;
- qemu_irq *irq_table;
+ qemu_irq irq_table[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
MemoryRegion chipid_mem;
MemoryRegion iram_mem;
@@ -100,23 +96,21 @@ typedef struct Exynos4210State {
MemoryRegion boot_secondary;
MemoryRegion bootreg_mem;
I2CBus *i2c_if[EXYNOS4210_I2C_NUMBER];
- qemu_or_irq pl330_irq_orgate[EXYNOS4210_NUM_DMA];
-} Exynos4210State;
+ OrIRQState pl330_irq_orgate[EXYNOS4210_NUM_DMA];
+ OrIRQState cpu_irq_orgate[EXYNOS4210_NCPUS];
+ A9MPPrivState a9mpcore;
+ Exynos4210GicState ext_gic;
+ Exynos4210CombinerState int_combiner;
+ Exynos4210CombinerState ext_combiner;
+ SplitIRQ splitter[EXYNOS4210_NUM_SPLITTERS];
+};
#define TYPE_EXYNOS4210_SOC "exynos4210"
-#define EXYNOS4210_SOC(obj) \
- OBJECT_CHECK(Exynos4210State, obj, TYPE_EXYNOS4210_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210State, EXYNOS4210_SOC)
void exynos4210_write_secondary(ARMCPU *cpu,
const struct arm_boot_info *info);
-/* Initialize exynos4210 IRQ subsystem stub */
-qemu_irq *exynos4210_init_irq(Exynos4210Irq *env);
-
-/* Initialize board IRQs.
- * These IRQs contain splitted Int/External Combiner and External Gic IRQs */
-void exynos4210_init_board_irqs(Exynos4210Irq *s);
-
/* Get IRQ number from exynos4210 IRQ subsystem stub.
* To identify IRQ source use internal combiner group and bit number
* grp - group number
@@ -124,12 +118,6 @@ void exynos4210_init_board_irqs(Exynos4210Irq *s);
uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit);
/*
- * Get Combiner input GPIO into irqs structure
- */
-void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev,
- int ext);
-
-/*
* exynos4210 UART
*/
DeviceState *exynos4210_uart_create(hwaddr addr,
diff --git a/include/hw/arm/fsl-imx25.h b/include/hw/arm/fsl-imx25.h
index 54ee1bfd78..df2f83980f 100644
--- a/include/hw/arm/fsl-imx25.h
+++ b/include/hw/arm/fsl-imx25.h
@@ -17,7 +17,6 @@
#ifndef FSL_IMX25_H
#define FSL_IMX25_H
-#include "hw/arm/boot.h"
#include "hw/intc/imx_avic.h"
#include "hw/misc/imx25_ccm.h"
#include "hw/char/imx_serial.h"
@@ -32,9 +31,10 @@
#include "hw/watchdog/wdt_imx2.h"
#include "exec/memory.h"
#include "target/arm/cpu.h"
+#include "qom/object.h"
-#define TYPE_FSL_IMX25 "fsl,imx25"
-#define FSL_IMX25(obj) OBJECT_CHECK(FslIMX25State, (obj), TYPE_FSL_IMX25)
+#define TYPE_FSL_IMX25 "fsl-imx25"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX25State, FSL_IMX25)
#define FSL_IMX25_NUM_UARTS 5
#define FSL_IMX25_NUM_GPTS 4
@@ -44,7 +44,7 @@
#define FSL_IMX25_NUM_ESDHCS 2
#define FSL_IMX25_NUM_USBS 2
-typedef struct FslIMX25State {
+struct FslIMX25State {
/*< private >*/
DeviceState parent_obj;
@@ -66,7 +66,7 @@ typedef struct FslIMX25State {
MemoryRegion iram;
MemoryRegion iram_alias;
uint32_t phy_num;
-} FslIMX25State;
+};
/**
* i.MX25 memory map
@@ -178,7 +178,7 @@ typedef struct FslIMX25State {
* 0xBB00_0000 0xBB00_0FFF 4 Kbytes NAND flash main area buffer
* 0xBB00_1000 0xBB00_11FF 512 B NAND flash spare area buffer
* 0xBB00_1200 0xBB00_1DFF 3 Kbytes Reserved
- * 0xBB00_1E00 0xBB00_1FFF 512 B NAND flash control regisers
+ * 0xBB00_1E00 0xBB00_1FFF 512 B NAND flash control registers
* 0xBB01_2000 0xBFFF_FFFF 96 Mbytes (minus 8 Kbytes) Reserved
* 0xC000_0000 0xFFFF_FFFF 1024 Mbytes Reserved
*/
diff --git a/include/hw/arm/fsl-imx31.h b/include/hw/arm/fsl-imx31.h
index dd8561b309..40c593a5cf 100644
--- a/include/hw/arm/fsl-imx31.h
+++ b/include/hw/arm/fsl-imx31.h
@@ -17,7 +17,6 @@
#ifndef FSL_IMX31_H
#define FSL_IMX31_H
-#include "hw/arm/boot.h"
#include "hw/intc/imx_avic.h"
#include "hw/misc/imx31_ccm.h"
#include "hw/char/imx_serial.h"
@@ -28,16 +27,17 @@
#include "hw/watchdog/wdt_imx2.h"
#include "exec/memory.h"
#include "target/arm/cpu.h"
+#include "qom/object.h"
-#define TYPE_FSL_IMX31 "fsl,imx31"
-#define FSL_IMX31(obj) OBJECT_CHECK(FslIMX31State, (obj), TYPE_FSL_IMX31)
+#define TYPE_FSL_IMX31 "fsl-imx31"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX31State, FSL_IMX31)
#define FSL_IMX31_NUM_UARTS 2
#define FSL_IMX31_NUM_EPITS 2
#define FSL_IMX31_NUM_I2CS 3
#define FSL_IMX31_NUM_GPIOS 3
-typedef struct FslIMX31State {
+struct FslIMX31State {
/*< private >*/
DeviceState parent_obj;
@@ -55,7 +55,7 @@ typedef struct FslIMX31State {
MemoryRegion rom;
MemoryRegion iram;
MemoryRegion iram_alias;
-} FslIMX31State;
+};
#define FSL_IMX31_SECURE_ROM_ADDR 0x00000000
#define FSL_IMX31_SECURE_ROM_SIZE 0x4000
diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h
index 162fe99375..61c593ffd2 100644
--- a/include/hw/arm/fsl-imx6.h
+++ b/include/hw/arm/fsl-imx6.h
@@ -17,10 +17,10 @@
#ifndef FSL_IMX6_H
#define FSL_IMX6_H
-#include "hw/arm/boot.h"
#include "hw/cpu/a9mpcore.h"
#include "hw/misc/imx6_ccm.h"
#include "hw/misc/imx6_src.h"
+#include "hw/misc/imx7_snvs.h"
#include "hw/watchdog/wdt_imx2.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
@@ -32,11 +32,13 @@
#include "hw/net/imx_fec.h"
#include "hw/usb/chipidea.h"
#include "hw/usb/imx-usb-phy.h"
+#include "hw/pci-host/designware.h"
#include "exec/memory.h"
#include "cpu.h"
+#include "qom/object.h"
-#define TYPE_FSL_IMX6 "fsl,imx6"
-#define FSL_IMX6(obj) OBJECT_CHECK(FslIMX6State, (obj), TYPE_FSL_IMX6)
+#define TYPE_FSL_IMX6 "fsl-imx6"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX6State, FSL_IMX6)
#define FSL_IMX6_NUM_CPUS 4
#define FSL_IMX6_NUM_UARTS 5
@@ -49,32 +51,34 @@
#define FSL_IMX6_NUM_USB_PHYS 2
#define FSL_IMX6_NUM_USBS 4
-typedef struct FslIMX6State {
+struct FslIMX6State {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
- ARMCPU cpu[FSL_IMX6_NUM_CPUS];
- A9MPPrivState a9mpcore;
- IMX6CCMState ccm;
- IMX6SRCState src;
- IMXSerialState uart[FSL_IMX6_NUM_UARTS];
- IMXGPTState gpt;
- IMXEPITState epit[FSL_IMX6_NUM_EPITS];
- IMXI2CState i2c[FSL_IMX6_NUM_I2CS];
- IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS];
- SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS];
- IMXSPIState spi[FSL_IMX6_NUM_ECSPIS];
- IMX2WdtState wdt[FSL_IMX6_NUM_WDTS];
- IMXUSBPHYState usbphy[FSL_IMX6_NUM_USB_PHYS];
- ChipideaState usb[FSL_IMX6_NUM_USBS];
- IMXFECState eth;
- MemoryRegion rom;
- MemoryRegion caam;
- MemoryRegion ocram;
- MemoryRegion ocram_alias;
- uint32_t phy_num;
-} FslIMX6State;
+ ARMCPU cpu[FSL_IMX6_NUM_CPUS];
+ A9MPPrivState a9mpcore;
+ IMX6CCMState ccm;
+ IMX6SRCState src;
+ IMX7SNVSState snvs;
+ IMXSerialState uart[FSL_IMX6_NUM_UARTS];
+ IMXGPTState gpt;
+ IMXEPITState epit[FSL_IMX6_NUM_EPITS];
+ IMXI2CState i2c[FSL_IMX6_NUM_I2CS];
+ IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS];
+ SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS];
+ IMXSPIState spi[FSL_IMX6_NUM_ECSPIS];
+ IMX2WdtState wdt[FSL_IMX6_NUM_WDTS];
+ IMXUSBPHYState usbphy[FSL_IMX6_NUM_USB_PHYS];
+ ChipideaState usb[FSL_IMX6_NUM_USBS];
+ IMXFECState eth;
+ DesignwarePCIEHost pcie;
+ MemoryRegion rom;
+ MemoryRegion caam;
+ MemoryRegion ocram;
+ MemoryRegion ocram_alias;
+ uint32_t phy_num;
+};
#define FSL_IMX6_MMDC_ADDR 0x10000000
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
index fcbaf3dc86..8277b0e8b2 100644
--- a/include/hw/arm/fsl-imx6ul.h
+++ b/include/hw/arm/fsl-imx6ul.h
@@ -17,12 +17,10 @@
#ifndef FSL_IMX6UL_H
#define FSL_IMX6UL_H
-#include "hw/arm/boot.h"
#include "hw/cpu/a15mpcore.h"
#include "hw/misc/imx6ul_ccm.h"
#include "hw/misc/imx6_src.h"
#include "hw/misc/imx7_snvs.h"
-#include "hw/misc/imx7_gpr.h"
#include "hw/intc/imx_gpcv2.h"
#include "hw/watchdog/wdt_imx2.h"
#include "hw/gpio/imx_gpio.h"
@@ -30,7 +28,6 @@
#include "hw/timer/imx_gpt.h"
#include "hw/timer/imx_epit.h"
#include "hw/i2c/imx_i2c.h"
-#include "hw/gpio/imx_gpio.h"
#include "hw/sd/sdhci.h"
#include "hw/ssi/imx_spi.h"
#include "hw/net/imx_fec.h"
@@ -38,9 +35,11 @@
#include "hw/usb/imx-usb-phy.h"
#include "exec/memory.h"
#include "cpu.h"
+#include "qom/object.h"
+#include "qemu/units.h"
-#define TYPE_FSL_IMX6UL "fsl,imx6ul"
-#define FSL_IMX6UL(obj) OBJECT_CHECK(FslIMX6ULState, (obj), TYPE_FSL_IMX6UL)
+#define TYPE_FSL_IMX6UL "fsl-imx6ul"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX6ULState, FSL_IMX6UL)
enum FslIMX6ULConfiguration {
FSL_IMX6UL_NUM_CPUS = 1,
@@ -58,9 +57,12 @@ enum FslIMX6ULConfiguration {
FSL_IMX6UL_NUM_ADCS = 2,
FSL_IMX6UL_NUM_USB_PHYS = 2,
FSL_IMX6UL_NUM_USBS = 2,
+ FSL_IMX6UL_NUM_SAIS = 3,
+ FSL_IMX6UL_NUM_CANS = 2,
+ FSL_IMX6UL_NUM_PWMS = 8,
};
-typedef struct FslIMX6ULState {
+struct FslIMX6ULState {
/*< private >*/
DeviceState parent_obj;
@@ -74,7 +76,6 @@ typedef struct FslIMX6ULState {
IMX6SRCState src;
IMX7SNVSState snvs;
IMXGPCv2State gpcv2;
- IMX7GPRState gpr;
IMXSPIState spi[FSL_IMX6UL_NUM_ECSPIS];
IMXI2CState i2c[FSL_IMX6UL_NUM_I2CS];
IMXSerialState uart[FSL_IMX6UL_NUM_UARTS];
@@ -89,123 +90,234 @@ typedef struct FslIMX6ULState {
MemoryRegion ocram_alias;
uint32_t phy_num[FSL_IMX6UL_NUM_ETHS];
-} FslIMX6ULState;
+ bool phy_connected[FSL_IMX6UL_NUM_ETHS];
+};
enum FslIMX6ULMemoryMap {
FSL_IMX6UL_MMDC_ADDR = 0x80000000,
- FSL_IMX6UL_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
+ FSL_IMX6UL_MMDC_SIZE = (2 * GiB),
FSL_IMX6UL_QSPI1_MEM_ADDR = 0x60000000,
+ FSL_IMX6UL_QSPI1_MEM_SIZE = (256 * MiB),
+
FSL_IMX6UL_EIM_ALIAS_ADDR = 0x58000000,
+ FSL_IMX6UL_EIM_ALIAS_SIZE = (128 * MiB),
+
FSL_IMX6UL_EIM_CS_ADDR = 0x50000000,
+ FSL_IMX6UL_EIM_CS_SIZE = (128 * MiB),
+
FSL_IMX6UL_AES_ENCRYPT_ADDR = 0x10000000,
+ FSL_IMX6UL_AES_ENCRYPT_SIZE = (1 * MiB),
+
FSL_IMX6UL_QSPI1_RX_ADDR = 0x0C000000,
+ FSL_IMX6UL_QSPI1_RX_SIZE = (32 * MiB),
- /* AIPS-2 */
+ /* AIPS-2 Begin */
FSL_IMX6UL_UART6_ADDR = 0x021FC000,
+
FSL_IMX6UL_I2C4_ADDR = 0x021F8000,
+
FSL_IMX6UL_UART5_ADDR = 0x021F4000,
FSL_IMX6UL_UART4_ADDR = 0x021F0000,
FSL_IMX6UL_UART3_ADDR = 0x021EC000,
FSL_IMX6UL_UART2_ADDR = 0x021E8000,
+
FSL_IMX6UL_WDOG3_ADDR = 0x021E4000,
+
FSL_IMX6UL_QSPI_ADDR = 0x021E0000,
+ FSL_IMX6UL_QSPI_SIZE = 0x500,
+
FSL_IMX6UL_SYS_CNT_CTRL_ADDR = 0x021DC000,
+ FSL_IMX6UL_SYS_CNT_CTRL_SIZE = (16 * KiB),
+
FSL_IMX6UL_SYS_CNT_CMP_ADDR = 0x021D8000,
+ FSL_IMX6UL_SYS_CNT_CMP_SIZE = (16 * KiB),
+
FSL_IMX6UL_SYS_CNT_RD_ADDR = 0x021D4000,
+ FSL_IMX6UL_SYS_CNT_RD_SIZE = (16 * KiB),
+
FSL_IMX6UL_TZASC_ADDR = 0x021D0000,
+ FSL_IMX6UL_TZASC_SIZE = (16 * KiB),
+
FSL_IMX6UL_PXP_ADDR = 0x021CC000,
+ FSL_IMX6UL_PXP_SIZE = (16 * KiB),
+
FSL_IMX6UL_LCDIF_ADDR = 0x021C8000,
+ FSL_IMX6UL_LCDIF_SIZE = 0x100,
+
FSL_IMX6UL_CSI_ADDR = 0x021C4000,
+ FSL_IMX6UL_CSI_SIZE = 0x100,
+
FSL_IMX6UL_CSU_ADDR = 0x021C0000,
+ FSL_IMX6UL_CSU_SIZE = (16 * KiB),
+
FSL_IMX6UL_OCOTP_CTRL_ADDR = 0x021BC000,
+ FSL_IMX6UL_OCOTP_CTRL_SIZE = (4 * KiB),
+
FSL_IMX6UL_EIM_ADDR = 0x021B8000,
+ FSL_IMX6UL_EIM_SIZE = 0x100,
+
FSL_IMX6UL_SIM2_ADDR = 0x021B4000,
+
FSL_IMX6UL_MMDC_CFG_ADDR = 0x021B0000,
+ FSL_IMX6UL_MMDC_CFG_SIZE = (4 * KiB),
+
FSL_IMX6UL_ROMCP_ADDR = 0x021AC000,
+ FSL_IMX6UL_ROMCP_SIZE = 0x300,
+
FSL_IMX6UL_I2C3_ADDR = 0x021A8000,
FSL_IMX6UL_I2C2_ADDR = 0x021A4000,
FSL_IMX6UL_I2C1_ADDR = 0x021A0000,
+
FSL_IMX6UL_ADC2_ADDR = 0x0219C000,
FSL_IMX6UL_ADC1_ADDR = 0x02198000,
+ FSL_IMX6UL_ADCn_SIZE = 0x100,
+
FSL_IMX6UL_USDHC2_ADDR = 0x02194000,
FSL_IMX6UL_USDHC1_ADDR = 0x02190000,
+
FSL_IMX6UL_SIM1_ADDR = 0x0218C000,
+ FSL_IMX6UL_SIMn_SIZE = (16 * KiB),
+
FSL_IMX6UL_ENET1_ADDR = 0x02188000,
+
FSL_IMX6UL_USBO2_USBMISC_ADDR = 0x02184800,
- FSL_IMX6UL_USBO2_USB_ADDR = 0x02184000,
+ FSL_IMX6UL_USBO2_USBMISC_SIZE = 0x200,
+
+ FSL_IMX6UL_USBO2_USB1_ADDR = 0x02184000,
+ FSL_IMX6UL_USBO2_USB2_ADDR = 0x02184200,
+
FSL_IMX6UL_USBO2_PL301_ADDR = 0x02180000,
+ FSL_IMX6UL_USBO2_PL301_SIZE = (16 * KiB),
+
FSL_IMX6UL_AIPS2_CFG_ADDR = 0x0217C000,
+ FSL_IMX6UL_AIPS2_CFG_SIZE = 0x100,
+
FSL_IMX6UL_CAAM_ADDR = 0x02140000,
+ FSL_IMX6UL_CAAM_SIZE = (16 * KiB),
+
FSL_IMX6UL_A7MPCORE_DAP_ADDR = 0x02100000,
+ FSL_IMX6UL_A7MPCORE_DAP_SIZE = (4 * KiB),
+ /* AIPS-2 End */
- /* AIPS-1 */
+ /* AIPS-1 Begin */
FSL_IMX6UL_PWM8_ADDR = 0x020FC000,
FSL_IMX6UL_PWM7_ADDR = 0x020F8000,
FSL_IMX6UL_PWM6_ADDR = 0x020F4000,
FSL_IMX6UL_PWM5_ADDR = 0x020F0000,
+
FSL_IMX6UL_SDMA_ADDR = 0x020EC000,
+ FSL_IMX6UL_SDMA_SIZE = 0x300,
+
FSL_IMX6UL_GPT2_ADDR = 0x020E8000,
+
FSL_IMX6UL_IOMUXC_GPR_ADDR = 0x020E4000,
+ FSL_IMX6UL_IOMUXC_GPR_SIZE = 0x40,
+
FSL_IMX6UL_IOMUXC_ADDR = 0x020E0000,
+ FSL_IMX6UL_IOMUXC_SIZE = 0x700,
+
FSL_IMX6UL_GPC_ADDR = 0x020DC000,
+
FSL_IMX6UL_SRC_ADDR = 0x020D8000,
+
FSL_IMX6UL_EPIT2_ADDR = 0x020D4000,
FSL_IMX6UL_EPIT1_ADDR = 0x020D0000,
+
FSL_IMX6UL_SNVS_HP_ADDR = 0x020CC000,
+
FSL_IMX6UL_USBPHY2_ADDR = 0x020CA000,
- FSL_IMX6UL_USBPHY2_SIZE = (4 * 1024),
FSL_IMX6UL_USBPHY1_ADDR = 0x020C9000,
- FSL_IMX6UL_USBPHY1_SIZE = (4 * 1024),
+
FSL_IMX6UL_ANALOG_ADDR = 0x020C8000,
+ FSL_IMX6UL_ANALOG_SIZE = 0x300,
+
FSL_IMX6UL_CCM_ADDR = 0x020C4000,
+
FSL_IMX6UL_WDOG2_ADDR = 0x020C0000,
FSL_IMX6UL_WDOG1_ADDR = 0x020BC000,
+
FSL_IMX6UL_KPP_ADDR = 0x020B8000,
+ FSL_IMX6UL_KPP_SIZE = 0x10,
+
FSL_IMX6UL_ENET2_ADDR = 0x020B4000,
+
FSL_IMX6UL_SNVS_LP_ADDR = 0x020B0000,
+ FSL_IMX6UL_SNVS_LP_SIZE = (16 * KiB),
+
FSL_IMX6UL_GPIO5_ADDR = 0x020AC000,
FSL_IMX6UL_GPIO4_ADDR = 0x020A8000,
FSL_IMX6UL_GPIO3_ADDR = 0x020A4000,
FSL_IMX6UL_GPIO2_ADDR = 0x020A0000,
FSL_IMX6UL_GPIO1_ADDR = 0x0209C000,
+
FSL_IMX6UL_GPT1_ADDR = 0x02098000,
+
FSL_IMX6UL_CAN2_ADDR = 0x02094000,
FSL_IMX6UL_CAN1_ADDR = 0x02090000,
+ FSL_IMX6UL_CANn_SIZE = (4 * KiB),
+
FSL_IMX6UL_PWM4_ADDR = 0x0208C000,
FSL_IMX6UL_PWM3_ADDR = 0x02088000,
FSL_IMX6UL_PWM2_ADDR = 0x02084000,
FSL_IMX6UL_PWM1_ADDR = 0x02080000,
+ FSL_IMX6UL_PWMn_SIZE = 0x20,
+
FSL_IMX6UL_AIPS1_CFG_ADDR = 0x0207C000,
+ FSL_IMX6UL_AIPS1_CFG_SIZE = (16 * KiB),
+
FSL_IMX6UL_BEE_ADDR = 0x02044000,
+ FSL_IMX6UL_BEE_SIZE = (16 * KiB),
+
FSL_IMX6UL_TOUCH_CTRL_ADDR = 0x02040000,
+ FSL_IMX6UL_TOUCH_CTRL_SIZE = 0x100,
+
FSL_IMX6UL_SPBA_ADDR = 0x0203C000,
+ FSL_IMX6UL_SPBA_SIZE = 0x100,
+
FSL_IMX6UL_ASRC_ADDR = 0x02034000,
+ FSL_IMX6UL_ASRC_SIZE = 0x100,
+
FSL_IMX6UL_SAI3_ADDR = 0x02030000,
FSL_IMX6UL_SAI2_ADDR = 0x0202C000,
FSL_IMX6UL_SAI1_ADDR = 0x02028000,
+ FSL_IMX6UL_SAIn_SIZE = 0x200,
+
FSL_IMX6UL_UART8_ADDR = 0x02024000,
FSL_IMX6UL_UART1_ADDR = 0x02020000,
FSL_IMX6UL_UART7_ADDR = 0x02018000,
+
FSL_IMX6UL_ECSPI4_ADDR = 0x02014000,
FSL_IMX6UL_ECSPI3_ADDR = 0x02010000,
FSL_IMX6UL_ECSPI2_ADDR = 0x0200C000,
FSL_IMX6UL_ECSPI1_ADDR = 0x02008000,
+
FSL_IMX6UL_SPDIF_ADDR = 0x02004000,
+ FSL_IMX6UL_SPDIF_SIZE = 0x100,
+ /* AIPS-1 End */
+
+ FSL_IMX6UL_BCH_ADDR = 0x01808000,
+ FSL_IMX6UL_BCH_SIZE = 0x200,
+
+ FSL_IMX6UL_GPMI_ADDR = 0x01806000,
+ FSL_IMX6UL_GPMI_SIZE = 0x200,
FSL_IMX6UL_APBH_DMA_ADDR = 0x01804000,
- FSL_IMX6UL_APBH_DMA_SIZE = (32 * 1024),
+ FSL_IMX6UL_APBH_DMA_SIZE = (4 * KiB),
FSL_IMX6UL_A7MPCORE_ADDR = 0x00A00000,
FSL_IMX6UL_OCRAM_ALIAS_ADDR = 0x00920000,
- FSL_IMX6UL_OCRAM_ALIAS_SIZE = 0x00060000,
+ FSL_IMX6UL_OCRAM_ALIAS_SIZE = (384 * KiB),
+
FSL_IMX6UL_OCRAM_MEM_ADDR = 0x00900000,
- FSL_IMX6UL_OCRAM_MEM_SIZE = 0x00020000,
+ FSL_IMX6UL_OCRAM_MEM_SIZE = (128 * KiB),
+
FSL_IMX6UL_CAAM_MEM_ADDR = 0x00100000,
- FSL_IMX6UL_CAAM_MEM_SIZE = 0x00008000,
+ FSL_IMX6UL_CAAM_MEM_SIZE = (32 * KiB),
+
FSL_IMX6UL_ROM_ADDR = 0x00000000,
- FSL_IMX6UL_ROM_SIZE = 0x00018000,
+ FSL_IMX6UL_ROM_SIZE = (96 * KiB),
};
enum FslIMX6ULIRQs {
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
index ad88923707..411fa1c2e3 100644
--- a/include/hw/arm/fsl-imx7.h
+++ b/include/hw/arm/fsl-imx7.h
@@ -19,29 +19,29 @@
#ifndef FSL_IMX7_H
#define FSL_IMX7_H
-#include "hw/arm/boot.h"
#include "hw/cpu/a15mpcore.h"
#include "hw/intc/imx_gpcv2.h"
#include "hw/misc/imx7_ccm.h"
#include "hw/misc/imx7_snvs.h"
#include "hw/misc/imx7_gpr.h"
-#include "hw/misc/imx6_src.h"
+#include "hw/misc/imx7_src.h"
#include "hw/watchdog/wdt_imx2.h"
#include "hw/gpio/imx_gpio.h"
#include "hw/char/imx_serial.h"
#include "hw/timer/imx_gpt.h"
#include "hw/timer/imx_epit.h"
#include "hw/i2c/imx_i2c.h"
-#include "hw/gpio/imx_gpio.h"
#include "hw/sd/sdhci.h"
#include "hw/ssi/imx_spi.h"
#include "hw/net/imx_fec.h"
#include "hw/pci-host/designware.h"
#include "hw/usb/chipidea.h"
#include "cpu.h"
+#include "qom/object.h"
+#include "qemu/units.h"
-#define TYPE_FSL_IMX7 "fsl,imx7"
-#define FSL_IMX7(obj) OBJECT_CHECK(FslIMX7State, (obj), TYPE_FSL_IMX7)
+#define TYPE_FSL_IMX7 "fsl-imx7"
+OBJECT_DECLARE_SIMPLE_TYPE(FslIMX7State, FSL_IMX7)
enum FslIMX7Configuration {
FSL_IMX7_NUM_CPUS = 2,
@@ -57,9 +57,12 @@ enum FslIMX7Configuration {
FSL_IMX7_NUM_ECSPIS = 4,
FSL_IMX7_NUM_USBS = 3,
FSL_IMX7_NUM_ADCS = 2,
+ FSL_IMX7_NUM_SAIS = 3,
+ FSL_IMX7_NUM_CANS = 2,
+ FSL_IMX7_NUM_PWMS = 4,
};
-typedef struct FslIMX7State {
+struct FslIMX7State {
/*< private >*/
DeviceState parent_obj;
@@ -71,6 +74,7 @@ typedef struct FslIMX7State {
IMX7CCMState ccm;
IMX7AnalogState analog;
IMX7SNVSState snvs;
+ IMX7SRCState src;
IMXGPCv2State gpcv2;
IMXSPIState spi[FSL_IMX7_NUM_ECSPIS];
IMXI2CState i2c[FSL_IMX7_NUM_I2CS];
@@ -81,126 +85,293 @@ typedef struct FslIMX7State {
IMX7GPRState gpr;
ChipideaState usb[FSL_IMX7_NUM_USBS];
DesignwarePCIEHost pcie;
+ MemoryRegion rom;
+ MemoryRegion caam;
+ MemoryRegion ocram;
+ MemoryRegion ocram_epdc;
+ MemoryRegion ocram_pxp;
+ MemoryRegion ocram_s;
+
uint32_t phy_num[FSL_IMX7_NUM_ETHS];
-} FslIMX7State;
+ bool phy_connected[FSL_IMX7_NUM_ETHS];
+};
enum FslIMX7MemoryMap {
FSL_IMX7_MMDC_ADDR = 0x80000000,
- FSL_IMX7_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
+ FSL_IMX7_MMDC_SIZE = (2 * GiB),
- FSL_IMX7_GPIO1_ADDR = 0x30200000,
- FSL_IMX7_GPIO2_ADDR = 0x30210000,
- FSL_IMX7_GPIO3_ADDR = 0x30220000,
- FSL_IMX7_GPIO4_ADDR = 0x30230000,
- FSL_IMX7_GPIO5_ADDR = 0x30240000,
- FSL_IMX7_GPIO6_ADDR = 0x30250000,
- FSL_IMX7_GPIO7_ADDR = 0x30260000,
+ FSL_IMX7_QSPI1_MEM_ADDR = 0x60000000,
+ FSL_IMX7_QSPI1_MEM_SIZE = (256 * MiB),
- FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
+ FSL_IMX7_PCIE1_MEM_ADDR = 0x40000000,
+ FSL_IMX7_PCIE1_MEM_SIZE = (256 * MiB),
- FSL_IMX7_WDOG1_ADDR = 0x30280000,
- FSL_IMX7_WDOG2_ADDR = 0x30290000,
- FSL_IMX7_WDOG3_ADDR = 0x302A0000,
- FSL_IMX7_WDOG4_ADDR = 0x302B0000,
+ FSL_IMX7_QSPI1_RX_BUF_ADDR = 0x34000000,
+ FSL_IMX7_QSPI1_RX_BUF_SIZE = (32 * MiB),
- FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
+ /* PCIe Peripherals */
+ FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
- FSL_IMX7_GPT1_ADDR = 0x302D0000,
- FSL_IMX7_GPT2_ADDR = 0x302E0000,
- FSL_IMX7_GPT3_ADDR = 0x302F0000,
- FSL_IMX7_GPT4_ADDR = 0x30300000,
+ /* MMAP Peripherals */
+ FSL_IMX7_DMA_APBH_ADDR = 0x33000000,
+ FSL_IMX7_DMA_APBH_SIZE = 0x8000,
+
+ /* GPV configuration */
+ FSL_IMX7_GPV6_ADDR = 0x32600000,
+ FSL_IMX7_GPV5_ADDR = 0x32500000,
+ FSL_IMX7_GPV4_ADDR = 0x32400000,
+ FSL_IMX7_GPV3_ADDR = 0x32300000,
+ FSL_IMX7_GPV2_ADDR = 0x32200000,
+ FSL_IMX7_GPV1_ADDR = 0x32100000,
+ FSL_IMX7_GPV0_ADDR = 0x32000000,
+ FSL_IMX7_GPVn_SIZE = (1 * MiB),
+
+ /* Arm Peripherals */
+ FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
- FSL_IMX7_IOMUXC_ADDR = 0x30330000,
- FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
- FSL_IMX7_IOMUXCn_SIZE = 0x1000,
+ /* AIPS-3 Begin */
- FSL_IMX7_OCOTP_ADDR = 0x30350000,
- FSL_IMX7_OCOTP_SIZE = 0x10000,
+ FSL_IMX7_ENET2_ADDR = 0x30BF0000,
+ FSL_IMX7_ENET1_ADDR = 0x30BE0000,
- FSL_IMX7_ANALOG_ADDR = 0x30360000,
- FSL_IMX7_SNVS_ADDR = 0x30370000,
- FSL_IMX7_CCM_ADDR = 0x30380000,
+ FSL_IMX7_SDMA_ADDR = 0x30BD0000,
+ FSL_IMX7_SDMA_SIZE = (4 * KiB),
- FSL_IMX7_SRC_ADDR = 0x30390000,
- FSL_IMX7_SRC_SIZE = 0x1000,
+ FSL_IMX7_EIM_ADDR = 0x30BC0000,
+ FSL_IMX7_EIM_SIZE = (4 * KiB),
- FSL_IMX7_ADC1_ADDR = 0x30610000,
- FSL_IMX7_ADC2_ADDR = 0x30620000,
- FSL_IMX7_ADCn_SIZE = 0x1000,
+ FSL_IMX7_QSPI_ADDR = 0x30BB0000,
+ FSL_IMX7_QSPI_SIZE = 0x8000,
- FSL_IMX7_PWM1_ADDR = 0x30660000,
- FSL_IMX7_PWM2_ADDR = 0x30670000,
- FSL_IMX7_PWM3_ADDR = 0x30680000,
- FSL_IMX7_PWM4_ADDR = 0x30690000,
- FSL_IMX7_PWMn_SIZE = 0x10000,
+ FSL_IMX7_SIM2_ADDR = 0x30BA0000,
+ FSL_IMX7_SIM1_ADDR = 0x30B90000,
+ FSL_IMX7_SIMn_SIZE = (4 * KiB),
- FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000,
- FSL_IMX7_PCIE_PHY_SIZE = 0x10000,
+ FSL_IMX7_USDHC3_ADDR = 0x30B60000,
+ FSL_IMX7_USDHC2_ADDR = 0x30B50000,
+ FSL_IMX7_USDHC1_ADDR = 0x30B40000,
- FSL_IMX7_GPC_ADDR = 0x303A0000,
+ FSL_IMX7_USB3_ADDR = 0x30B30000,
+ FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
+ FSL_IMX7_USB2_ADDR = 0x30B20000,
+ FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
+ FSL_IMX7_USB1_ADDR = 0x30B10000,
+ FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
+ FSL_IMX7_USBMISCn_SIZE = 0x200,
- FSL_IMX7_CAAM_ADDR = 0x30900000,
- FSL_IMX7_CAAM_SIZE = 0x40000,
+ FSL_IMX7_USB_PL301_ADDR = 0x30AD0000,
+ FSL_IMX7_USB_PL301_SIZE = (64 * KiB),
- FSL_IMX7_CAN1_ADDR = 0x30A00000,
- FSL_IMX7_CAN2_ADDR = 0x30A10000,
- FSL_IMX7_CANn_SIZE = 0x10000,
+ FSL_IMX7_SEMAPHORE_HS_ADDR = 0x30AC0000,
+ FSL_IMX7_SEMAPHORE_HS_SIZE = (64 * KiB),
+
+ FSL_IMX7_MUB_ADDR = 0x30AB0000,
+ FSL_IMX7_MUA_ADDR = 0x30AA0000,
+ FSL_IMX7_MUn_SIZE = (KiB),
+
+ FSL_IMX7_UART7_ADDR = 0x30A90000,
+ FSL_IMX7_UART6_ADDR = 0x30A80000,
+ FSL_IMX7_UART5_ADDR = 0x30A70000,
+ FSL_IMX7_UART4_ADDR = 0x30A60000,
- FSL_IMX7_I2C1_ADDR = 0x30A20000,
- FSL_IMX7_I2C2_ADDR = 0x30A30000,
- FSL_IMX7_I2C3_ADDR = 0x30A40000,
FSL_IMX7_I2C4_ADDR = 0x30A50000,
+ FSL_IMX7_I2C3_ADDR = 0x30A40000,
+ FSL_IMX7_I2C2_ADDR = 0x30A30000,
+ FSL_IMX7_I2C1_ADDR = 0x30A20000,
- FSL_IMX7_ECSPI1_ADDR = 0x30820000,
- FSL_IMX7_ECSPI2_ADDR = 0x30830000,
- FSL_IMX7_ECSPI3_ADDR = 0x30840000,
- FSL_IMX7_ECSPI4_ADDR = 0x30630000,
+ FSL_IMX7_CAN2_ADDR = 0x30A10000,
+ FSL_IMX7_CAN1_ADDR = 0x30A00000,
+ FSL_IMX7_CANn_SIZE = (4 * KiB),
- FSL_IMX7_LCDIF_ADDR = 0x30730000,
- FSL_IMX7_LCDIF_SIZE = 0x1000,
+ FSL_IMX7_AIPS3_CONF_ADDR = 0x309F0000,
+ FSL_IMX7_AIPS3_CONF_SIZE = (64 * KiB),
- FSL_IMX7_UART1_ADDR = 0x30860000,
+ FSL_IMX7_CAAM_ADDR = 0x30900000,
+ FSL_IMX7_CAAM_SIZE = (256 * KiB),
+
+ FSL_IMX7_SPBA_ADDR = 0x308F0000,
+ FSL_IMX7_SPBA_SIZE = (4 * KiB),
+
+ FSL_IMX7_SAI3_ADDR = 0x308C0000,
+ FSL_IMX7_SAI2_ADDR = 0x308B0000,
+ FSL_IMX7_SAI1_ADDR = 0x308A0000,
+ FSL_IMX7_SAIn_SIZE = (4 * KiB),
+
+ FSL_IMX7_UART3_ADDR = 0x30880000,
/*
* Some versions of the reference manual claim that UART2 is @
* 0x30870000, but experiments with HW + DT files in upstream
* Linux kernel show that not to be true and that block is
- * acutally located @ 0x30890000
+ * actually located @ 0x30890000
*/
FSL_IMX7_UART2_ADDR = 0x30890000,
- FSL_IMX7_UART3_ADDR = 0x30880000,
- FSL_IMX7_UART4_ADDR = 0x30A60000,
- FSL_IMX7_UART5_ADDR = 0x30A70000,
- FSL_IMX7_UART6_ADDR = 0x30A80000,
- FSL_IMX7_UART7_ADDR = 0x30A90000,
+ FSL_IMX7_UART1_ADDR = 0x30860000,
- FSL_IMX7_ENET1_ADDR = 0x30BE0000,
- FSL_IMX7_ENET2_ADDR = 0x30BF0000,
+ FSL_IMX7_ECSPI3_ADDR = 0x30840000,
+ FSL_IMX7_ECSPI2_ADDR = 0x30830000,
+ FSL_IMX7_ECSPI1_ADDR = 0x30820000,
+ FSL_IMX7_ECSPIn_SIZE = (4 * KiB),
- FSL_IMX7_USB1_ADDR = 0x30B10000,
- FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
- FSL_IMX7_USB2_ADDR = 0x30B20000,
- FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
- FSL_IMX7_USB3_ADDR = 0x30B30000,
- FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
- FSL_IMX7_USBMISCn_SIZE = 0x200,
+ /* AIPS-3 End */
- FSL_IMX7_USDHC1_ADDR = 0x30B40000,
- FSL_IMX7_USDHC2_ADDR = 0x30B50000,
- FSL_IMX7_USDHC3_ADDR = 0x30B60000,
+ /* AIPS-2 Begin */
- FSL_IMX7_SDMA_ADDR = 0x30BD0000,
- FSL_IMX7_SDMA_SIZE = 0x1000,
+ FSL_IMX7_AXI_DEBUG_MON_ADDR = 0x307E0000,
+ FSL_IMX7_AXI_DEBUG_MON_SIZE = (64 * KiB),
+
+ FSL_IMX7_PERFMON2_ADDR = 0x307D0000,
+ FSL_IMX7_PERFMON1_ADDR = 0x307C0000,
+ FSL_IMX7_PERFMONn_SIZE = (64 * KiB),
+
+ FSL_IMX7_DDRC_ADDR = 0x307A0000,
+ FSL_IMX7_DDRC_SIZE = (4 * KiB),
+
+ FSL_IMX7_DDRC_PHY_ADDR = 0x30790000,
+ FSL_IMX7_DDRC_PHY_SIZE = (4 * KiB),
+
+ FSL_IMX7_TZASC_ADDR = 0x30780000,
+ FSL_IMX7_TZASC_SIZE = (64 * KiB),
+
+ FSL_IMX7_MIPI_DSI_ADDR = 0x30760000,
+ FSL_IMX7_MIPI_DSI_SIZE = (4 * KiB),
+
+ FSL_IMX7_MIPI_CSI_ADDR = 0x30750000,
+ FSL_IMX7_MIPI_CSI_SIZE = 0x4000,
+
+ FSL_IMX7_LCDIF_ADDR = 0x30730000,
+ FSL_IMX7_LCDIF_SIZE = 0x8000,
+
+ FSL_IMX7_CSI_ADDR = 0x30710000,
+ FSL_IMX7_CSI_SIZE = (4 * KiB),
+
+ FSL_IMX7_PXP_ADDR = 0x30700000,
+ FSL_IMX7_PXP_SIZE = 0x4000,
+
+ FSL_IMX7_EPDC_ADDR = 0x306F0000,
+ FSL_IMX7_EPDC_SIZE = (4 * KiB),
+
+ FSL_IMX7_PCIE_PHY_ADDR = 0x306D0000,
+ FSL_IMX7_PCIE_PHY_SIZE = (4 * KiB),
+
+ FSL_IMX7_SYSCNT_CTRL_ADDR = 0x306C0000,
+ FSL_IMX7_SYSCNT_CMP_ADDR = 0x306B0000,
+ FSL_IMX7_SYSCNT_RD_ADDR = 0x306A0000,
+
+ FSL_IMX7_PWM4_ADDR = 0x30690000,
+ FSL_IMX7_PWM3_ADDR = 0x30680000,
+ FSL_IMX7_PWM2_ADDR = 0x30670000,
+ FSL_IMX7_PWM1_ADDR = 0x30660000,
+ FSL_IMX7_PWMn_SIZE = (4 * KiB),
+
+ FSL_IMX7_FlEXTIMER2_ADDR = 0x30650000,
+ FSL_IMX7_FlEXTIMER1_ADDR = 0x30640000,
+ FSL_IMX7_FLEXTIMERn_SIZE = (4 * KiB),
+
+ FSL_IMX7_ECSPI4_ADDR = 0x30630000,
+
+ FSL_IMX7_ADC2_ADDR = 0x30620000,
+ FSL_IMX7_ADC1_ADDR = 0x30610000,
+ FSL_IMX7_ADCn_SIZE = (4 * KiB),
+
+ FSL_IMX7_AIPS2_CONF_ADDR = 0x305F0000,
+ FSL_IMX7_AIPS2_CONF_SIZE = (64 * KiB),
+
+ /* AIPS-2 End */
+
+ /* AIPS-1 Begin */
+
+ FSL_IMX7_CSU_ADDR = 0x303E0000,
+ FSL_IMX7_CSU_SIZE = (64 * KiB),
+
+ FSL_IMX7_RDC_ADDR = 0x303D0000,
+ FSL_IMX7_RDC_SIZE = (4 * KiB),
+
+ FSL_IMX7_SEMAPHORE2_ADDR = 0x303C0000,
+ FSL_IMX7_SEMAPHORE1_ADDR = 0x303B0000,
+ FSL_IMX7_SEMAPHOREn_SIZE = (4 * KiB),
+
+ FSL_IMX7_GPC_ADDR = 0x303A0000,
+
+ FSL_IMX7_SRC_ADDR = 0x30390000,
+
+ FSL_IMX7_CCM_ADDR = 0x30380000,
+
+ FSL_IMX7_SNVS_HP_ADDR = 0x30370000,
+
+ FSL_IMX7_ANALOG_ADDR = 0x30360000,
+
+ FSL_IMX7_OCOTP_ADDR = 0x30350000,
+ FSL_IMX7_OCOTP_SIZE = 0x10000,
+
+ FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
+ FSL_IMX7_IOMUXC_GPR_SIZE = (4 * KiB),
+
+ FSL_IMX7_IOMUXC_ADDR = 0x30330000,
+ FSL_IMX7_IOMUXC_SIZE = (4 * KiB),
+
+ FSL_IMX7_KPP_ADDR = 0x30320000,
+ FSL_IMX7_KPP_SIZE = (4 * KiB),
+
+ FSL_IMX7_ROMCP_ADDR = 0x30310000,
+ FSL_IMX7_ROMCP_SIZE = (4 * KiB),
+
+ FSL_IMX7_GPT4_ADDR = 0x30300000,
+ FSL_IMX7_GPT3_ADDR = 0x302F0000,
+ FSL_IMX7_GPT2_ADDR = 0x302E0000,
+ FSL_IMX7_GPT1_ADDR = 0x302D0000,
+
+ FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
+ FSL_IMX7_IOMUXC_LPSR_SIZE = (4 * KiB),
+
+ FSL_IMX7_WDOG4_ADDR = 0x302B0000,
+ FSL_IMX7_WDOG3_ADDR = 0x302A0000,
+ FSL_IMX7_WDOG2_ADDR = 0x30290000,
+ FSL_IMX7_WDOG1_ADDR = 0x30280000,
+
+ FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
+
+ FSL_IMX7_GPIO7_ADDR = 0x30260000,
+ FSL_IMX7_GPIO6_ADDR = 0x30250000,
+ FSL_IMX7_GPIO5_ADDR = 0x30240000,
+ FSL_IMX7_GPIO4_ADDR = 0x30230000,
+ FSL_IMX7_GPIO3_ADDR = 0x30220000,
+ FSL_IMX7_GPIO2_ADDR = 0x30210000,
+ FSL_IMX7_GPIO1_ADDR = 0x30200000,
+
+ FSL_IMX7_AIPS1_CONF_ADDR = 0x301F0000,
+ FSL_IMX7_AIPS1_CONF_SIZE = (64 * KiB),
- FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
FSL_IMX7_A7MPCORE_DAP_ADDR = 0x30000000,
+ FSL_IMX7_A7MPCORE_DAP_SIZE = (1 * MiB),
- FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
- FSL_IMX7_PCIE_REG_SIZE = 16 * 1024,
+ /* AIPS-1 End */
- FSL_IMX7_GPR_ADDR = 0x30340000,
+ FSL_IMX7_EIM_CS0_ADDR = 0x28000000,
+ FSL_IMX7_EIM_CS0_SIZE = (128 * MiB),
- FSL_IMX7_DMA_APBH_ADDR = 0x33000000,
- FSL_IMX7_DMA_APBH_SIZE = 0x2000,
+ FSL_IMX7_OCRAM_PXP_ADDR = 0x00940000,
+ FSL_IMX7_OCRAM_PXP_SIZE = (32 * KiB),
+
+ FSL_IMX7_OCRAM_EPDC_ADDR = 0x00920000,
+ FSL_IMX7_OCRAM_EPDC_SIZE = (128 * KiB),
+
+ FSL_IMX7_OCRAM_MEM_ADDR = 0x00900000,
+ FSL_IMX7_OCRAM_MEM_SIZE = (128 * KiB),
+
+ FSL_IMX7_TCMU_ADDR = 0x00800000,
+ FSL_IMX7_TCMU_SIZE = (32 * KiB),
+
+ FSL_IMX7_TCML_ADDR = 0x007F8000,
+ FSL_IMX7_TCML_SIZE = (32 * KiB),
+
+ FSL_IMX7_OCRAM_S_ADDR = 0x00180000,
+ FSL_IMX7_OCRAM_S_SIZE = (32 * KiB),
+
+ FSL_IMX7_CAAM_MEM_ADDR = 0x00100000,
+ FSL_IMX7_CAAM_MEM_SIZE = (32 * KiB),
+
+ FSL_IMX7_ROM_ADDR = 0x00000000,
+ FSL_IMX7_ROM_SIZE = (96 * KiB),
};
enum FslIMX7IRQs {
@@ -229,6 +400,26 @@ enum FslIMX7IRQs {
FSL_IMX7_USB2_IRQ = 42,
FSL_IMX7_USB3_IRQ = 40,
+ FSL_IMX7_GPT1_IRQ = 55,
+ FSL_IMX7_GPT2_IRQ = 54,
+ FSL_IMX7_GPT3_IRQ = 53,
+ FSL_IMX7_GPT4_IRQ = 52,
+
+ FSL_IMX7_GPIO1_LOW_IRQ = 64,
+ FSL_IMX7_GPIO1_HIGH_IRQ = 65,
+ FSL_IMX7_GPIO2_LOW_IRQ = 66,
+ FSL_IMX7_GPIO2_HIGH_IRQ = 67,
+ FSL_IMX7_GPIO3_LOW_IRQ = 68,
+ FSL_IMX7_GPIO3_HIGH_IRQ = 69,
+ FSL_IMX7_GPIO4_LOW_IRQ = 70,
+ FSL_IMX7_GPIO4_HIGH_IRQ = 71,
+ FSL_IMX7_GPIO5_LOW_IRQ = 72,
+ FSL_IMX7_GPIO5_HIGH_IRQ = 73,
+ FSL_IMX7_GPIO6_LOW_IRQ = 74,
+ FSL_IMX7_GPIO6_HIGH_IRQ = 75,
+ FSL_IMX7_GPIO7_LOW_IRQ = 76,
+ FSL_IMX7_GPIO7_HIGH_IRQ = 77,
+
FSL_IMX7_WDOG1_IRQ = 78,
FSL_IMX7_WDOG2_IRQ = 79,
FSL_IMX7_WDOG3_IRQ = 10,
diff --git a/include/hw/arm/linux-boot-if.h b/include/hw/arm/linux-boot-if.h
index 7bbdfd1cc6..c85f33b2c5 100644
--- a/include/hw/arm/linux-boot-if.h
+++ b/include/hw/arm/linux-boot-if.h
@@ -9,16 +9,15 @@
#include "qom/object.h"
#define TYPE_ARM_LINUX_BOOT_IF "arm-linux-boot-if"
-#define ARM_LINUX_BOOT_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMLinuxBootIfClass, (klass), TYPE_ARM_LINUX_BOOT_IF)
-#define ARM_LINUX_BOOT_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMLinuxBootIfClass, (obj), TYPE_ARM_LINUX_BOOT_IF)
+typedef struct ARMLinuxBootIfClass ARMLinuxBootIfClass;
+DECLARE_CLASS_CHECKERS(ARMLinuxBootIfClass, ARM_LINUX_BOOT_IF,
+ TYPE_ARM_LINUX_BOOT_IF)
#define ARM_LINUX_BOOT_IF(obj) \
INTERFACE_CHECK(ARMLinuxBootIf, (obj), TYPE_ARM_LINUX_BOOT_IF)
typedef struct ARMLinuxBootIf ARMLinuxBootIf;
-typedef struct ARMLinuxBootIfClass {
+struct ARMLinuxBootIfClass {
/*< private >*/
InterfaceClass parent_class;
@@ -35,6 +34,6 @@ typedef struct ARMLinuxBootIfClass {
* (or for a CPU which doesn't support TrustZone)
*/
void (*arm_linux_init)(ARMLinuxBootIf *obj, bool secure_boot);
-} ARMLinuxBootIfClass;
+};
#endif
diff --git a/include/hw/arm/msf2-soc.h b/include/hw/arm/msf2-soc.h
index c9cb214aa6..9300664e8e 100644
--- a/include/hw/arm/msf2-soc.h
+++ b/include/hw/arm/msf2-soc.h
@@ -30,9 +30,11 @@
#include "hw/misc/msf2-sysreg.h"
#include "hw/ssi/mss-spi.h"
#include "hw/net/msf2-emac.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_MSF2_SOC "msf2-soc"
-#define MSF2_SOC(obj) OBJECT_CHECK(MSF2State, (obj), TYPE_MSF2_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(MSF2State, MSF2_SOC)
#define MSF2_NUM_SPIS 2
#define MSF2_NUM_UARTS 2
@@ -44,19 +46,17 @@
*/
#define MSF2_NUM_TIMERS 2
-typedef struct MSF2State {
- /*< private >*/
+struct MSF2State {
SysBusDevice parent_obj;
- /*< public >*/
ARMv7MState armv7m;
- char *cpu_type;
char *part_name;
uint64_t envm_size;
uint64_t esram_size;
- uint32_t m3clk;
+ Clock *m3clk;
+ Clock *refclk;
uint8_t apb0div;
uint8_t apb1div;
@@ -64,6 +64,10 @@ typedef struct MSF2State {
MSSTimerState timer;
MSSSpiState spi[MSF2_NUM_SPIS];
MSF2EmacState emac;
-} MSF2State;
+
+ MemoryRegion nvm;
+ MemoryRegion nvm_alias;
+ MemoryRegion sram;
+};
#endif
diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h
new file mode 100644
index 0000000000..4e0d210188
--- /dev/null
+++ b/include/hw/arm/npcm7xx.h
@@ -0,0 +1,139 @@
+/*
+ * Nuvoton NPCM7xx SoC family.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_H
+#define NPCM7XX_H
+
+#include "hw/boards.h"
+#include "hw/adc/npcm7xx_adc.h"
+#include "hw/core/split-irq.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/gpio/npcm7xx_gpio.h"
+#include "hw/i2c/npcm7xx_smbus.h"
+#include "hw/mem/npcm7xx_mc.h"
+#include "hw/misc/npcm7xx_clk.h"
+#include "hw/misc/npcm7xx_gcr.h"
+#include "hw/misc/npcm7xx_mft.h"
+#include "hw/misc/npcm7xx_pwm.h"
+#include "hw/misc/npcm7xx_rng.h"
+#include "hw/net/npcm7xx_emc.h"
+#include "hw/net/npcm_gmac.h"
+#include "hw/nvram/npcm7xx_otp.h"
+#include "hw/timer/npcm7xx_timer.h"
+#include "hw/ssi/npcm7xx_fiu.h"
+#include "hw/ssi/npcm_pspi.h"
+#include "hw/usb/hcd-ehci.h"
+#include "hw/usb/hcd-ohci.h"
+#include "target/arm/cpu.h"
+#include "hw/sd/npcm7xx_sdhci.h"
+
+#define NPCM7XX_MAX_NUM_CPUS (2)
+
+/* The first half of the address space is reserved for DDR4 DRAM. */
+#define NPCM7XX_DRAM_BA (0x00000000)
+#define NPCM7XX_DRAM_SZ (2 * GiB)
+
+/* Magic addresses for setting up direct kernel booting and SMP boot stubs. */
+#define NPCM7XX_LOADER_START (0x00000000) /* Start of SDRAM */
+#define NPCM7XX_SMP_LOADER_START (0xffff0000) /* Boot ROM */
+#define NPCM7XX_SMP_BOOTREG_ADDR (0xf080013c) /* GCR.SCRPAD */
+#define NPCM7XX_GIC_CPU_IF_ADDR (0xf03fe100) /* GIC within A9 */
+#define NPCM7XX_BOARD_SETUP_ADDR (0xffff1000) /* Boot ROM */
+
+#define NPCM7XX_NR_PWM_MODULES 2
+
+struct NPCM7xxMachine {
+ MachineState parent;
+ /*
+     * PWM fan splitter. Each splitter connects to one PWM output and
+ * multiple MFT inputs.
+ */
+ SplitIRQ fan_splitter[NPCM7XX_NR_PWM_MODULES *
+ NPCM7XX_PWM_PER_MODULE];
+};
+
+#define TYPE_NPCM7XX_MACHINE MACHINE_TYPE_NAME("npcm7xx")
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxMachine, NPCM7XX_MACHINE)
+
+typedef struct NPCM7xxMachineClass {
+ MachineClass parent;
+
+ const char *soc_type;
+} NPCM7xxMachineClass;
+
+#define NPCM7XX_MACHINE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(NPCM7xxMachineClass, (klass), TYPE_NPCM7XX_MACHINE)
+#define NPCM7XX_MACHINE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(NPCM7xxMachineClass, (obj), TYPE_NPCM7XX_MACHINE)
+
+struct NPCM7xxState {
+ DeviceState parent;
+
+ ARMCPU cpu[NPCM7XX_MAX_NUM_CPUS];
+ A9MPPrivState a9mpcore;
+
+ MemoryRegion sram;
+ MemoryRegion irom;
+ MemoryRegion ram3;
+ MemoryRegion *dram;
+
+ NPCM7xxGCRState gcr;
+ NPCM7xxCLKState clk;
+ NPCM7xxTimerCtrlState tim[3];
+ NPCM7xxADCState adc;
+ NPCM7xxPWMState pwm[NPCM7XX_NR_PWM_MODULES];
+ NPCM7xxMFTState mft[8];
+ NPCM7xxOTPState key_storage;
+ NPCM7xxOTPState fuse_array;
+ NPCM7xxMCState mc;
+ NPCM7xxRNGState rng;
+ NPCM7xxGPIOState gpio[8];
+ NPCM7xxSMBusState smbus[16];
+ EHCISysBusState ehci;
+ OHCISysBusState ohci;
+ NPCM7xxFIUState fiu[2];
+ NPCM7xxEMCState emc[2];
+ NPCMGMACState gmac[2];
+ NPCM7xxSDHCIState mmc;
+ NPCMPSPIState pspi[2];
+};
+
+#define TYPE_NPCM7XX "npcm7xx"
+OBJECT_DECLARE_TYPE(NPCM7xxState, NPCM7xxClass, NPCM7XX)
+
+#define TYPE_NPCM730 "npcm730"
+#define TYPE_NPCM750 "npcm750"
+
+typedef struct NPCM7xxClass {
+ DeviceClass parent;
+
+ /* Bitmask of modules that are permanently disabled on this chip. */
+ uint32_t disabled_modules;
+ /* Number of CPU cores enabled in this SoC class (may be 1 or 2). */
+ uint32_t num_cpus;
+} NPCM7xxClass;
+
+/**
+ * npcm7xx_load_kernel - Loads memory with everything needed to boot
+ * @machine - The machine containing the SoC to be booted.
+ * @soc - The SoC containing the CPU to be booted.
+ *
+ * This will set up the ARM boot info structure for the specific NPCM7xx
+ * derivative and call arm_load_kernel() to set up loading of the kernel, etc.
+ * into memory, if requested by the user.
+ */
+void npcm7xx_load_kernel(MachineState *machine, NPCM7xxState *soc);
+
+#endif /* NPCM7XX_H */
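As a rough sketch of the intended call site for npcm7xx_load_kernel() (the machine-init shape below is an assumption for illustration, not the actual board code):

    /* Hypothetical board init: create and realize the SoC, then boot it. */
    static void example_npcm750_machine_init(MachineState *machine)
    {
        NPCM7xxState *soc = NPCM7XX(object_new(TYPE_NPCM750));

        /* ... set properties, map DRAM, realize the SoC ... */

        /* Fills in arm_boot_info and calls arm_load_kernel() as described. */
        npcm7xx_load_kernel(machine, soc);
    }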
diff --git a/include/hw/arm/nrf51_soc.h b/include/hw/arm/nrf51_soc.h
index 0cb78aafea..e52a56e75e 100644
--- a/include/hw/arm/nrf51_soc.h
+++ b/include/hw/arm/nrf51_soc.h
@@ -17,14 +17,15 @@
#include "hw/gpio/nrf51_gpio.h"
#include "hw/nvram/nrf51_nvm.h"
#include "hw/timer/nrf51_timer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_NRF51_SOC "nrf51-soc"
-#define NRF51_SOC(obj) \
- OBJECT_CHECK(NRF51State, (obj), TYPE_NRF51_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51State, NRF51_SOC)
#define NRF51_NUM_TIMERS 3
-typedef struct NRF51State {
+struct NRF51State {
/*< private >*/
SysBusDevice parent_obj;
@@ -50,6 +51,7 @@ typedef struct NRF51State {
MemoryRegion container;
-} NRF51State;
+ Clock *sysclk;
+};
#endif
diff --git a/include/hw/arm/omap.h b/include/hw/arm/omap.h
index 6be386d0e2..40ee8ea9e5 100644
--- a/include/hw/arm/omap.h
+++ b/include/hw/arm/omap.h
@@ -24,6 +24,7 @@
#include "hw/input/tsc2xxx.h"
#include "target/arm/cpu-qom.h"
#include "qemu/log.h"
+#include "qom/object.h"
# define OMAP_EMIFS_BASE 0x00000000
# define OMAP2_Q0_BASE 0x00000000
@@ -69,10 +70,9 @@ void omap_clk_reparent(omap_clk clk, omap_clk parent);
/* omap_intc.c */
#define TYPE_OMAP_INTC "common-omap-intc"
-#define OMAP_INTC(obj) \
- OBJECT_CHECK(omap_intr_handler, (obj), TYPE_OMAP_INTC)
+typedef struct OMAPIntcState OMAPIntcState;
+DECLARE_INSTANCE_CHECKER(OMAPIntcState, OMAP_INTC, TYPE_OMAP_INTC)
-typedef struct omap_intr_handler_s omap_intr_handler;
/*
* TODO: Ideally we should have a clock framework that
@@ -88,14 +88,13 @@ typedef struct omap_intr_handler_s omap_intr_handler;
* (ie the struct omap_mpu_state_s*) to do the clockname to pointer
* translation.)
*/
-void omap_intc_set_iclk(omap_intr_handler *intc, omap_clk clk);
-void omap_intc_set_fclk(omap_intr_handler *intc, omap_clk clk);
+void omap_intc_set_iclk(OMAPIntcState *intc, omap_clk clk);
+void omap_intc_set_fclk(OMAPIntcState *intc, omap_clk clk);
/* omap_i2c.c */
#define TYPE_OMAP_I2C "omap_i2c"
-#define OMAP_I2C(obj) OBJECT_CHECK(OMAPI2CState, (obj), TYPE_OMAP_I2C)
+OBJECT_DECLARE_SIMPLE_TYPE(OMAPI2CState, OMAP_I2C)
-typedef struct OMAPI2CState OMAPI2CState;
/* TODO: clock framework (see above) */
void omap_i2c_set_iclk(OMAPI2CState *i2c, omap_clk clk);
@@ -103,21 +102,20 @@ void omap_i2c_set_fclk(OMAPI2CState *i2c, omap_clk clk);
/* omap_gpio.c */
#define TYPE_OMAP1_GPIO "omap-gpio"
-#define OMAP1_GPIO(obj) \
- OBJECT_CHECK(struct omap_gpif_s, (obj), TYPE_OMAP1_GPIO)
+typedef struct Omap1GpioState Omap1GpioState;
+DECLARE_INSTANCE_CHECKER(Omap1GpioState, OMAP1_GPIO,
+ TYPE_OMAP1_GPIO)
#define TYPE_OMAP2_GPIO "omap2-gpio"
-#define OMAP2_GPIO(obj) \
- OBJECT_CHECK(struct omap2_gpif_s, (obj), TYPE_OMAP2_GPIO)
-
-typedef struct omap_gpif_s omap_gpif;
-typedef struct omap2_gpif_s omap2_gpif;
+typedef struct Omap2GpioState Omap2GpioState;
+DECLARE_INSTANCE_CHECKER(Omap2GpioState, OMAP2_GPIO,
+ TYPE_OMAP2_GPIO)
/* TODO: clock framework (see above) */
-void omap_gpio_set_clk(omap_gpif *gpio, omap_clk clk);
+void omap_gpio_set_clk(Omap1GpioState *gpio, omap_clk clk);
-void omap2_gpio_set_iclk(omap2_gpif *gpio, omap_clk clk);
-void omap2_gpio_set_fclk(omap2_gpif *gpio, uint8_t i, omap_clk clk);
+void omap2_gpio_set_iclk(Omap2GpioState *gpio, omap_clk clk);
+void omap2_gpio_set_fclk(Omap2GpioState *gpio, uint8_t i, omap_clk clk);
/* OMAP2 l4 Interconnect */
struct omap_l4_s;
@@ -726,7 +724,6 @@ struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem,
qemu_irq txdma, qemu_irq rxdma,
const char *label, Chardev *chr);
void omap_uart_reset(struct omap_uart_s *s);
-void omap_uart_attach(struct omap_uart_s *s, Chardev *chr);
struct omap_mpuio_s;
qemu_irq *omap_mpuio_in_get(struct omap_mpuio_s *s);
@@ -1011,7 +1008,8 @@ void omap_mpu_wakeup(void *opaque, int irq, int req);
__func__, paddr)
/* OMAP-specific Linux bootloader tags for the ATAG_BOARD area
- (Board-specifc tags are not here) */
+ * (Board-specific tags are not here)
+ */
#define OMAP_TAG_CLOCK 0x4f01
#define OMAP_TAG_MMC 0x4f02
#define OMAP_TAG_SERIAL_CONSOLE 0x4f03
diff --git a/include/hw/arm/pxa.h b/include/hw/arm/pxa.h
index 8843e5f910..4c6caee113 100644
--- a/include/hw/arm/pxa.h
+++ b/include/hw/arm/pxa.h
@@ -13,6 +13,7 @@
#include "exec/memory.h"
#include "target/arm/cpu-qom.h"
#include "hw/pcmcia.h"
+#include "qom/object.h"
/* Interrupt numbers */
# define PXA2XX_PIC_SSP3 0
@@ -86,18 +87,19 @@ PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem,
void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler);
/* pxa2xx_mmci.c */
-typedef struct PXA2xxMMCIState PXA2xxMMCIState;
+#define TYPE_PXA2XX_MMCI "pxa2xx-mmci"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxMMCIState, PXA2XX_MMCI)
+
PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem,
hwaddr base,
- BlockBackend *blk, qemu_irq irq,
- qemu_irq rx_dma, qemu_irq tx_dma);
+ qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma);
void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly,
qemu_irq coverswitch);
/* pxa2xx_pcmcia.c */
-typedef struct PXA2xxPCMCIAState PXA2xxPCMCIAState;
-PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem,
- hwaddr base);
+#define TYPE_PXA2XX_PCMCIA "pxa2xx-pcmcia"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPCMCIAState, PXA2XX_PCMCIA)
+
int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card);
int pxa2xx_pcmcia_detach(void *opaque);
void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq);
@@ -115,13 +117,17 @@ void pxa27x_register_keypad(PXA2xxKeyPadState *kp,
const struct keymap *map, int size);
/* pxa2xx.c */
-typedef struct PXA2xxI2CState PXA2xxI2CState;
+#define TYPE_PXA2XX_I2C "pxa2xx_i2c"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CState, PXA2XX_I2C)
+
PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base,
qemu_irq irq, uint32_t page_size);
I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s);
typedef struct PXA2xxI2SState PXA2xxI2SState;
-typedef struct PXA2xxFIrState PXA2xxFIrState;
+
+#define TYPE_PXA2XX_FIR "pxa2xx-fir"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxFIrState, PXA2XX_FIR)
typedef struct {
ARMCPU *cpu;
@@ -185,8 +191,7 @@ struct PXA2xxI2SState {
# define PA_FMT "0x%08lx"
-PXA2xxState *pxa270_init(MemoryRegion *address_space, unsigned int sdram_size,
- const char *revision);
-PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size);
+PXA2xxState *pxa270_init(unsigned int sdram_size, const char *revision);
+PXA2xxState *pxa255_init(unsigned int sdram_size);
#endif /* PXA_H */
diff --git a/include/hw/arm/raspberrypi-fw-defs.h b/include/hw/arm/raspberrypi-fw-defs.h
new file mode 100644
index 0000000000..8b404e0533
--- /dev/null
+++ b/include/hw/arm/raspberrypi-fw-defs.h
@@ -0,0 +1,173 @@
+/*
+ * Raspberry Pi firmware definitions
+ *
+ * Copyright (C) 2022 Auriga LLC, based on Linux kernel
+ * `include/soc/bcm2835/raspberrypi-firmware.h` (Copyright © 2015 Broadcom)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_
+#define INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_
+
+
+enum rpi_firmware_property_tag {
+ RPI_FWREQ_PROPERTY_END = 0,
+ RPI_FWREQ_GET_FIRMWARE_REVISION = 0x00000001,
+ RPI_FWREQ_GET_FIRMWARE_VARIANT = 0x00000002,
+ RPI_FWREQ_GET_FIRMWARE_HASH = 0x00000003,
+
+ RPI_FWREQ_SET_CURSOR_INFO = 0x00008010,
+ RPI_FWREQ_SET_CURSOR_STATE = 0x00008011,
+
+ RPI_FWREQ_GET_BOARD_MODEL = 0x00010001,
+ RPI_FWREQ_GET_BOARD_REVISION = 0x00010002,
+ RPI_FWREQ_GET_BOARD_MAC_ADDRESS = 0x00010003,
+ RPI_FWREQ_GET_BOARD_SERIAL = 0x00010004,
+ RPI_FWREQ_GET_ARM_MEMORY = 0x00010005,
+ RPI_FWREQ_GET_VC_MEMORY = 0x00010006,
+ RPI_FWREQ_GET_CLOCKS = 0x00010007,
+ RPI_FWREQ_GET_POWER_STATE = 0x00020001,
+ RPI_FWREQ_GET_TIMING = 0x00020002,
+ RPI_FWREQ_SET_POWER_STATE = 0x00028001,
+ RPI_FWREQ_GET_CLOCK_STATE = 0x00030001,
+ RPI_FWREQ_GET_CLOCK_RATE = 0x00030002,
+ RPI_FWREQ_GET_VOLTAGE = 0x00030003,
+ RPI_FWREQ_GET_MAX_CLOCK_RATE = 0x00030004,
+ RPI_FWREQ_GET_MAX_VOLTAGE = 0x00030005,
+ RPI_FWREQ_GET_TEMPERATURE = 0x00030006,
+ RPI_FWREQ_GET_MIN_CLOCK_RATE = 0x00030007,
+ RPI_FWREQ_GET_MIN_VOLTAGE = 0x00030008,
+ RPI_FWREQ_GET_TURBO = 0x00030009,
+ RPI_FWREQ_GET_MAX_TEMPERATURE = 0x0003000a,
+ RPI_FWREQ_GET_STC = 0x0003000b,
+ RPI_FWREQ_ALLOCATE_MEMORY = 0x0003000c,
+ RPI_FWREQ_LOCK_MEMORY = 0x0003000d,
+ RPI_FWREQ_UNLOCK_MEMORY = 0x0003000e,
+ RPI_FWREQ_RELEASE_MEMORY = 0x0003000f,
+ RPI_FWREQ_EXECUTE_CODE = 0x00030010,
+ RPI_FWREQ_EXECUTE_QPU = 0x00030011,
+ RPI_FWREQ_SET_ENABLE_QPU = 0x00030012,
+ RPI_FWREQ_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014,
+ RPI_FWREQ_GET_EDID_BLOCK = 0x00030020,
+ RPI_FWREQ_GET_CUSTOMER_OTP = 0x00030021,
+ RPI_FWREQ_GET_EDID_BLOCK_DISPLAY = 0x00030023,
+ RPI_FWREQ_GET_DOMAIN_STATE = 0x00030030,
+ RPI_FWREQ_GET_THROTTLED = 0x00030046,
+ RPI_FWREQ_GET_CLOCK_MEASURED = 0x00030047,
+ RPI_FWREQ_NOTIFY_REBOOT = 0x00030048,
+ RPI_FWREQ_SET_CLOCK_STATE = 0x00038001,
+ RPI_FWREQ_SET_CLOCK_RATE = 0x00038002,
+ RPI_FWREQ_SET_VOLTAGE = 0x00038003,
+ RPI_FWREQ_SET_MAX_CLOCK_RATE = 0x00038004,
+ RPI_FWREQ_SET_MIN_CLOCK_RATE = 0x00038007,
+ RPI_FWREQ_SET_TURBO = 0x00038009,
+ RPI_FWREQ_SET_CUSTOMER_OTP = 0x00038021,
+ RPI_FWREQ_SET_DOMAIN_STATE = 0x00038030,
+ RPI_FWREQ_GET_GPIO_STATE = 0x00030041,
+ RPI_FWREQ_SET_GPIO_STATE = 0x00038041,
+ RPI_FWREQ_SET_SDHOST_CLOCK = 0x00038042,
+ RPI_FWREQ_GET_GPIO_CONFIG = 0x00030043,
+ RPI_FWREQ_SET_GPIO_CONFIG = 0x00038043,
+ RPI_FWREQ_GET_PERIPH_REG = 0x00030045,
+ RPI_FWREQ_SET_PERIPH_REG = 0x00038045,
+ RPI_FWREQ_GET_POE_HAT_VAL = 0x00030049,
+ RPI_FWREQ_SET_POE_HAT_VAL = 0x00038049,
+ RPI_FWREQ_SET_POE_HAT_VAL_OLD = 0x00030050,
+ RPI_FWREQ_NOTIFY_XHCI_RESET = 0x00030058,
+ RPI_FWREQ_GET_REBOOT_FLAGS = 0x00030064,
+ RPI_FWREQ_SET_REBOOT_FLAGS = 0x00038064,
+ RPI_FWREQ_NOTIFY_DISPLAY_DONE = 0x00030066,
+
+ /* Dispmanx TAGS */
+ RPI_FWREQ_FRAMEBUFFER_ALLOCATE = 0x00040001,
+ RPI_FWREQ_FRAMEBUFFER_BLANK = 0x00040002,
+ RPI_FWREQ_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT = 0x00040003,
+ RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT = 0x00040004,
+ RPI_FWREQ_FRAMEBUFFER_GET_DEPTH = 0x00040005,
+ RPI_FWREQ_FRAMEBUFFER_GET_PIXEL_ORDER = 0x00040006,
+ RPI_FWREQ_FRAMEBUFFER_GET_ALPHA_MODE = 0x00040007,
+ RPI_FWREQ_FRAMEBUFFER_GET_PITCH = 0x00040008,
+ RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 0x00040009,
+ RPI_FWREQ_FRAMEBUFFER_GET_OVERSCAN = 0x0004000a,
+ RPI_FWREQ_FRAMEBUFFER_GET_PALETTE = 0x0004000b,
+ RPI_FWREQ_FRAMEBUFFER_GET_LAYER = 0x0004000c,
+ RPI_FWREQ_FRAMEBUFFER_GET_TRANSFORM = 0x0004000d,
+ RPI_FWREQ_FRAMEBUFFER_GET_VSYNC = 0x0004000e,
+ RPI_FWREQ_FRAMEBUFFER_GET_TOUCHBUF = 0x0004000f,
+ RPI_FWREQ_FRAMEBUFFER_GET_GPIOVIRTBUF = 0x00040010,
+ RPI_FWREQ_FRAMEBUFFER_RELEASE = 0x00048001,
+ RPI_FWREQ_FRAMEBUFFER_GET_DISPLAY_ID = 0x00040016,
+ RPI_FWREQ_FRAMEBUFFER_SET_DISPLAY_NUM = 0x00048013,
+ RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS = 0x00040013,
+ RPI_FWREQ_FRAMEBUFFER_GET_DISPLAY_SETTINGS = 0x00040014,
+ RPI_FWREQ_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 0x00044003,
+ RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 0x00044004,
+ RPI_FWREQ_FRAMEBUFFER_TEST_DEPTH = 0x00044005,
+ RPI_FWREQ_FRAMEBUFFER_TEST_PIXEL_ORDER = 0x00044006,
+ RPI_FWREQ_FRAMEBUFFER_TEST_ALPHA_MODE = 0x00044007,
+ RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 0x00044009,
+ RPI_FWREQ_FRAMEBUFFER_TEST_OVERSCAN = 0x0004400a,
+ RPI_FWREQ_FRAMEBUFFER_TEST_PALETTE = 0x0004400b,
+ RPI_FWREQ_FRAMEBUFFER_TEST_LAYER = 0x0004400c,
+ RPI_FWREQ_FRAMEBUFFER_TEST_TRANSFORM = 0x0004400d,
+ RPI_FWREQ_FRAMEBUFFER_TEST_VSYNC = 0x0004400e,
+ RPI_FWREQ_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 0x00048003,
+ RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 0x00048004,
+ RPI_FWREQ_FRAMEBUFFER_SET_DEPTH = 0x00048005,
+ RPI_FWREQ_FRAMEBUFFER_SET_PIXEL_ORDER = 0x00048006,
+ RPI_FWREQ_FRAMEBUFFER_SET_ALPHA_MODE = 0x00048007,
+ RPI_FWREQ_FRAMEBUFFER_SET_PITCH = 0x00048008,
+ RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 0x00048009,
+ RPI_FWREQ_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
+ RPI_FWREQ_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+
+ RPI_FWREQ_FRAMEBUFFER_SET_TOUCHBUF = 0x0004801f,
+ RPI_FWREQ_FRAMEBUFFER_SET_GPIOVIRTBUF = 0x00048020,
+ RPI_FWREQ_FRAMEBUFFER_SET_VSYNC = 0x0004800e,
+ RPI_FWREQ_FRAMEBUFFER_SET_LAYER = 0x0004800c,
+ RPI_FWREQ_FRAMEBUFFER_SET_TRANSFORM = 0x0004800d,
+ RPI_FWREQ_FRAMEBUFFER_SET_BACKLIGHT = 0x0004800f,
+
+ RPI_FWREQ_VCHIQ_INIT = 0x00048010,
+
+ RPI_FWREQ_SET_PLANE = 0x00048015,
+ RPI_FWREQ_GET_DISPLAY_TIMING = 0x00040017,
+ RPI_FWREQ_SET_TIMING = 0x00048017,
+ RPI_FWREQ_GET_DISPLAY_CFG = 0x00040018,
+ RPI_FWREQ_SET_DISPLAY_POWER = 0x00048019,
+ RPI_FWREQ_GET_COMMAND_LINE = 0x00050001,
+ RPI_FWREQ_GET_DMA_CHANNELS = 0x00060001,
+};
+
+enum rpi_firmware_clk_id {
+ RPI_FIRMWARE_EMMC_CLK_ID = 1,
+ RPI_FIRMWARE_UART_CLK_ID,
+ RPI_FIRMWARE_ARM_CLK_ID,
+ RPI_FIRMWARE_CORE_CLK_ID,
+ RPI_FIRMWARE_V3D_CLK_ID,
+ RPI_FIRMWARE_H264_CLK_ID,
+ RPI_FIRMWARE_ISP_CLK_ID,
+ RPI_FIRMWARE_SDRAM_CLK_ID,
+ RPI_FIRMWARE_PIXEL_CLK_ID,
+ RPI_FIRMWARE_PWM_CLK_ID,
+ RPI_FIRMWARE_HEVC_CLK_ID,
+ RPI_FIRMWARE_EMMC2_CLK_ID,
+ RPI_FIRMWARE_M2MC_CLK_ID,
+ RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
+ RPI_FIRMWARE_VEC_CLK_ID,
+ RPI_FIRMWARE_NUM_CLK_ID,
+};
+
+struct rpi_firmware_property_tag_header {
+ uint32_t tag;
+ uint32_t buf_size;
+ uint32_t req_resp_size;
+};
+
+typedef struct rpi_firmware_prop_request {
+ struct rpi_firmware_property_tag_header hdr;
+ uint8_t payload[0];
+} rpi_firmware_prop_request_t;
+
+#endif /* INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_ */
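For context, every tag in a Raspberry Pi mailbox property request starts with the header above. A minimal sketch of filling one in, following the usual mailbox property convention (buf_size is the value-buffer size in bytes; req_resp_size is 0 in a request and is rewritten by the firmware in the response); that convention is assumed here rather than defined by this header:

    /* Sketch: a single GET_FIRMWARE_REVISION tag with a 4-byte value buffer. */
    struct {
        struct rpi_firmware_property_tag_header hdr;
        uint32_t revision;                  /* response lands here */
    } req = {
        .hdr = {
            .tag           = RPI_FWREQ_GET_FIRMWARE_REVISION,
            .buf_size      = sizeof(uint32_t),
            .req_resp_size = 0,
        },
    };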
diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h
index 61b04a1bd4..7bc4807fa5 100644
--- a/include/hw/arm/raspi_platform.h
+++ b/include/hw/arm/raspi_platform.h
@@ -18,48 +18,117 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * Various undocumented addresses and names come from Herman Hermitage's VC4
+ * documentation:
+ * https://github.com/hermanhermitage/videocoreiv/wiki/MMIO-Register-map
*/
#ifndef HW_ARM_RASPI_PLATFORM_H
#define HW_ARM_RASPI_PLATFORM_H
+#include "hw/boards.h"
+#include "hw/arm/boot.h"
+
+/* Registered machine type (matches RPi Foundation bootloader and U-Boot) */
+#define MACH_TYPE_BCM2708 3138
+
+#define TYPE_RASPI_BASE_MACHINE MACHINE_TYPE_NAME("raspi-base")
+OBJECT_DECLARE_TYPE(RaspiBaseMachineState, RaspiBaseMachineClass,
+ RASPI_BASE_MACHINE)
+
+struct RaspiBaseMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+ /*< public >*/
+ struct arm_boot_info binfo;
+};
+
+struct RaspiBaseMachineClass {
+ /*< private >*/
+ MachineClass parent_obj;
+ /*< public >*/
+ uint32_t board_rev;
+};
+
+/* Common functions for raspberry pi machines */
+const char *board_soc_type(uint32_t board_rev);
+void raspi_machine_init(MachineState *machine);
+
+typedef struct BCM283XBaseState BCM283XBaseState;
+void raspi_base_machine_init(MachineState *machine,
+ BCM283XBaseState *soc);
+
+void raspi_machine_class_common_init(MachineClass *mc,
+ uint32_t board_rev);
+uint64_t board_ram_size(uint32_t board_rev);
+
#define MSYNC_OFFSET 0x0000 /* Multicore Sync Block */
-#define IC0_OFFSET 0x2000
+#define CCPT_OFFSET 0x1000 /* Compact Camera Port 2 TX */
+#define INTE_OFFSET 0x2000 /* VC Interrupt controller */
#define ST_OFFSET 0x3000 /* System Timer */
+#define TXP_OFFSET 0x4000 /* Transposer */
+#define JPEG_OFFSET 0x5000
#define MPHI_OFFSET 0x6000 /* Message-based Parallel Host Intf. */
#define DMA_OFFSET 0x7000 /* DMA controller, channels 0-14 */
-#define ARM_OFFSET 0xB000 /* BCM2708 ARM control block */
+#define ARBA_OFFSET 0x9000
+#define BRDG_OFFSET 0xa000 /* RPiVid ASB for BCM2838 (BCM2711) */
+#define ARM_OFFSET 0xB000 /* ARM control block */
#define ARMCTRL_OFFSET (ARM_OFFSET + 0x000)
#define ARMCTRL_IC_OFFSET (ARM_OFFSET + 0x200) /* Interrupt controller */
-#define ARMCTRL_TIMER0_1_OFFSET (ARM_OFFSET + 0x400) /* Timer 0 and 1 */
+#define ARMCTRL_TIMER0_1_OFFSET (ARM_OFFSET + 0x400) /* Timer 0 and 1 (SP804) */
#define ARMCTRL_0_SBM_OFFSET (ARM_OFFSET + 0x800) /* User 0 (ARM) Semaphores
* Doorbells & Mailboxes */
-#define CPRMAN_OFFSET 0x100000 /* Power Management, Watchdog */
-#define CM_OFFSET 0x101000 /* Clock Management */
-#define A2W_OFFSET 0x102000 /* Reset controller */
+#define PM_OFFSET 0x100000 /* Power Management */
+#define CPRMAN_OFFSET 0x101000 /* Clock Management */
#define AVS_OFFSET 0x103000 /* Audio Video Standard */
#define RNG_OFFSET 0x104000
#define GPIO_OFFSET 0x200000
-#define UART0_OFFSET 0x201000
-#define MMCI0_OFFSET 0x202000
-#define I2S_OFFSET 0x203000
-#define SPI0_OFFSET 0x204000
+#define UART0_OFFSET 0x201000 /* PL011 */
+#define MMCI0_OFFSET 0x202000 /* Legacy MMC */
+#define I2S_OFFSET 0x203000 /* PCM */
+#define SPI0_OFFSET 0x204000 /* SPI master */
#define BSC0_OFFSET 0x205000 /* BSC0 I2C/TWI */
+#define PIXV0_OFFSET 0x206000
+#define PIXV1_OFFSET 0x207000
+#define DPI_OFFSET 0x208000
+#define DSI0_OFFSET 0x209000 /* Display Serial Interface */
+#define PWM_OFFSET 0x20c000
+#define PERM_OFFSET 0x20d000
+#define TEC_OFFSET 0x20e000
#define OTP_OFFSET 0x20f000
+#define SLIM_OFFSET 0x210000 /* SLIMbus */
+#define CPG_OFFSET 0x211000
#define THERMAL_OFFSET 0x212000
-#define BSC_SL_OFFSET 0x214000 /* SPI slave */
+#define AVSP_OFFSET 0x213000
+#define BSC_SL_OFFSET 0x214000 /* SPI slave (bootrom) */
#define AUX_OFFSET 0x215000 /* AUX: UART1/SPI1/SPI2 */
#define EMMC1_OFFSET 0x300000
+#define EMMC2_OFFSET 0x340000
+#define HVS_OFFSET 0x400000
#define SMI_OFFSET 0x600000
+#define DSI1_OFFSET 0x700000
+#define UCAM_OFFSET 0x800000
+#define CMI_OFFSET 0x802000
#define BSC1_OFFSET 0x804000 /* BSC1 I2C/TWI */
#define BSC2_OFFSET 0x805000 /* BSC2 I2C/TWI */
+#define VECA_OFFSET 0x806000
+#define PIXV2_OFFSET 0x807000
+#define HDMI_OFFSET 0x808000
+#define HDCP_OFFSET 0x809000
+#define ARBR0_OFFSET 0x80a000
#define DBUS_OFFSET 0x900000
#define AVE0_OFFSET 0x910000
#define USB_OTG_OFFSET 0x980000 /* DTC_OTG USB controller */
+#define V3D_OFFSET 0xc00000
#define SDRAMC_OFFSET 0xe00000
+#define L2CC_OFFSET 0xe01000 /* Level 2 Cache controller */
+#define L1CC_OFFSET 0xe02000 /* Level 1 Cache controller */
+#define ARBR1_OFFSET 0xe04000
#define DMA15_OFFSET 0xE05000 /* DMA controller, channel 15 */
+#define DCRC_OFFSET 0xe07000
+#define AXIP_OFFSET 0xe08000
/* GPU interrupts */
#define INTERRUPT_TIMER0 0
@@ -137,4 +206,14 @@
#define INTERRUPT_ILLEGAL_TYPE0 6
#define INTERRUPT_ILLEGAL_TYPE1 7
+/* Clock rates */
+#define RPI_FIRMWARE_EMMC_CLK_RATE 50000000
+#define RPI_FIRMWARE_UART_CLK_RATE 3000000
+/*
+ * TODO: this is really SoC-specific; we might want to
+ * set it per-SoC if it turns out any guests care.
+ */
+#define RPI_FIRMWARE_CORE_CLK_RATE 350000000
+#define RPI_FIRMWARE_DEFAULT_CLK_RATE 700000000
+
#endif
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index ca4a4b1ad1..5ec2e6c1a4 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -21,12 +21,21 @@
#include "hw/sysbus.h"
#include "hw/pci/pci.h"
+#include "qom/object.h"
-#define SMMU_PCI_BUS_MAX 256
-#define SMMU_PCI_DEVFN_MAX 256
-#define SMMU_PCI_DEVFN(sid) (sid & 0xFF)
+#define SMMU_PCI_BUS_MAX 256
+#define SMMU_PCI_DEVFN_MAX 256
+#define SMMU_PCI_DEVFN(sid) (sid & 0xFF)
-#define SMMU_MAX_VA_BITS 48
+/* VMSAv8-64 Translation constants and functions */
+#define VMSA_LEVELS 4
+#define VMSA_MAX_S2_CONCAT 16
+
+#define VMSA_STRIDE(gran) ((gran) - VMSA_LEVELS + 1)
+#define VMSA_BIT_LVL(isz, strd, lvl) ((isz) - (strd) * \
+ (VMSA_LEVELS - (lvl)))
+#define VMSA_IDXMSK(isz, strd, lvl) ((1ULL << \
+ VMSA_BIT_LVL(isz, strd, lvl)) - 1)
/*
* Page table walk error types
@@ -41,6 +50,7 @@ typedef enum {
} SMMUPTWEventType;
typedef struct SMMUPTWEventInfo {
+ int stage;
SMMUPTWEventType type;
dma_addr_t addr; /* fetched address that induced an abort, if any */
} SMMUPTWEventInfo;
@@ -50,26 +60,51 @@ typedef struct SMMUTransTableInfo {
uint64_t ttb; /* TT base address */
uint8_t tsz; /* input range, ie. 2^(64 -tsz)*/
uint8_t granule_sz; /* granule page shift */
+ bool had; /* hierarchical attribute disable */
} SMMUTransTableInfo;
+typedef struct SMMUTLBEntry {
+ IOMMUTLBEntry entry;
+ uint8_t level;
+ uint8_t granule;
+} SMMUTLBEntry;
+
+/* Stage-2 configuration. */
+typedef struct SMMUS2Cfg {
+ uint8_t tsz; /* Size of IPA input region (S2T0SZ) */
+ uint8_t sl0; /* Start level of translation (S2SL0) */
+ bool affd; /* AF Fault Disable (S2AFFD) */
+ bool record_faults; /* Record fault events (S2R) */
+ uint8_t granule_sz; /* Granule page shift (based on S2TG) */
+ uint8_t eff_ps; /* Effective PA output range (based on S2PS) */
+ uint16_t vmid; /* Virtual Machine ID (S2VMID) */
+ uint64_t vttb; /* Address of translation table base (S2TTB) */
+} SMMUS2Cfg;
+
/*
* Generic structure populated by derived SMMU devices
* after decoding the configuration information and used as
* input to the page table walk
*/
typedef struct SMMUTransCfg {
+ /* Shared fields between stage-1 and stage-2. */
int stage; /* translation stage */
- bool aa64; /* arch64 or aarch32 translation table */
bool disabled; /* smmu is disabled */
bool bypassed; /* translation is bypassed */
bool aborted; /* translation is aborted */
+ bool affd; /* AF fault disable */
+ uint32_t iotlb_hits; /* counts IOTLB hits */
+ uint32_t iotlb_misses; /* counts IOTLB misses */
+ /* Used by stage-1 only. */
+ bool aa64; /* aarch64 or aarch32 translation table */
+ bool record_faults; /* record fault events */
uint64_t ttb; /* TT base address */
uint8_t oas; /* output address width */
uint8_t tbi; /* Top Byte Ignore */
uint16_t asid;
SMMUTransTableInfo tt[2];
- uint32_t iotlb_hits; /* counts IOTLB hits for this asid */
- uint32_t iotlb_misses; /* counts IOTLB misses for this asid */
+ /* Used by stage-2 only. */
+ struct SMMUS2Cfg s2cfg;
} SMMUTransCfg;
typedef struct SMMUDevice {
@@ -91,9 +126,12 @@ typedef struct SMMUPciBus {
typedef struct SMMUIOTLBKey {
uint64_t iova;
uint16_t asid;
+ uint16_t vmid;
+ uint8_t tg;
+ uint8_t level;
} SMMUIOTLBKey;
-typedef struct SMMUState {
+struct SMMUState {
/* <private> */
SysBusDevice dev;
const char *mrtypename;
@@ -107,9 +145,9 @@ typedef struct SMMUState {
QLIST_HEAD(, SMMUDevice) devices_with_notifiers;
uint8_t bus_num;
PCIBus *primary_bus;
-} SMMUState;
+};
-typedef struct {
+struct SMMUBaseClass {
/* <private> */
SysBusDeviceClass parent_class;
@@ -117,14 +155,10 @@ typedef struct {
DeviceRealize parent_realize;
-} SMMUBaseClass;
+};
#define TYPE_ARM_SMMU "arm-smmu"
-#define ARM_SMMU(obj) OBJECT_CHECK(SMMUState, (obj), TYPE_ARM_SMMU)
-#define ARM_SMMU_CLASS(klass) \
- OBJECT_CLASS_CHECK(SMMUBaseClass, (klass), TYPE_ARM_SMMU)
-#define ARM_SMMU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SMMUBaseClass, (obj), TYPE_ARM_SMMU)
+OBJECT_DECLARE_TYPE(SMMUState, SMMUBaseClass, ARM_SMMU)
/* Return the SMMUPciBus handle associated to a PCI bus number */
SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num);
@@ -140,7 +174,7 @@ static inline uint16_t smmu_get_sid(SMMUDevice *sdev)
* pair, according to @cfg translation config
*/
int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
- IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info);
+ SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info);
/**
* select_tt - compute which translation table shall be used according to
@@ -153,14 +187,18 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid);
#define SMMU_IOTLB_MAX_SIZE 256
+SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
+ SMMUTransTableInfo *tt, hwaddr iova);
+void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *entry);
+SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
+ uint8_t tg, uint8_t level);
void smmu_iotlb_inv_all(SMMUState *s);
void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid);
-void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova);
+void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid);
+void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
+ uint8_t tg, uint64_t num_pages, uint8_t ttl);
/* Unmap the range of all the notifiers registered to any IOMMU mr */
void smmu_inv_notifiers_all(SMMUState *s);
-/* Unmap the range of all the notifiers registered to @mr */
-void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr);
-
#endif /* HW_ARM_SMMU_COMMON_H */
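The new VMSA_* helpers above are plain bit arithmetic, so a small standalone sketch can exercise them directly. The granule and input-size values below (4 KiB granule, 40-bit input address) are illustrative assumptions, not anything mandated by the header:

/* Standalone sketch of the VMSA_* macro arithmetic (assumed example values). */
#include <assert.h>

#define VMSA_LEVELS                   4
#define VMSA_STRIDE(gran)             ((gran) - VMSA_LEVELS + 1)
#define VMSA_BIT_LVL(isz, strd, lvl)  ((isz) - (strd) * (VMSA_LEVELS - (lvl)))
#define VMSA_IDXMSK(isz, strd, lvl)   ((1ULL << VMSA_BIT_LVL(isz, strd, lvl)) - 1)

int main(void)
{
    int isz  = 40;              /* assumed 40-bit input address size */
    int strd = VMSA_STRIDE(12); /* 4 KiB granule: 12 - 4 + 1 = 9 bits per level */

    assert(strd == 9);
    assert(VMSA_BIT_LVL(isz, strd, 1) == 13);       /* 40 - 9 * (4 - 1) */
    assert(VMSA_IDXMSK(isz, strd, 1) == 0x1fffULL); /* mask of the low 13 bits */
    return 0;
}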
diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h
index 36b2f45253..d183a62766 100644
--- a/include/hw/arm/smmuv3.h
+++ b/include/hw/arm/smmuv3.h
@@ -20,7 +20,7 @@
#define HW_ARM_SMMUV3_H
#include "hw/arm/smmu-common.h"
-#include "hw/registerfields.h"
+#include "qom/object.h"
#define TYPE_SMMUV3_IOMMU_MEMORY_REGION "smmuv3-iommu-memory-region"
@@ -32,7 +32,7 @@ typedef struct SMMUQueue {
uint8_t log2size;
} SMMUQueue;
-typedef struct SMMUv3State {
+struct SMMUv3State {
SMMUState smmu_state;
uint32_t features;
@@ -41,9 +41,11 @@ typedef struct SMMUv3State {
uint32_t idr[6];
uint32_t iidr;
+ uint32_t aidr;
uint32_t cr[3];
uint32_t cr0ack;
uint32_t statusr;
+ uint32_t gbpa;
uint32_t irq_ctrl;
uint32_t gerror;
uint32_t gerrorn;
@@ -60,7 +62,8 @@ typedef struct SMMUv3State {
qemu_irq irq[4];
QemuMutex mutex;
-} SMMUv3State;
+ char *stage;
+};
typedef enum {
SMMU_IRQ_EVTQ,
@@ -69,20 +72,19 @@ typedef enum {
SMMU_IRQ_GERROR,
} SMMUIrq;
-typedef struct {
+struct SMMUv3Class {
/*< private >*/
SMMUBaseClass smmu_base_class;
/*< public >*/
DeviceRealize parent_realize;
- DeviceReset parent_reset;
-} SMMUv3Class;
+ ResettablePhases parent_phases;
+};
#define TYPE_ARM_SMMUV3 "arm-smmuv3"
-#define ARM_SMMUV3(obj) OBJECT_CHECK(SMMUv3State, (obj), TYPE_ARM_SMMUV3)
-#define ARM_SMMUV3_CLASS(klass) \
- OBJECT_CLASS_CHECK(SMMUv3Class, (klass), TYPE_ARM_SMMUV3)
-#define ARM_SMMUV3_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SMMUv3Class, (obj), TYPE_ARM_SMMUV3)
+OBJECT_DECLARE_TYPE(SMMUv3State, SMMUv3Class, ARM_SMMUV3)
+
+#define STAGE1_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S1P)
+#define STAGE2_SUPPORTED(s) FIELD_EX32(s->idr[0], IDR0, S2P)
#endif
diff --git a/include/hw/arm/stm32f100_soc.h b/include/hw/arm/stm32f100_soc.h
new file mode 100644
index 0000000000..a74d7b369c
--- /dev/null
+++ b/include/hw/arm/stm32f100_soc.h
@@ -0,0 +1,61 @@
+/*
+ * STM32F100 SoC
+ *
+ * Copyright (c) 2021 Alexandre Iooss <erdnaxe@crans.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_ARM_STM32F100_SOC_H
+#define HW_ARM_STM32F100_SOC_H
+
+#include "hw/char/stm32f2xx_usart.h"
+#include "hw/ssi/stm32f2xx_spi.h"
+#include "hw/arm/armv7m.h"
+#include "qom/object.h"
+#include "hw/clock.h"
+
+#define TYPE_STM32F100_SOC "stm32f100-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F100State, STM32F100_SOC)
+
+#define STM_NUM_USARTS 3
+#define STM_NUM_SPIS 2
+
+#define FLASH_BASE_ADDRESS 0x08000000
+#define FLASH_SIZE (128 * 1024)
+#define SRAM_BASE_ADDRESS 0x20000000
+#define SRAM_SIZE (8 * 1024)
+
+struct STM32F100State {
+ SysBusDevice parent_obj;
+
+ ARMv7MState armv7m;
+
+ STM32F2XXUsartState usart[STM_NUM_USARTS];
+ STM32F2XXSPIState spi[STM_NUM_SPIS];
+
+ MemoryRegion sram;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
+
+ Clock *sysclk;
+ Clock *refclk;
+};
+
+#endif
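A board model would typically create this SoC, feed it a fixed sysclk, and realize it. The sketch below follows the usual QEMU qdev/clock idiom; the 24 MHz frequency and the "sysclk" clock-input name are assumptions for illustration, not dictated by this header:

/* Hypothetical board init sketch for the STM32F100 SoC declared above. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/clock.h"
#include "hw/qdev-clock.h"
#include "hw/arm/stm32f100_soc.h"

static void example_board_init(MachineState *machine)
{
    Clock *sysclk;
    DeviceState *soc;

    /* Fixed-frequency board clock; 24 MHz is an illustrative value. */
    sysclk = clock_new(OBJECT(machine), "SYSCLK");
    clock_set_hz(sysclk, 24 * 1000 * 1000);

    soc = qdev_new(TYPE_STM32F100_SOC);
    object_property_add_child(OBJECT(machine), "soc", OBJECT(soc));
    qdev_connect_clock_in(soc, "sysclk", sysclk); /* assumed clock input name */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(soc), &error_fatal);
}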
diff --git a/include/hw/arm/stm32f205_soc.h b/include/hw/arm/stm32f205_soc.h
index 922a733f88..4f4c8bbebc 100644
--- a/include/hw/arm/stm32f205_soc.h
+++ b/include/hw/arm/stm32f205_soc.h
@@ -32,10 +32,11 @@
#include "hw/or-irq.h"
#include "hw/ssi/stm32f2xx_spi.h"
#include "hw/arm/armv7m.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_STM32F205_SOC "stm32f205-soc"
-#define STM32F205_SOC(obj) \
- OBJECT_CHECK(STM32F205State, (obj), TYPE_STM32F205_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F205State, STM32F205_SOC)
#define STM_NUM_USARTS 6
#define STM_NUM_TIMERS 4
@@ -47,12 +48,8 @@
#define SRAM_BASE_ADDRESS 0x20000000
#define SRAM_SIZE (128 * 1024)
-typedef struct STM32F205State {
- /*< private >*/
+struct STM32F205State {
SysBusDevice parent_obj;
- /*< public >*/
-
- char *cpu_type;
ARMv7MState armv7m;
@@ -62,7 +59,14 @@ typedef struct STM32F205State {
STM32F2XXADCState adc[STM_NUM_ADCS];
STM32F2XXSPIState spi[STM_NUM_SPIS];
- qemu_or_irq *adc_irqs;
-} STM32F205State;
+ OrIRQState *adc_irqs;
+
+ MemoryRegion sram;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
+
+ Clock *sysclk;
+ Clock *refclk;
+};
#endif
diff --git a/include/hw/arm/stm32f405_soc.h b/include/hw/arm/stm32f405_soc.h
index 1fe97f8c3a..d15c03c4b5 100644
--- a/include/hw/arm/stm32f405_soc.h
+++ b/include/hw/arm/stm32f405_soc.h
@@ -33,10 +33,10 @@
#include "hw/or-irq.h"
#include "hw/ssi/stm32f2xx_spi.h"
#include "hw/arm/armv7m.h"
+#include "qom/object.h"
#define TYPE_STM32F405_SOC "stm32f405-soc"
-#define STM32F405_SOC(obj) \
- OBJECT_CHECK(STM32F405State, (obj), TYPE_STM32F405_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F405State, STM32F405_SOC)
#define STM_NUM_USARTS 7
#define STM_NUM_TIMERS 4
@@ -46,14 +46,12 @@
#define FLASH_BASE_ADDRESS 0x08000000
#define FLASH_SIZE (1024 * 1024)
#define SRAM_BASE_ADDRESS 0x20000000
-#define SRAM_SIZE (192 * 1024)
+#define SRAM_SIZE (128 * 1024)
+#define CCM_BASE_ADDRESS 0x10000000
+#define CCM_SIZE (64 * 1024)
-typedef struct STM32F405State {
- /*< private >*/
+struct STM32F405State {
SysBusDevice parent_obj;
- /*< public >*/
-
- char *cpu_type;
ARMv7MState armv7m;
@@ -61,13 +59,17 @@ typedef struct STM32F405State {
STM32F4xxExtiState exti;
STM32F2XXUsartState usart[STM_NUM_USARTS];
STM32F2XXTimerState timer[STM_NUM_TIMERS];
- qemu_or_irq adc_irqs;
+ OrIRQState adc_irqs;
STM32F2XXADCState adc[STM_NUM_ADCS];
STM32F2XXSPIState spi[STM_NUM_SPIS];
+ MemoryRegion ccm;
MemoryRegion sram;
MemoryRegion flash;
MemoryRegion flash_alias;
-} STM32F405State;
+
+ Clock *sysclk;
+ Clock *refclk;
+};
#endif
diff --git a/include/hw/arm/stm32l4x5_soc.h b/include/hw/arm/stm32l4x5_soc.h
new file mode 100644
index 0000000000..ee5f362405
--- /dev/null
+++ b/include/hw/arm/stm32l4x5_soc.h
@@ -0,0 +1,67 @@
+/*
+ * STM32L4x5 SoC family
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This work is heavily inspired by the stm32f405_soc by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_ARM_STM32L4x5_SOC_H
+#define HW_ARM_STM32L4x5_SOC_H
+
+#include "exec/memory.h"
+#include "hw/arm/armv7m.h"
+#include "hw/or-irq.h"
+#include "hw/misc/stm32l4x5_syscfg.h"
+#include "hw/misc/stm32l4x5_exti.h"
+#include "hw/misc/stm32l4x5_rcc.h"
+#include "hw/gpio/stm32l4x5_gpio.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_SOC "stm32l4x5-soc"
+#define TYPE_STM32L4X5XC_SOC "stm32l4x5xc-soc"
+#define TYPE_STM32L4X5XE_SOC "stm32l4x5xe-soc"
+#define TYPE_STM32L4X5XG_SOC "stm32l4x5xg-soc"
+OBJECT_DECLARE_TYPE(Stm32l4x5SocState, Stm32l4x5SocClass, STM32L4X5_SOC)
+
+#define NUM_EXTI_OR_GATES 4
+
+struct Stm32l4x5SocState {
+ SysBusDevice parent_obj;
+
+ ARMv7MState armv7m;
+
+ Stm32l4x5ExtiState exti;
+ OrIRQState exti_or_gates[NUM_EXTI_OR_GATES];
+ Stm32l4x5SyscfgState syscfg;
+ Stm32l4x5RccState rcc;
+ Stm32l4x5GpioState gpio[NUM_GPIOS];
+
+ MemoryRegion sram1;
+ MemoryRegion sram2;
+ MemoryRegion flash;
+ MemoryRegion flash_alias;
+};
+
+struct Stm32l4x5SocClass {
+ SysBusDeviceClass parent_class;
+
+ size_t flash_size;
+};
+
+#endif
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index dff67e1bef..bb486d36b1 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -34,24 +34,18 @@
#include "qemu/notify.h"
#include "hw/boards.h"
#include "hw/arm/boot.h"
+#include "hw/arm/bsa.h"
#include "hw/block/flash.h"
#include "sysemu/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
+#include "qom/object.h"
#define NUM_GICV2M_SPIS 64
#define NUM_VIRTIO_TRANSPORTS 32
#define NUM_SMMU_IRQS 4
-#define ARCH_GIC_MAINT_IRQ 9
-
-#define ARCH_TIMER_VIRT_IRQ 11
-#define ARCH_TIMER_S_EL1_IRQ 13
-#define ARCH_TIMER_NS_EL1_IRQ 14
-#define ARCH_TIMER_NS_EL2_IRQ 10
-
-#define VIRTUAL_PMU_IRQ 7
-
-#define PPI(irq) ((irq) + 16)
+/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
+#define PVTIME_SIZE_PER_CPU 64
enum {
VIRT_FLASH,
@@ -77,9 +71,11 @@ enum {
VIRT_GPIO,
VIRT_SECURE_UART,
VIRT_SECURE_MEM,
+ VIRT_SECURE_GPIO,
VIRT_PCDIMM_ACPI,
VIRT_ACPI_GED,
VIRT_NVDIMM_ACPI,
+ VIRT_PVTIME,
VIRT_LOWMEMMAP_LAST,
};
@@ -103,32 +99,41 @@ typedef enum VirtMSIControllerType {
} VirtMSIControllerType;
typedef enum VirtGICType {
- VIRT_GIC_VERSION_MAX,
- VIRT_GIC_VERSION_HOST,
- VIRT_GIC_VERSION_2,
- VIRT_GIC_VERSION_3,
+ VIRT_GIC_VERSION_MAX = 0,
+ VIRT_GIC_VERSION_HOST = 1,
+ /* The concrete GIC values have to match the GIC version number */
+ VIRT_GIC_VERSION_2 = 2,
+ VIRT_GIC_VERSION_3 = 3,
+ VIRT_GIC_VERSION_4 = 4,
VIRT_GIC_VERSION_NOSEL,
} VirtGICType;
-typedef struct MemMapEntry {
- hwaddr base;
- hwaddr size;
-} MemMapEntry;
+#define VIRT_GIC_VERSION_2_MASK BIT(VIRT_GIC_VERSION_2)
+#define VIRT_GIC_VERSION_3_MASK BIT(VIRT_GIC_VERSION_3)
+#define VIRT_GIC_VERSION_4_MASK BIT(VIRT_GIC_VERSION_4)
-typedef struct {
+struct VirtMachineClass {
MachineClass parent;
bool disallow_affinity_adjustment;
bool no_its;
+ bool no_tcg_its;
bool no_pmu;
bool claim_edge_triggered_timers;
bool smbios_old_sys_ver;
+ bool no_highmem_compact;
bool no_highmem_ecam;
- bool no_ged; /* Machines < 4.2 has no support for ACPI GED device */
+ bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */
bool kvm_no_adjvtime;
+ bool no_kvm_steal_time;
bool acpi_expose_flash;
-} VirtMachineClass;
+ bool no_secure_gpio;
+ /* Machines < 6.2 have no support for describing cpu topology to guest */
+ bool no_cpu_topology;
+ bool no_tcg_lpa2;
+ bool no_ns_el2_virt_timer_irq;
+};
-typedef struct {
+struct VirtMachineState {
MachineState parent;
Notifier machine_done;
DeviceState *platform_bus_dev;
@@ -136,22 +141,26 @@ typedef struct {
PFlashCFI01 *flash[2];
bool secure;
bool highmem;
+ bool highmem_compact;
bool highmem_ecam;
+ bool highmem_mmio;
+ bool highmem_redists;
bool its;
+ bool tcg_its;
bool virt;
bool ras;
bool mte;
+ bool dtb_randomness;
OnOffAuto acpi;
VirtGICType gic_version;
VirtIOMMUType iommu;
+ bool default_bus_bypass_iommu;
VirtMSIControllerType msi_controller;
uint16_t virtio_iommu_bdf;
struct arm_boot_info bootinfo;
MemMapEntry *memmap;
char *pciehb_nodename;
const int *irqmap;
- int smp_cpus;
- void *fdt;
int fdt_size;
uint32_t clock_phandle;
uint32_t gic_phandle;
@@ -162,30 +171,42 @@ typedef struct {
DeviceState *gic;
DeviceState *acpi_dev;
Notifier powerdown_notifier;
-} VirtMachineState;
+ PCIBus *bus;
+ char *oem_id;
+ char *oem_table_id;
+ bool ns_el2_virt_timer_irq;
+};
#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
-#define VIRT_MACHINE(obj) \
- OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE)
-#define VIRT_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtMachineClass, obj, TYPE_VIRT_MACHINE)
-#define VIRT_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE)
+OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
void virt_acpi_setup(VirtMachineState *vms);
bool virt_is_acpi_enabled(VirtMachineState *vms);
+/* Return number of redistributors that fit in the specified region */
+static uint32_t virt_redist_capacity(VirtMachineState *vms, int region)
+{
+ uint32_t redist_size;
+
+ if (vms->gic_version == VIRT_GIC_VERSION_3) {
+ redist_size = GICV3_REDIST_SIZE;
+ } else {
+ redist_size = GICV4_REDIST_SIZE;
+ }
+ return vms->memmap[region].size / redist_size;
+}
+
/* Return the number of used redistributor regions */
static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
{
- uint32_t redist0_capacity =
- vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
+ uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST);
- assert(vms->gic_version == VIRT_GIC_VERSION_3);
+ assert(vms->gic_version != VIRT_GIC_VERSION_2);
- return vms->smp_cpus > redist0_capacity ? 2 : 1;
+ return (MACHINE(vms)->smp.cpus > redist0_capacity &&
+ vms->highmem_redists) ? 2 : 1;
}
#endif /* QEMU_ARM_VIRT_H */
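For a sense of scale, the capacity helper above is a simple division; the numbers below are an illustrative example, since the real region size comes from the board's memmap:

/*
 * Illustrative arithmetic for virt_redist_capacity() (assumed region size):
 *
 *   GICv3 redistributor frame:  GICV3_REDIST_SIZE == 0x20000 (128 KiB)
 *   redistributor region size:  0x100000 (1 MiB, assumed for the example)
 *
 *   capacity = 0x100000 / 0x20000 = 8 vCPUs
 *
 * A GICv3 guest with more than 8 vCPUs (and highmem_redists enabled) would
 * then report two redistributor regions from virt_gicv3_redist_region_count().
 */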
diff --git a/include/hw/arm/xen_arch_hvm.h b/include/hw/arm/xen_arch_hvm.h
new file mode 100644
index 0000000000..8fd645e723
--- /dev/null
+++ b/include/hw/arm/xen_arch_hvm.h
@@ -0,0 +1,9 @@
+#ifndef HW_XEN_ARCH_ARM_HVM_H
+#define HW_XEN_ARCH_ARM_HVM_H
+
+#include <xen/hvm/ioreq.h>
+void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
+void arch_xen_set_memory(XenIOState *state,
+ MemoryRegionSection *section,
+ bool add);
+#endif
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index 9c9f47ba9d..025beb5532 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -13,25 +13,45 @@
#define XLNX_VERSAL_H
#include "hw/sysbus.h"
-#include "hw/arm/boot.h"
+#include "hw/cpu/cluster.h"
+#include "hw/or-irq.h"
#include "hw/sd/sdhci.h"
#include "hw/intc/arm_gicv3.h"
#include "hw/char/pl011.h"
#include "hw/dma/xlnx-zdma.h"
#include "hw/net/cadence_gem.h"
#include "hw/rtc/xlnx-zynqmp-rtc.h"
+#include "qom/object.h"
+#include "hw/usb/xlnx-usb-subsystem.h"
+#include "hw/misc/xlnx-versal-xramc.h"
+#include "hw/nvram/xlnx-bbram.h"
+#include "hw/nvram/xlnx-versal-efuse.h"
+#include "hw/ssi/xlnx-versal-ospi.h"
+#include "hw/dma/xlnx_csu_dma.h"
+#include "hw/misc/xlnx-versal-crl.h"
+#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
+#include "hw/misc/xlnx-versal-trng.h"
+#include "hw/net/xlnx-versal-canfd.h"
+#include "hw/misc/xlnx-versal-cfu.h"
+#include "hw/misc/xlnx-versal-cframe-reg.h"
+#include "target/arm/cpu.h"
#define TYPE_XLNX_VERSAL "xlnx-versal"
-#define XLNX_VERSAL(obj) OBJECT_CHECK(Versal, (obj), TYPE_XLNX_VERSAL)
+OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
#define XLNX_VERSAL_NR_ACPUS 2
+#define XLNX_VERSAL_NR_RCPUS 2
#define XLNX_VERSAL_NR_UARTS 2
#define XLNX_VERSAL_NR_GEMS 2
#define XLNX_VERSAL_NR_ADMAS 8
#define XLNX_VERSAL_NR_SDS 2
+#define XLNX_VERSAL_NR_XRAM 4
#define XLNX_VERSAL_NR_IRQS 192
+#define XLNX_VERSAL_NR_CANFD 2
+#define XLNX_VERSAL_CANFD_REF_CLK (24 * 1000 * 1000)
+#define XLNX_VERSAL_NR_CFRAME 15
-typedef struct Versal {
+struct Versal {
/*< private >*/
SysBusDevice parent_obj;
@@ -39,6 +59,7 @@ typedef struct Versal {
struct {
struct {
MemoryRegion mr;
+ CPUClusterState cluster;
ARMCPU cpu[XLNX_VERSAL_NR_ACPUS];
GICv3State gic;
} apu;
@@ -58,23 +79,62 @@ typedef struct Versal {
PL011State uart[XLNX_VERSAL_NR_UARTS];
CadenceGEMState gem[XLNX_VERSAL_NR_GEMS];
XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS];
+ VersalUsb2 usb;
+ CanBusState *canbus[XLNX_VERSAL_NR_CANFD];
+ XlnxVersalCANFDState canfd[XLNX_VERSAL_NR_CANFD];
} iou;
+
+ /* Real-time Processing Unit. */
+ struct {
+ MemoryRegion mr;
+ MemoryRegion mr_ps_alias;
+
+ CPUClusterState cluster;
+ ARMCPU cpu[XLNX_VERSAL_NR_RCPUS];
+ } rpu;
+
+ struct {
+ OrIRQState irq_orgate;
+ XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM];
+ } xram;
+
+ XlnxVersalCRL crl;
} lpd;
/* The Platform Management Controller subsystem. */
struct {
struct {
SDHCIState sd[XLNX_VERSAL_NR_SDS];
+ XlnxVersalPmcIouSlcr slcr;
+
+ struct {
+ XlnxVersalOspi ospi;
+ XlnxCSUDMA dma_src;
+ XlnxCSUDMA dma_dst;
+ MemoryRegion linear_mr;
+ OrIRQState irq_orgate;
+ } ospi;
} iou;
XlnxZynqMPRTC rtc;
+ XlnxVersalTRng trng;
+ XlnxBBRam bbram;
+ XlnxEFuse efuse;
+ XlnxVersalEFuseCtrl efuse_ctrl;
+ XlnxVersalEFuseCache efuse_cache;
+ XlnxVersalCFUAPB cfu_apb;
+ XlnxVersalCFUFDRO cfu_fdro;
+ XlnxVersalCFUSFR cfu_sfr;
+ XlnxVersalCFrameReg cframe[XLNX_VERSAL_NR_CFRAME];
+ XlnxVersalCFrameBcastReg cframe_bcast;
+
+ OrIRQState apb_irq_orgate;
} pmc;
struct {
MemoryRegion *mr_ddr;
- uint32_t psci_conduit;
} cfg;
-} Versal;
+};
/* Memory-map and IRQ definitions. Copied a subset from
* auto-generated files. */
@@ -85,15 +145,24 @@ typedef struct Versal {
#define VERSAL_TIMER_NS_EL1_IRQ 14
#define VERSAL_TIMER_NS_EL2_IRQ 10
+#define VERSAL_CRL_IRQ 10
#define VERSAL_UART0_IRQ_0 18
#define VERSAL_UART1_IRQ_0 19
+#define VERSAL_CANFD0_IRQ_0 20
+#define VERSAL_CANFD1_IRQ_0 21
+#define VERSAL_USB0_IRQ_0 22
#define VERSAL_GEM0_IRQ_0 56
#define VERSAL_GEM0_WAKE_IRQ_0 57
#define VERSAL_GEM1_IRQ_0 58
#define VERSAL_GEM1_WAKE_IRQ_0 59
#define VERSAL_ADMA_IRQ_0 60
-#define VERSAL_RTC_APB_ERR_IRQ 121
+#define VERSAL_XRAM_IRQ_0 79
+#define VERSAL_CFU_IRQ_0 120
+#define VERSAL_PMC_APB_IRQ 121
+#define VERSAL_OSPI_IRQ 124
#define VERSAL_SD0_IRQ_0 126
+#define VERSAL_EFUSE_IRQ 139
+#define VERSAL_TRNG_IRQ 141
#define VERSAL_RTC_ALARM_IRQ 142
#define VERSAL_RTC_SECONDS_IRQ 143
@@ -113,6 +182,11 @@ typedef struct Versal {
#define MM_UART1 0xff010000U
#define MM_UART1_SIZE 0x10000
+#define MM_CANFD0 0xff060000U
+#define MM_CANFD0_SIZE 0x10000
+#define MM_CANFD1 0xff070000U
+#define MM_CANFD1_SIZE 0x10000
+
#define MM_GEM0 0xff0c0000U
#define MM_GEM0_SIZE 0x10000
#define MM_GEM1 0xff0d0000U
@@ -124,6 +198,16 @@ typedef struct Versal {
#define MM_OCM 0xfffc0000U
#define MM_OCM_SIZE 0x40000
+#define MM_XRAM 0xfe800000
+#define MM_XRAMC 0xff8e0000
+#define MM_XRAMC_SIZE 0x10000
+
+#define MM_USB2_CTRL_REGS 0xFF9D0000
+#define MM_USB2_CTRL_REGS_SIZE 0x10000
+
+#define MM_USB_0 0xFE200000
+#define MM_USB_0_SIZE 0x10000
+
#define MM_TOP_DDR 0x0
#define MM_TOP_DDR_SIZE 0x80000000U
#define MM_TOP_DDR_2 0x800000000ULL
@@ -144,11 +228,110 @@ typedef struct Versal {
#define MM_IOU_SCNTRS_SIZE 0x10000
#define MM_FPD_CRF 0xfd1a0000U
#define MM_FPD_CRF_SIZE 0x140000
+#define MM_FPD_FPD_APU 0xfd5c0000
+#define MM_FPD_FPD_APU_SIZE 0x100
+
+#define MM_PMC_PMC_IOU_SLCR 0xf1060000
+#define MM_PMC_PMC_IOU_SLCR_SIZE 0x10000
+
+#define MM_PMC_OSPI 0xf1010000
+#define MM_PMC_OSPI_SIZE 0x10000
+
+#define MM_PMC_OSPI_DAC 0xc0000000
+#define MM_PMC_OSPI_DAC_SIZE 0x20000000
+
+#define MM_PMC_OSPI_DMA_DST 0xf1011800
+#define MM_PMC_OSPI_DMA_SRC 0xf1011000
#define MM_PMC_SD0 0xf1040000U
#define MM_PMC_SD0_SIZE 0x10000
+#define MM_PMC_BBRAM_CTRL 0xf11f0000
+#define MM_PMC_BBRAM_CTRL_SIZE 0x00050
+#define MM_PMC_EFUSE_CTRL 0xf1240000
+#define MM_PMC_EFUSE_CTRL_SIZE 0x00104
+#define MM_PMC_EFUSE_CACHE 0xf1250000
+#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00
+
+#define MM_PMC_CFU_APB 0xf12b0000
+#define MM_PMC_CFU_APB_SIZE 0x10000
+#define MM_PMC_CFU_STREAM 0xf12c0000
+#define MM_PMC_CFU_STREAM_SIZE 0x1000
+#define MM_PMC_CFU_SFR 0xf12c1000
+#define MM_PMC_CFU_SFR_SIZE 0x1000
+#define MM_PMC_CFU_FDRO 0xf12c2000
+#define MM_PMC_CFU_FDRO_SIZE 0x1000
+#define MM_PMC_CFU_STREAM_2 0xf1f80000
+#define MM_PMC_CFU_STREAM_2_SIZE 0x40000
+
+#define MM_PMC_CFRAME0_REG 0xf12d0000
+#define MM_PMC_CFRAME0_REG_SIZE 0x1000
+#define MM_PMC_CFRAME0_FDRI 0xf12d1000
+#define MM_PMC_CFRAME0_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME1_REG 0xf12d2000
+#define MM_PMC_CFRAME1_REG_SIZE 0x1000
+#define MM_PMC_CFRAME1_FDRI 0xf12d3000
+#define MM_PMC_CFRAME1_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME2_REG 0xf12d4000
+#define MM_PMC_CFRAME2_REG_SIZE 0x1000
+#define MM_PMC_CFRAME2_FDRI 0xf12d5000
+#define MM_PMC_CFRAME2_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME3_REG 0xf12d6000
+#define MM_PMC_CFRAME3_REG_SIZE 0x1000
+#define MM_PMC_CFRAME3_FDRI 0xf12d7000
+#define MM_PMC_CFRAME3_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME4_REG 0xf12d8000
+#define MM_PMC_CFRAME4_REG_SIZE 0x1000
+#define MM_PMC_CFRAME4_FDRI 0xf12d9000
+#define MM_PMC_CFRAME4_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME5_REG 0xf12da000
+#define MM_PMC_CFRAME5_REG_SIZE 0x1000
+#define MM_PMC_CFRAME5_FDRI 0xf12db000
+#define MM_PMC_CFRAME5_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME6_REG 0xf12dc000
+#define MM_PMC_CFRAME6_REG_SIZE 0x1000
+#define MM_PMC_CFRAME6_FDRI 0xf12dd000
+#define MM_PMC_CFRAME6_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME7_REG 0xf12de000
+#define MM_PMC_CFRAME7_REG_SIZE 0x1000
+#define MM_PMC_CFRAME7_FDRI 0xf12df000
+#define MM_PMC_CFRAME7_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME8_REG 0xf12e0000
+#define MM_PMC_CFRAME8_REG_SIZE 0x1000
+#define MM_PMC_CFRAME8_FDRI 0xf12e1000
+#define MM_PMC_CFRAME8_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME9_REG 0xf12e2000
+#define MM_PMC_CFRAME9_REG_SIZE 0x1000
+#define MM_PMC_CFRAME9_FDRI 0xf12e3000
+#define MM_PMC_CFRAME9_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME10_REG 0xf12e4000
+#define MM_PMC_CFRAME10_REG_SIZE 0x1000
+#define MM_PMC_CFRAME10_FDRI 0xf12e5000
+#define MM_PMC_CFRAME10_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME11_REG 0xf12e6000
+#define MM_PMC_CFRAME11_REG_SIZE 0x1000
+#define MM_PMC_CFRAME11_FDRI 0xf12e7000
+#define MM_PMC_CFRAME11_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME12_REG 0xf12e8000
+#define MM_PMC_CFRAME12_REG_SIZE 0x1000
+#define MM_PMC_CFRAME12_FDRI 0xf12e9000
+#define MM_PMC_CFRAME12_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME13_REG 0xf12ea000
+#define MM_PMC_CFRAME13_REG_SIZE 0x1000
+#define MM_PMC_CFRAME13_FDRI 0xf12eb000
+#define MM_PMC_CFRAME13_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME14_REG 0xf12ec000
+#define MM_PMC_CFRAME14_REG_SIZE 0x1000
+#define MM_PMC_CFRAME14_FDRI 0xf12ed000
+#define MM_PMC_CFRAME14_FDRI_SIZE 0x1000
+#define MM_PMC_CFRAME_BCAST_REG 0xf12ee000
+#define MM_PMC_CFRAME_BCAST_REG_SIZE 0x1000
+#define MM_PMC_CFRAME_BCAST_FDRI 0xf12ef000
+#define MM_PMC_CFRAME_BCAST_FDRI_SIZE 0x1000
+
#define MM_PMC_CRP 0xf1260000U
#define MM_PMC_CRP_SIZE 0x10000
#define MM_PMC_RTC 0xf12a0000
#define MM_PMC_RTC_SIZE 0x10000
+#define MM_PMC_TRNG 0xf1230000
+#define MM_PMC_TRNG_SIZE 0x10000
#endif
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 53076fa29a..48f7948092 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -18,11 +18,11 @@
#ifndef XLNX_ZYNQMP_H
#define XLNX_ZYNQMP_H
-#include "hw/arm/boot.h"
#include "hw/intc/arm_gic.h"
#include "hw/net/cadence_gem.h"
#include "hw/char/cadence_uart.h"
-#include "hw/ide/ahci.h"
+#include "hw/net/xlnx-zynqmp-can.h"
+#include "hw/ide/ahci-sysbus.h"
#include "hw/sd/sdhci.h"
#include "hw/ssi/xilinx_spips.h"
#include "hw/dma/xlnx_dpdma.h"
@@ -32,19 +32,31 @@
#include "hw/rtc/xlnx-zynqmp-rtc.h"
#include "hw/cpu/cluster.h"
#include "target/arm/cpu.h"
-
-#define TYPE_XLNX_ZYNQMP "xlnx,zynqmp"
-#define XLNX_ZYNQMP(obj) OBJECT_CHECK(XlnxZynqMPState, (obj), \
- TYPE_XLNX_ZYNQMP)
+#include "qom/object.h"
+#include "net/can_emu.h"
+#include "hw/dma/xlnx_csu_dma.h"
+#include "hw/nvram/xlnx-bbram.h"
+#include "hw/nvram/xlnx-zynqmp-efuse.h"
+#include "hw/or-irq.h"
+#include "hw/misc/xlnx-zynqmp-apu-ctrl.h"
+#include "hw/misc/xlnx-zynqmp-crf.h"
+#include "hw/timer/cadence_ttc.h"
+#include "hw/usb/hcd-dwc3.h"
+
+#define TYPE_XLNX_ZYNQMP "xlnx-zynqmp"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
#define XLNX_ZYNQMP_NUM_APU_CPUS 4
#define XLNX_ZYNQMP_NUM_RPU_CPUS 2
#define XLNX_ZYNQMP_NUM_GEMS 4
#define XLNX_ZYNQMP_NUM_UARTS 2
+#define XLNX_ZYNQMP_NUM_CAN 2
+#define XLNX_ZYNQMP_CAN_REF_CLK (24 * 1000 * 1000)
#define XLNX_ZYNQMP_NUM_SDHCI 2
#define XLNX_ZYNQMP_NUM_SPIS 2
#define XLNX_ZYNQMP_NUM_GDMA_CH 8
#define XLNX_ZYNQMP_NUM_ADMA_CH 8
+#define XLNX_ZYNQMP_NUM_USB 2
#define XLNX_ZYNQMP_NUM_QSPI_BUS 2
#define XLNX_ZYNQMP_NUM_QSPI_BUS_CS 2
@@ -56,7 +68,8 @@
#define XLNX_ZYNQMP_GIC_REGIONS 6
-/* ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
+/*
+ * ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
* and under-decodes the 64k region. This mirrors the 4k regions to every 4k
* aligned address in the 64k region. To implement each GIC region needs a
* number of memory region aliases.
@@ -73,7 +86,14 @@
#define XLNX_ZYNQMP_MAX_RAM_SIZE (XLNX_ZYNQMP_MAX_LOW_RAM_SIZE + \
XLNX_ZYNQMP_MAX_HIGH_RAM_SIZE)
-typedef struct XlnxZynqMPState {
+#define XLNX_ZYNQMP_NUM_TTC 4
+
+/*
+ * Unimplemented mmio regions needed to boot some images.
+ */
+#define XLNX_ZYNQMP_NUM_UNIMP_AREAS 1
+
+struct XlnxZynqMPState {
/*< private >*/
DeviceState parent_obj;
@@ -89,9 +109,15 @@ typedef struct XlnxZynqMPState {
MemoryRegion *ddr_ram;
MemoryRegion ddr_ram_low, ddr_ram_high;
+ XlnxBBRam bbram;
+ XlnxEFuse efuse;
+ XlnxZynqMPEFuse efuse_ctrl;
+
+ MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS];
CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS];
CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS];
+ XlnxZynqMPCANState can[XLNX_ZYNQMP_NUM_CAN];
SysbusAHCIState sata;
SDHCIState sdhci[XLNX_ZYNQMP_NUM_SDHCI];
XilinxSPIPS spi[XLNX_ZYNQMP_NUM_SPIS];
@@ -102,6 +128,12 @@ typedef struct XlnxZynqMPState {
XlnxZynqMPRTC rtc;
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
+ XlnxCSUDMA qspi_dma;
+ OrIRQState qspi_irq_orgate;
+ XlnxZynqMPAPUCtrl apu_ctrl;
+ XlnxZynqMPCRF crf;
+ CadenceTTCState ttc[XLNX_ZYNQMP_NUM_TTC];
+ USBDWC3 usb[XLNX_ZYNQMP_NUM_USB];
char *boot_cpu;
ARMCPU *boot_cpu_ptr;
@@ -110,8 +142,9 @@ typedef struct XlnxZynqMPState {
bool secure;
/* Has the ARM Virtualization extensions? */
bool virt;
- /* Has the RPU subsystem? */
- bool has_rpu;
-} XlnxZynqMPState;
+
+ /* CAN bus. */
+ CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN];
+};
#endif
diff --git a/include/hw/audio/asc.h b/include/hw/audio/asc.h
new file mode 100644
index 0000000000..04fac270b6
--- /dev/null
+++ b/include/hw/audio/asc.h
@@ -0,0 +1,85 @@
+/*
+ * QEMU Apple Sound Chip emulation
+ *
+ * Apple Sound Chip (ASC) 344S0063
+ * Enhanced Apple Sound Chip (EASC) 343S1063
+ *
+ * Copyright (c) 2012-2018 Laurent Vivier <laurent@vivier.eu>
+ * Copyright (c) 2022 Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_AUDIO_ASC_H
+#define HW_AUDIO_ASC_H
+
+#include "hw/sysbus.h"
+#include "audio/audio.h"
+
+#define ASC_FREQ 22257
+
+enum {
+ ASC_TYPE_ASC = 0, /* original discrete Apple Sound Chip */
+ ASC_TYPE_EASC = 1 /* discrete Enhanced Apple Sound Chip */
+};
+
+#define ASC_FIFO_OFFSET 0x0
+#define ASC_FIFO_SIZE 0x400
+
+#define ASC_REG_OFFSET 0x800
+#define ASC_REG_SIZE 0x60
+
+#define ASC_EXTREG_OFFSET 0xf00
+#define ASC_EXTREG_SIZE 0x20
+
+typedef struct ASCFIFOState {
+ int index;
+
+ MemoryRegion mem_fifo;
+ uint8_t fifo[ASC_FIFO_SIZE];
+ uint8_t int_status;
+
+ int cnt;
+ int wptr;
+ int rptr;
+
+ MemoryRegion mem_extregs;
+ uint8_t extregs[ASC_EXTREG_SIZE];
+
+ int xa_cnt;
+ uint8_t xa_val;
+ uint8_t xa_flags;
+ int16_t xa_last[2];
+} ASCFIFOState;
+
+struct ASCState {
+ SysBusDevice parent_obj;
+
+ uint8_t type;
+ MemoryRegion asc;
+ MemoryRegion mem_fifo;
+ MemoryRegion mem_regs;
+ MemoryRegion mem_extregs;
+
+ QEMUSoundCard card;
+ SWVoiceOut *voice;
+ uint8_t *mixbuf;
+ int samples;
+ int shift;
+
+ uint8_t *silentbuf;
+
+ /* Time when we were last able to generate samples */
+ int64_t fifo_empty_ns;
+
+ qemu_irq irq;
+
+ ASCFIFOState fifos[2];
+
+ uint8_t regs[ASC_REG_SIZE];
+};
+
+#define TYPE_ASC "apple-sound-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(ASCState, ASC)
+
+#endif
diff --git a/include/hw/audio/pcspk.h b/include/hw/audio/pcspk.h
index 9506179587..6be75a6b86 100644
--- a/include/hw/audio/pcspk.h
+++ b/include/hw/audio/pcspk.h
@@ -25,16 +25,6 @@
#ifndef HW_PCSPK_H
#define HW_PCSPK_H
-#include "hw/isa/isa.h"
-#include "hw/qdev-properties.h"
-#include "qapi/error.h"
-
#define TYPE_PC_SPEAKER "isa-pcspk"
-static inline void pcspk_init(ISADevice *isadev, ISABus *bus, ISADevice *pit)
-{
- object_property_set_link(OBJECT(isadev), "pit", OBJECT(pit), NULL);
- isa_realize_and_unref(isadev, bus, &error_fatal);
-}
-
#endif /* HW_PCSPK_H */
diff --git a/include/hw/audio/soundhw.h b/include/hw/audio/soundhw.h
index f09a297854..474c5ff94e 100644
--- a/include/hw/audio/soundhw.h
+++ b/include/hw/audio/soundhw.h
@@ -1,15 +1,13 @@
#ifndef HW_SOUNDHW_H
#define HW_SOUNDHW_H
-void isa_register_soundhw(const char *name, const char *descr,
- int (*init_isa)(ISABus *bus));
-
void pci_register_soundhw(const char *name, const char *descr,
- int (*init_pci)(PCIBus *bus));
+ int (*init_pci)(PCIBus *bus, const char *audiodev));
void deprecated_register_soundhw(const char *name, const char *descr,
int isa, const char *typename);
void soundhw_init(void);
-void select_soundhw(const char *optarg);
+void show_valid_soundhw(void);
+void select_soundhw(const char *name, const char *audiodev);
#endif
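With the updated callback signature above, a PCI sound card registers an init hook that now receives the -audiodev backend id. The sketch below is hypothetical: "mycard" and mycard_init_pci are made-up names, and the device is assumed to expose an "audiodev" property:

/* Hypothetical registration against the new init_pci(bus, audiodev) hook. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/audio/soundhw.h"

static int mycard_init_pci(PCIBus *bus, const char *audiodev)
{
    PCIDevice *dev = pci_new(-1, "mycard");      /* made-up device type name */

    qdev_prop_set_string(DEVICE(dev), "audiodev", audiodev);
    pci_realize_and_unref(dev, bus, &error_fatal);
    return 0;
}

static void mycard_register(void)
{
    pci_register_soundhw("mycard", "Example sound card", mycard_init_pci);
}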
diff --git a/include/hw/audio/virtio-snd.h b/include/hw/audio/virtio-snd.h
new file mode 100644
index 0000000000..8dafedb276
--- /dev/null
+++ b/include/hw/audio/virtio-snd.h
@@ -0,0 +1,250 @@
+/*
+ * VIRTIO Sound Device conforming to
+ *
+ * "Virtual I/O Device (VIRTIO) Version 1.2
+ * Committee Specification Draft 01
+ * 09 May 2022"
+ *
+ * Copyright (c) 2023 Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
+ * Copyright (C) 2019 OpenSynergy GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_SOUND_H
+#define QEMU_VIRTIO_SOUND_H
+
+#include "hw/virtio/virtio.h"
+#include "audio/audio.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_snd.h"
+
+#define TYPE_VIRTIO_SND "virtio-sound-device"
+#define VIRTIO_SND(obj) \
+ OBJECT_CHECK(VirtIOSound, (obj), TYPE_VIRTIO_SND)
+
+/* CONFIGURATION SPACE */
+
+typedef struct virtio_snd_config virtio_snd_config;
+
+/* COMMON DEFINITIONS */
+
+/* common header for request/response*/
+typedef struct virtio_snd_hdr virtio_snd_hdr;
+
+/* event notification */
+typedef struct virtio_snd_event virtio_snd_event;
+
+/* common control request to query an item information */
+typedef struct virtio_snd_query_info virtio_snd_query_info;
+
+/* JACK CONTROL MESSAGES */
+
+typedef struct virtio_snd_jack_hdr virtio_snd_jack_hdr;
+
+/* jack information structure */
+typedef struct virtio_snd_jack_info virtio_snd_jack_info;
+
+/* jack remapping control request */
+typedef struct virtio_snd_jack_remap virtio_snd_jack_remap;
+
+/*
+ * PCM CONTROL MESSAGES
+ */
+typedef struct virtio_snd_pcm_hdr virtio_snd_pcm_hdr;
+
+/* PCM stream info structure */
+typedef struct virtio_snd_pcm_info virtio_snd_pcm_info;
+
+/* set PCM stream params */
+typedef struct virtio_snd_pcm_set_params virtio_snd_pcm_set_params;
+
+/* I/O request header */
+typedef struct virtio_snd_pcm_xfer virtio_snd_pcm_xfer;
+
+/* I/O request status */
+typedef struct virtio_snd_pcm_status virtio_snd_pcm_status;
+
+/* device structs */
+
+typedef struct VirtIOSound VirtIOSound;
+
+typedef struct VirtIOSoundPCMStream VirtIOSoundPCMStream;
+
+typedef struct virtio_snd_ctrl_command virtio_snd_ctrl_command;
+
+typedef struct VirtIOSoundPCM VirtIOSoundPCM;
+
+typedef struct VirtIOSoundPCMBuffer VirtIOSoundPCMBuffer;
+
+/*
+ * The VirtIO sound spec reuses layouts and values from the High Definition
+ * Audio spec (virtio/v1.2: 5.14 Sound Device). This struct handles each I/O
+ * message's buffer (virtio/v1.2: 5.14.6.8 PCM I/O Messages).
+ *
+ * In the case of TX (i.e. playback) buffers, we defer reading the raw PCM data
+ * from the virtqueue until QEMU's audio subsystem calls the output callback.
+ * This is tracked by the `bool populated;` field, which is set to true when
+ * data has been read into our own buffer for consumption.
+ *
+ * VirtIOSoundPCMBuffer has a dynamic size since it includes the raw PCM data
+ * in its allocation. It must be initialized and destroyed as follows:
+ *
+ * size_t size = [[derived from owned VQ element descriptor sizes]];
+ * buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer) + size);
+ * buffer->elem = [[owned VQ element]];
+ *
+ * [..]
+ *
+ * g_free(buffer->elem);
+ * g_free(buffer);
+ */
+struct VirtIOSoundPCMBuffer {
+ QSIMPLEQ_ENTRY(VirtIOSoundPCMBuffer) entry;
+ VirtQueueElement *elem;
+ VirtQueue *vq;
+ size_t size;
+ /*
+ * In TX / Playback, `offset` represents the first unused position inside
+ * `data`. If `offset == size` then there is no unused data left.
+ */
+ uint64_t offset;
+ /* Used for the TX queue for lazy I/O copy from `elem` */
+ bool populated;
+ /*
+ * VirtIOSoundPCMBuffer is an unsized type because it ends with an array of
+ * bytes. The size of `data` is determined from the I/O message's read-only
+ * or write-only size when allocating VirtIOSoundPCMBuffer.
+ */
+ uint8_t data[];
+};
+
+struct VirtIOSoundPCM {
+ VirtIOSound *snd;
+ /*
+ * PCM parameters are a separate field instead of a VirtIOSoundPCMStream
+ * field, because the operation of PCM control requests is first
+ * VIRTIO_SND_R_PCM_SET_PARAMS and then VIRTIO_SND_R_PCM_PREPARE; this
+ * means that some times we get parameters without having an allocated
+ * stream yet.
+ */
+ virtio_snd_pcm_set_params *pcm_params;
+ VirtIOSoundPCMStream **streams;
+};
+
+struct VirtIOSoundPCMStream {
+ VirtIOSoundPCM *pcm;
+ virtio_snd_pcm_info info;
+ virtio_snd_pcm_set_params params;
+ uint32_t id;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ uint8_t positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+ VirtIOSound *s;
+ bool flushing;
+ audsettings as;
+ union {
+ SWVoiceIn *in;
+ SWVoiceOut *out;
+ } voice;
+ QemuMutex queue_mutex;
+ bool active;
+ QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) queue;
+};
+
+/*
+ * PCM stream state machine.
+ * -------------------------
+ *
+ * 5.14.6.6.1 PCM Command Lifecycle
+ * ================================
+ *
+ * A PCM stream has the following command lifecycle:
+ * - `SET PARAMETERS`
+ * The driver negotiates the stream parameters (format, transport, etc) with
+ * the device.
+ * Possible valid transitions: `SET PARAMETERS`, `PREPARE`.
+ * - `PREPARE`
+ * The device prepares the stream (allocates resources, etc).
+ * Possible valid transitions: `SET PARAMETERS`, `PREPARE`, `START`,
+ * `RELEASE`. Output only: the driver transfers data for pre-buffering.
+ * - `START`
+ * The device starts the stream (unmute, putting into running state, etc).
+ * Possible valid transitions: `STOP`.
+ * The driver transfers data to/from the stream.
+ * - `STOP`
+ * The device stops the stream (mute, putting into non-running state, etc).
+ * Possible valid transitions: `START`, `RELEASE`.
+ * - `RELEASE`
+ * The device releases the stream (frees resources, etc).
+ * Possible valid transitions: `SET PARAMETERS`, `PREPARE`.
+ *
+ * +---------------+ +---------+ +---------+ +-------+ +-------+
+ * | SetParameters | | Prepare | | Release | | Start | | Stop |
+ * +---------------+ +---------+ +---------+ +-------+ +-------+
+ * |- | | | |
+ * || | | | |
+ * |< | | | |
+ * |------------->| | | |
+ * |<-------------| | | |
+ * | |- | | |
+ * | || | | |
+ * | |< | | |
+ * | |--------------------->| |
+ * | |---------->| | |
+ * | | | |-------->|
+ * | | | |<--------|
+ * | | |<-------------------|
+ * |<-------------------------| | |
+ * | |<----------| | |
+ *
+ * CTRL in the VirtIOSound device
+ * ==============================
+ *
+ * The control messages that affect the state of a stream arrive in the
+ * `virtio_snd_handle_ctrl()` queue callback and are of type `struct
+ * virtio_snd_ctrl_command`. They are stored in a queue field in the device
+ * type, `VirtIOSound`. This allows deferring the CTRL request completion if
+ * it's not immediately possible due to locking/state reasons.
+ *
+ * The CTRL message is finally handled in `process_cmd()`.
+ */
+struct VirtIOSound {
+ VirtIODevice parent_obj;
+
+ VirtQueue *queues[VIRTIO_SND_VQ_MAX];
+ uint64_t features;
+ VirtIOSoundPCM *pcm;
+ QEMUSoundCard card;
+ VMChangeStateEntry *vmstate;
+ virtio_snd_config snd_conf;
+ QemuMutex cmdq_mutex;
+ QTAILQ_HEAD(, virtio_snd_ctrl_command) cmdq;
+ bool processing_cmdq;
+ /*
+ * Convenience queue to keep track of invalid tx/rx queue messages inside
+ * the tx/rx callbacks.
+ *
+ * In the callbacks as a first step we are emptying the virtqueue to handle
+ * each message and we cannot add an invalid message back to the queue: we
+ * would re-process it in subsequent loop iterations.
+ *
+ * Instead, we add them to this queue and after finishing examining every
+ * virtqueue element, we inform the guest about each invalid message.
+ *
+ * This queue must be empty at all times except for inside the tx/rx
+ * callbacks.
+ */
+ QSIMPLEQ_HEAD(, VirtIOSoundPCMBuffer) invalid;
+};
+
+struct virtio_snd_ctrl_command {
+ VirtQueueElement *elem;
+ VirtQueue *vq;
+ virtio_snd_hdr ctrl;
+ virtio_snd_hdr resp;
+ size_t payload_size;
+ QTAILQ_ENTRY(virtio_snd_ctrl_command) next;
+};
+#endif
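A minimal sketch of the allocation and teardown pattern that the VirtIOSoundPCMBuffer comment above prescribes; the helper names are made up, and `elem`/`size` are assumed to come from popping a virtqueue element (this header is assumed to be included):

/* Sketch of the documented VirtIOSoundPCMBuffer alloc/free pattern. */
static VirtIOSoundPCMBuffer *example_buffer_new(VirtQueueElement *elem,
                                                VirtQueue *vq, size_t size)
{
    /* Flexible array member: reserve the header plus `size` bytes of PCM data. */
    VirtIOSoundPCMBuffer *buffer = g_malloc0(sizeof(VirtIOSoundPCMBuffer) + size);

    buffer->elem = elem;
    buffer->vq = vq;
    buffer->size = size;
    buffer->populated = false;  /* TX data not yet copied out of `elem` */
    return buffer;
}

static void example_buffer_free(VirtIOSoundPCMBuffer *buffer)
{
    g_free(buffer->elem);
    g_free(buffer);
}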
diff --git a/include/hw/block/block.h b/include/hw/block/block.h
index 1e8b6253dd..de3946a5f1 100644
--- a/include/hw/block/block.h
+++ b/include/hw/block/block.h
@@ -13,11 +13,13 @@
#include "exec/hwaddr.h"
#include "qapi/qapi-types-block-core.h"
+#include "hw/qdev-properties-system.h"
/* Configuration */
typedef struct BlockConf {
BlockBackend *blk;
+ OnOffAuto backend_defaults;
uint32_t physical_block_size;
uint32_t logical_block_size;
uint32_t min_io_size;
@@ -29,6 +31,7 @@ typedef struct BlockConf {
uint32_t lcyls, lheads, lsecs;
OnOffAuto wce;
bool share_rw;
+ OnOffAuto account_invalid, account_failed;
BlockdevOnError rerror;
BlockdevOnError werror;
} BlockConf;
@@ -47,6 +50,8 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
}
#define DEFINE_BLOCK_PROPERTIES_BASE(_state, _conf) \
+ DEFINE_PROP_ON_OFF_AUTO("backend_defaults", _state, \
+ _conf.backend_defaults, ON_OFF_AUTO_AUTO), \
DEFINE_PROP_BLOCKSIZE("logical_block_size", _state, \
_conf.logical_block_size), \
DEFINE_PROP_BLOCKSIZE("physical_block_size", _state, \
@@ -57,7 +62,11 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
_conf.discard_granularity, -1), \
DEFINE_PROP_ON_OFF_AUTO("write-cache", _state, _conf.wce, \
ON_OFF_AUTO_AUTO), \
- DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false)
+ DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false), \
+ DEFINE_PROP_ON_OFF_AUTO("account-invalid", _state, \
+ _conf.account_invalid, ON_OFF_AUTO_AUTO), \
+ DEFINE_PROP_ON_OFF_AUTO("account-failed", _state, \
+ _conf.account_failed, ON_OFF_AUTO_AUTO)
#define DEFINE_BLOCK_PROPERTIES(_state, _conf) \
DEFINE_PROP_DRIVE("drive", _state, _conf.blk), \
@@ -79,8 +88,8 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
/* Backend access helpers */
-bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size,
- Error **errp);
+bool blk_check_size_and_read_all(BlockBackend *blk, DeviceState *dev,
+ void *buf, hwaddr size, Error **errp);
/* Configuration helpers */
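A hedged sketch of how a block device's property list would pick up the new backend_defaults and account-* knobs simply by using the macros above; MyBlockDevState and the array name are made-up illustration names:

/* Hypothetical device state consuming DEFINE_BLOCK_PROPERTIES(). */
typedef struct MyBlockDevState {
    DeviceState parent_obj;
    BlockConf conf;
} MyBlockDevState;

static Property my_blockdev_properties[] = {
    /* pulls in drive, block sizes, write-cache, share-rw, account-* ... */
    DEFINE_BLOCK_PROPERTIES(MyBlockDevState, conf),
    DEFINE_PROP_END_OF_LIST(),
};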
diff --git a/include/hw/block/fdc.h b/include/hw/block/fdc.h
index 1ecca7cac7..c367c5efea 100644
--- a/include/hw/block/fdc.h
+++ b/include/hw/block/fdc.h
@@ -10,11 +10,13 @@
#define TYPE_ISA_FDC "isa-fdc"
void isa_fdc_init_drives(ISADevice *fdc, DriveInfo **fds);
-void fdctrl_init_sysbus(qemu_irq irq, int dma_chann,
- hwaddr mmio_base, DriveInfo **fds);
+void fdctrl_init_sysbus(qemu_irq irq, hwaddr mmio_base, DriveInfo **fds);
void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base,
DriveInfo **fds, qemu_irq *fdc_tc);
+void isa_fdc_set_iobase(ISADevice *fdc, hwaddr iobase);
+void isa_fdc_set_enabled(ISADevice *fdc, bool enabled);
+
FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i);
int cmos_get_fd_drive_type(FloppyDriveType fd0);
diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h
index 2136a2d5e4..2b5ccd92f4 100644
--- a/include/hw/block/flash.h
+++ b/include/hw/block/flash.h
@@ -4,14 +4,13 @@
/* NOR flash devices */
#include "exec/hwaddr.h"
+#include "qom/object.h"
/* pflash_cfi01.c */
#define TYPE_PFLASH_CFI01 "cfi.pflash01"
-#define PFLASH_CFI01(obj) \
- OBJECT_CHECK(PFlashCFI01, (obj), TYPE_PFLASH_CFI01)
+OBJECT_DECLARE_SIMPLE_TYPE(PFlashCFI01, PFLASH_CFI01)
-typedef struct PFlashCFI01 PFlashCFI01;
PFlashCFI01 *pflash_cfi01_register(hwaddr base,
const char *name,
@@ -29,10 +28,8 @@ void pflash_cfi01_legacy_drive(PFlashCFI01 *dev, DriveInfo *dinfo);
/* pflash_cfi02.c */
#define TYPE_PFLASH_CFI02 "cfi.pflash02"
-#define PFLASH_CFI02(obj) \
- OBJECT_CHECK(PFlashCFI02, (obj), TYPE_PFLASH_CFI02)
+OBJECT_DECLARE_SIMPLE_TYPE(PFlashCFI02, PFLASH_CFI02)
-typedef struct PFlashCFI02 PFlashCFI02;
PFlashCFI02 *pflash_cfi02_register(hwaddr base,
const char *name,
@@ -56,27 +53,33 @@ void nand_setio(DeviceState *dev, uint32_t value);
uint32_t nand_getio(DeviceState *dev);
uint32_t nand_getbuswidth(DeviceState *dev);
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_STMICRO 0x20
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_MICRON 0x2c
/* onenand.c */
void *onenand_raw_otp(DeviceState *onenand_device);
/* ecc.c */
typedef struct {
- uint8_t cp; /* Column parity */
- uint16_t lp[2]; /* Line parity */
+ uint8_t cp; /* Column parity */
+ uint16_t lp[2]; /* Line parity */
uint16_t count;
} ECCState;
uint8_t ecc_digest(ECCState *s, uint8_t sample);
void ecc_reset(ECCState *s);
-extern VMStateDescription vmstate_ecc_state;
+extern const VMStateDescription vmstate_ecc_state;
+
+/* m25p80.c */
+
+#define TYPE_M25P80 "m25p80-generic"
+
+BlockBackend *m25p80_get_blk(DeviceState *dev);
#endif
diff --git a/include/hw/block/swim.h b/include/hw/block/swim.h
index 6add3499d0..5f567e8d59 100644
--- a/include/hw/block/swim.h
+++ b/include/hw/block/swim.h
@@ -11,17 +11,16 @@
#ifndef SWIM_H
#define SWIM_H
-#include "qemu/osdep.h"
+#include "hw/block/block.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define SWIM_MAX_FD 2
-typedef struct SWIMDrive SWIMDrive;
-typedef struct SWIMBus SWIMBus;
typedef struct SWIMCtrl SWIMCtrl;
#define TYPE_SWIM_DRIVE "swim-drive"
-#define SWIM_DRIVE(obj) OBJECT_CHECK(SWIMDrive, (obj), TYPE_SWIM_DRIVE)
+OBJECT_DECLARE_SIMPLE_TYPE(SWIMDrive, SWIM_DRIVE)
struct SWIMDrive {
DeviceState qdev;
@@ -30,7 +29,7 @@ struct SWIMDrive {
};
#define TYPE_SWIM_BUS "swim-bus"
-#define SWIM_BUS(obj) OBJECT_CHECK(SWIMBus, (obj), TYPE_SWIM_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(SWIMBus, SWIM_BUS)
struct SWIMBus {
BusState bus;
@@ -44,33 +43,30 @@ typedef struct FDrive {
} FDrive;
struct SWIMCtrl {
- MemoryRegion iomem;
+ MemoryRegion swim;
+ MemoryRegion iwm;
+ MemoryRegion ism;
FDrive drives[SWIM_MAX_FD];
int mode;
/* IWM mode */
int iwm_switch;
- uint16_t regs[8];
-#define IWM_PH0 0
-#define IWM_PH1 1
-#define IWM_PH2 2
-#define IWM_PH3 3
-#define IWM_MTR 4
-#define IWM_DRIVE 5
-#define IWM_Q6 6
-#define IWM_Q7 7
- uint8_t iwm_data;
- uint8_t iwm_mode;
+ uint8_t iwm_latches;
+ uint8_t iwmregs[8];
/* SWIM mode */
+ uint8_t ismregs[16];
uint8_t swim_phase;
uint8_t swim_mode;
+ uint8_t swim_status;
+ uint8_t pram[16];
+ uint8_t pram_idx;
SWIMBus bus;
};
#define TYPE_SWIM "swim"
-#define SWIM(obj) OBJECT_CHECK(SWIM, (obj), TYPE_SWIM)
+OBJECT_DECLARE_SIMPLE_TYPE(Swim, SWIM)
-typedef struct SWIM {
+struct Swim {
SysBusDevice parent_obj;
SWIMCtrl ctrl;
-} SWIM;
+};
#endif
diff --git a/include/hw/boards.h b/include/hw/boards.h
index 426ce5f625..69c1ba45cf 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -6,7 +6,6 @@
#include "exec/memory.h"
#include "sysemu/hostmem.h"
#include "sysemu/blockdev.h"
-#include "sysemu/accel.h"
#include "qapi/qapi-types-machine.h"
#include "qemu/module.h"
#include "qom/object.h"
@@ -21,26 +20,88 @@
#define TYPE_MACHINE "machine"
#undef MACHINE /* BSD defines it and QEMU does not use it */
-#define MACHINE(obj) \
- OBJECT_CHECK(MachineState, (obj), TYPE_MACHINE)
-#define MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MachineClass, (obj), TYPE_MACHINE)
-#define MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(MachineClass, (klass), TYPE_MACHINE)
+OBJECT_DECLARE_TYPE(MachineState, MachineClass, MACHINE)
extern MachineState *current_machine;
-void machine_run_board_init(MachineState *machine);
+/**
+ * machine_class_default_cpu_type: Return the machine default CPU type.
+ * @mc: Machine class
+ */
+const char *machine_class_default_cpu_type(MachineClass *mc);
+
+void machine_add_audiodev_property(MachineClass *mc);
+void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp);
bool machine_usb(MachineState *machine);
int machine_phandle_start(MachineState *machine);
bool machine_dump_guest_core(MachineState *machine);
bool machine_mem_merge(MachineState *machine);
+bool machine_require_guest_memfd(MachineState *machine);
HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine);
void machine_set_cpu_numa_node(MachineState *machine,
const CpuInstanceProperties *props,
Error **errp);
+void machine_parse_smp_config(MachineState *ms,
+ const SMPConfiguration *config, Error **errp);
+unsigned int machine_topo_get_cores_per_socket(const MachineState *ms);
+unsigned int machine_topo_get_threads_per_socket(const MachineState *ms);
+void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size);
+/**
+ * machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices
+ * @mc: Machine class
+ * @type: type to allow (should be a subtype of TYPE_SYS_BUS_DEVICE)
+ *
+ * Add the QOM type @type to the list of devices which are subtypes of
+ * TYPE_SYS_BUS_DEVICE but which are still permitted to be dynamically
+ * created (e.g. by the user on the command line with -device).
+ * By default if the user tries to create any devices on the command line
+ * that are subtypes of TYPE_SYS_BUS_DEVICE they will get an error message;
+ * for the special cases which are permitted for this machine model, the
+ * machine model class init code must call this function to add them
+ * to the list of specifically permitted devices.
+ */
void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type);
+
+/**
+ * device_type_is_dynamic_sysbus: Check if type is an allowed sysbus device
+ * type for the machine class.
+ * @mc: Machine class
+ * @type: type to check (should be a subtype of TYPE_SYS_BUS_DEVICE)
+ *
+ * Returns: true if @type is a type in the machine's list of
+ * dynamically pluggable sysbus devices; otherwise false.
+ *
+ * Check if the QOM type @type is in the list of allowed sysbus device
+ * types (see machine_class_allow_dynamic_sysbus_dev()).
+ * Note that if @type has a parent type in the list, it is allowed too.
+ */
+bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type);
+
+/**
+ * device_is_dynamic_sysbus: test whether device is a dynamic sysbus device
+ * @mc: Machine class
+ * @dev: device to check
+ *
+ * Returns: true if @dev is a sysbus device on the machine's list
+ * of dynamically pluggable sysbus devices; otherwise false.
+ *
+ * This function checks whether @dev is a valid dynamic sysbus device,
+ * by first confirming that it is a sysbus device and then checking it
+ * against the list of permitted dynamic sysbus devices which has been
+ * set up by the machine using machine_class_allow_dynamic_sysbus_dev().
+ *
+ * It is valid to call this with something that is not a subclass of
+ * TYPE_SYS_BUS_DEVICE; the function will return false in this case.
+ * This allows hotplug callback functions to be written as:
+ * if (device_is_dynamic_sysbus(mc, dev)) {
+ * handle dynamic sysbus case;
+ * } else if (some other kind of hotplug) {
+ * handle that;
+ * }
+ */
+bool device_is_dynamic_sysbus(MachineClass *mc, DeviceState *dev);
+
/*
* Checks that backend isn't used, preps it for exclusive usage and
* returns migratable MemoryRegion provided by backend.
@@ -60,7 +121,7 @@ typedef struct CPUArchId {
uint64_t arch_id;
int64_t vcpus_count;
CpuInstanceProperties props;
- Object *cpu;
+ CPUState *cpu;
const char *type;
} CPUArchId;
@@ -75,6 +136,25 @@ typedef struct {
} CPUArchIdList;
/**
+ * SMPCompatProps:
+ * @prefer_sockets - whether sockets are preferred over cores in smp parsing
+ * @dies_supported - whether dies are supported by the machine
+ * @clusters_supported - whether clusters are supported by the machine
+ * @has_clusters - whether clusters are explicitly specified in the user
+ * provided SMP configuration
+ * @books_supported - whether books are supported by the machine
+ * @drawers_supported - whether drawers are supported by the machine
+ */
+typedef struct {
+ bool prefer_sockets;
+ bool dies_supported;
+ bool clusters_supported;
+ bool has_clusters;
+ bool books_supported;
+ bool drawers_supported;
+} SMPCompatProps;
+
+/**
* MachineClass:
* @deprecation_reason: If set, the machine is marked as deprecated. The
* string should provide some clear information about what to use instead.
@@ -90,7 +170,7 @@ typedef struct {
* any actions to be performed by hotplug handler.
* @cpu_index_to_instance_props:
* used to provide @cpu_index to socket/core/thread number mapping, allowing
- * legacy code to perform maping from cpu_index to topology properties
+ * legacy code to perform mapping from cpu_index to topology properties
* Returns: tuple of socket/core/thread ids given cpu_index belongs to.
* used to provide @cpu_index to socket number mapping, allowing
* a machine to group CPU threads belonging to the same socket/package
@@ -132,12 +212,9 @@ typedef struct {
* @kvm_type:
* Return the type of KVM corresponding to the kvm-type string option or
* computed based on other criteria such as the host kernel capabilities.
+ * kvm-type may be NULL if it is not needed.
* @numa_mem_supported:
* true if '--numa node.mem' option is supported and false otherwise
- * @smp_parse:
- * The function pointer to hook different machine specific functions for
- * parsing "smp-opts" from QemuOpts to MachineState::CpuTopology and more
- * machine specific topology fields, such as smp_dies for PCMachine.
* @hotplug_allowed:
* If the hook is provided, then it'll be called for each device
* hotplug to check whether the device hotplug is allowed. Return
@@ -146,10 +223,10 @@ typedef struct {
* the rejection. If the hook is not provided, all hotplug will be
* allowed.
* @default_ram_id:
- * Specifies inital RAM MemoryRegion name to be used for default backend
+ * Specifies initial RAM MemoryRegion name to be used for default backend
* creation if user explicitly hasn't specified backend with "memory-backend"
* property.
- * It also will be used as a way to optin into "-m" option support.
+ * It also will be used as a way to opt in to "-m" option support.
* If it's not set by board, '-m' will be ignored and generic code will
* not create default RAM MemoryRegion.
* @fixup_ram_size:
@@ -171,11 +248,9 @@ struct MachineClass {
const char *deprecation_reason;
void (*init)(MachineState *state);
- void (*reset)(MachineState *state);
+ void (*reset)(MachineState *state, ShutdownCause reason);
void (*wakeup)(MachineState *state);
- void (*hot_add_cpu)(MachineState *state, const int64_t id, Error **errp);
int (*kvm_type)(MachineState *machine, const char *arg);
- void (*smp_parse)(MachineState *ms, QemuOpts *opts);
BlockInterfaceType block_default_type;
int units_per_default_bus;
@@ -193,6 +268,7 @@ struct MachineClass {
const char *default_machine_opts;
const char *default_boot_order;
const char *default_display;
+ const char *default_nic;
GPtrArray *compat_props;
const char *hw_version;
ram_addr_t default_ram_size;
@@ -204,17 +280,17 @@ struct MachineClass {
bool has_hotpluggable_cpus;
bool ignore_memory_transaction_failures;
int numa_mem_align_shift;
- const char **valid_cpu_types;
+ const char * const *valid_cpu_types;
strList *allowed_dynamic_sysbus_devices;
bool auto_enable_numa_with_memhp;
bool auto_enable_numa_with_memdev;
- void (*numa_auto_assign_ram)(MachineClass *mc, NodeInfo *nodes,
- int nb_nodes, ram_addr_t size);
bool ignore_boot_device_suffixes;
bool smbus_no_migration_support;
bool nvdimm_supported;
bool numa_mem_supported;
bool auto_enable_numa;
+ bool cpu_cluster_has_numa_boundary;
+ SMPCompatProps smp_props;
const char *default_ram_id;
HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
@@ -232,26 +308,50 @@ struct MachineClass {
* DeviceMemoryState:
* @base: address in guest physical address space where the memory
* address space for memory devices starts
- * @mr: address space container for memory devices
+ * @mr: memory region container for memory devices
+ * @as: address space for memory devices
+ * @listener: memory listener used to track used memslots in the address space
+ * @dimm_size: the sum of plugged DIMMs' sizes
+ * @used_region_size: the part of @mr already used by memory devices
+ * @required_memslots: the number of memslots required by memory devices
+ * @used_memslots: the number of memslots currently used by memory devices
+ * @memslot_auto_decision_active: whether any plugged memory device
+ * automatically decided to use more than
+ * one memslot
*/
typedef struct DeviceMemoryState {
hwaddr base;
MemoryRegion mr;
+ AddressSpace as;
+ MemoryListener listener;
+ uint64_t dimm_size;
+ uint64_t used_region_size;
+ unsigned int required_memslots;
+ unsigned int used_memslots;
+ unsigned int memslot_auto_decision_active;
} DeviceMemoryState;
/**
* CpuTopology:
* @cpus: the number of present logical processors on the machine
- * @cores: the number of cores in one package
+ * @drawers: the number of drawers on the machine
+ * @books: the number of books in one drawer
+ * @sockets: the number of sockets in one book
+ * @dies: the number of dies in one socket
+ * @clusters: the number of clusters in one die
+ * @cores: the number of cores in one cluster
* @threads: the number of threads in one core
- * @sockets: the number of sockets on the machine
* @max_cpus: the maximum number of logical processors on the machine
*/
typedef struct CpuTopology {
unsigned int cpus;
+ unsigned int drawers;
+ unsigned int books;
+ unsigned int sockets;
+ unsigned int dies;
+ unsigned int clusters;
unsigned int cores;
unsigned int threads;
- unsigned int sockets;
unsigned int max_cpus;
} CpuTopology;
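As a rough illustration of the extended hierarchy (assuming a MachineState *ms, whose smp field is a CpuTopology), the number of logical processors on a fully specified machine is the product of all the levels:

    unsigned int total = ms->smp.drawers * ms->smp.books * ms->smp.sockets *
                         ms->smp.dies * ms->smp.clusters * ms->smp.cores *
                         ms->smp.threads;
    /* after -smp parsing this is expected to match ms->smp.max_cpus */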
@@ -261,25 +361,25 @@ typedef struct CpuTopology {
struct MachineState {
/*< private >*/
Object parent_obj;
- Notifier sysbus_notifier;
/*< public >*/
+ void *fdt;
char *dtb;
char *dumpdtb;
int phandle_start;
char *dt_compatible;
bool dump_guest_core;
bool mem_merge;
+ bool require_guest_memfd;
bool usb;
bool usb_disabled;
char *firmware;
bool iommu;
bool suppress_vmdesc;
- bool enforce_config_section;
bool enable_graphics;
- char *memory_encryption;
- char *ram_memdev_id;
+ ConfidentialGuestSupport *cgs;
+ HostMemoryBackend *memdev;
/*
* convenience alias to ram_memdev_id backend memory region
* or to numa container memory region
@@ -287,10 +387,18 @@ struct MachineState {
MemoryRegion *ram;
DeviceMemoryState *device_memory;
+ /*
+ * Included in MachineState for simplicity, but not supported
+ * unless machine_add_audiodev_property is called. Boards
+ * that have embedded audio devices can call it from the
+ * machine init function and forward the property to the device.
+ */
+ char *audiodev;
+
ram_addr_t ram_size;
ram_addr_t maxram_size;
uint64_t ram_slots;
- const char *boot_order;
+ BootConfiguration boot_config;
char *kernel_filename;
char *kernel_cmdline;
char *initrd_filename;
@@ -319,6 +427,42 @@ struct MachineState {
} \
type_init(machine_initfn##_register_types)
+extern GlobalProperty hw_compat_9_0[];
+extern const size_t hw_compat_9_0_len;
+
+extern GlobalProperty hw_compat_8_2[];
+extern const size_t hw_compat_8_2_len;
+
+extern GlobalProperty hw_compat_8_1[];
+extern const size_t hw_compat_8_1_len;
+
+extern GlobalProperty hw_compat_8_0[];
+extern const size_t hw_compat_8_0_len;
+
+extern GlobalProperty hw_compat_7_2[];
+extern const size_t hw_compat_7_2_len;
+
+extern GlobalProperty hw_compat_7_1[];
+extern const size_t hw_compat_7_1_len;
+
+extern GlobalProperty hw_compat_7_0[];
+extern const size_t hw_compat_7_0_len;
+
+extern GlobalProperty hw_compat_6_2[];
+extern const size_t hw_compat_6_2_len;
+
+extern GlobalProperty hw_compat_6_1[];
+extern const size_t hw_compat_6_1_len;
+
+extern GlobalProperty hw_compat_6_0[];
+extern const size_t hw_compat_6_0_len;
+
+extern GlobalProperty hw_compat_5_2[];
+extern const size_t hw_compat_5_2_len;
+
+extern GlobalProperty hw_compat_5_1[];
+extern const size_t hw_compat_5_1_len;
+
extern GlobalProperty hw_compat_5_0[];
extern const size_t hw_compat_5_0_len;
diff --git a/include/hw/char/avr_usart.h b/include/hw/char/avr_usart.h
index 5739aaf26f..0cc599e9b1 100644
--- a/include/hw/char/avr_usart.h
+++ b/include/hw/char/avr_usart.h
@@ -24,7 +24,7 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
-#include "hw/hw.h"
+#include "qom/object.h"
/* Offsets of registers. */
#define USART_DR 0x06
@@ -34,7 +34,7 @@
#define USART_BRRH 0x05
#define USART_BRRL 0x04
-/* Relevant bits in regiters. */
+/* Relevant bits in registers. */
#define USART_CSRA_RXC (1 << 7)
#define USART_CSRA_TXC (1 << 6)
#define USART_CSRA_DRE (1 << 5)
@@ -57,10 +57,9 @@
#define USART_CSRC_CSZ0 (1 << 1)
#define TYPE_AVR_USART "avr-usart"
-#define AVR_USART(obj) \
- OBJECT_CHECK(AVRUsartState, (obj), TYPE_AVR_USART)
+OBJECT_DECLARE_SIMPLE_TYPE(AVRUsartState, AVR_USART)
-typedef struct {
+struct AVRUsartState {
/* <private> */
SysBusDevice parent_obj;
@@ -88,6 +87,6 @@ typedef struct {
qemu_irq txc_irq;
/* Data Register Empty */
qemu_irq dre_irq;
-} AVRUsartState;
+};
#endif /* HW_CHAR_AVR_USART_H */
diff --git a/include/hw/char/bcm2835_aux.h b/include/hw/char/bcm2835_aux.h
index 934acf9c81..9e081793a0 100644
--- a/include/hw/char/bcm2835_aux.h
+++ b/include/hw/char/bcm2835_aux.h
@@ -11,13 +11,14 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_BCM2835_AUX "bcm2835-aux"
-#define BCM2835_AUX(obj) OBJECT_CHECK(BCM2835AuxState, (obj), TYPE_BCM2835_AUX)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835AuxState, BCM2835_AUX)
#define BCM2835_AUX_RX_FIFO_LEN 8
-typedef struct {
+struct BCM2835AuxState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -29,6 +30,6 @@ typedef struct {
uint8_t read_fifo[BCM2835_AUX_RX_FIFO_LEN];
uint8_t read_pos, read_count;
uint8_t ier, iir;
-} BCM2835AuxState;
+};
#endif
diff --git a/include/hw/char/cadence_uart.h b/include/hw/char/cadence_uart.h
index ed7b58d31d..e7f7cd8468 100644
--- a/include/hw/char/cadence_uart.h
+++ b/include/hw/char/cadence_uart.h
@@ -24,6 +24,7 @@
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define CADENCE_UART_RX_FIFO_SIZE 16
#define CADENCE_UART_TX_FIFO_SIZE 16
@@ -31,10 +32,9 @@
#define CADENCE_UART_R_MAX (0x48/4)
#define TYPE_CADENCE_UART "cadence_uart"
-#define CADENCE_UART(obj) OBJECT_CHECK(CadenceUARTState, (obj), \
- TYPE_CADENCE_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(CadenceUARTState, CADENCE_UART)
-typedef struct {
+struct CadenceUARTState {
/*< private >*/
SysBusDevice parent_obj;
@@ -51,23 +51,6 @@ typedef struct {
qemu_irq irq;
QEMUTimer *fifo_trigger_handle;
Clock *refclk;
-} CadenceUARTState;
-
-static inline DeviceState *cadence_uart_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new(TYPE_CADENCE_UART);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
+};
#endif
diff --git a/include/hw/char/cmsdk-apb-uart.h b/include/hw/char/cmsdk-apb-uart.h
index bc9069f9fd..7de8f8d1b9 100644
--- a/include/hw/char/cmsdk-apb-uart.h
+++ b/include/hw/char/cmsdk-apb-uart.h
@@ -12,15 +12,14 @@
#ifndef CMSDK_APB_UART_H
#define CMSDK_APB_UART_H
-#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_UART "cmsdk-apb-uart"
-#define CMSDK_APB_UART(obj) OBJECT_CHECK(CMSDKAPBUART, (obj), \
- TYPE_CMSDK_APB_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBUART, CMSDK_APB_UART)
-typedef struct {
+struct CMSDKAPBUART {
/*< private >*/
SysBusDevice parent_obj;
@@ -42,38 +41,6 @@ typedef struct {
/* This UART has no FIFO, only a 1-character buffer for each of Tx and Rx */
uint8_t txbuf;
uint8_t rxbuf;
-} CMSDKAPBUART;
-
-/**
- * cmsdk_apb_uart_create - convenience function to create TYPE_CMSDK_APB_UART
- * @addr: location in system memory to map registers
- * @chr: Chardev backend to connect UART to, or NULL if no backend
- * @pclk_frq: frequency in Hz of the PCLK clock (used for calculating baud rate)
- */
-static inline DeviceState *cmsdk_apb_uart_create(hwaddr addr,
- qemu_irq txint,
- qemu_irq rxint,
- qemu_irq txovrint,
- qemu_irq rxovrint,
- qemu_irq uartint,
- Chardev *chr,
- uint32_t pclk_frq)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new(TYPE_CMSDK_APB_UART);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_prop_set_uint32(dev, "pclk-frq", pclk_frq);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, txint);
- sysbus_connect_irq(s, 1, rxint);
- sysbus_connect_irq(s, 2, txovrint);
- sysbus_connect_irq(s, 3, rxovrint);
- sysbus_connect_irq(s, 4, uartint);
- return dev;
-}
+};
#endif
diff --git a/include/hw/char/digic-uart.h b/include/hw/char/digic-uart.h
index de9a3e3551..f710a1a099 100644
--- a/include/hw/char/digic-uart.h
+++ b/include/hw/char/digic-uart.h
@@ -20,10 +20,10 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_DIGIC_UART "digic-uart"
-#define DIGIC_UART(obj) \
- OBJECT_CHECK(DigicUartState, (obj), TYPE_DIGIC_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(DigicUartState, DIGIC_UART)
enum {
R_TX = 0x00,
@@ -32,7 +32,7 @@ enum {
R_MAX
};
-typedef struct DigicUartState {
+struct DigicUartState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -42,6 +42,6 @@ typedef struct DigicUartState {
uint32_t reg_rx;
uint32_t reg_st;
-} DigicUartState;
+};
#endif /* HW_CHAR_DIGIC_UART_H */
diff --git a/include/hw/char/escc.h b/include/hw/char/escc.h
index 794b653484..5669a5b811 100644
--- a/include/hw/char/escc.h
+++ b/include/hw/char/escc.h
@@ -5,12 +5,13 @@
#include "chardev/char-serial.h"
#include "hw/sysbus.h"
#include "ui/input.h"
+#include "qom/object.h"
/* escc.c */
#define TYPE_ESCC "escc"
#define ESCC_SIZE 4
-#define ESCC(obj) OBJECT_CHECK(ESCCState, (obj), TYPE_ESCC)
+OBJECT_DECLARE_SIMPLE_TYPE(ESCCState, ESCC)
typedef enum {
escc_chn_a, escc_chn_b,
@@ -44,9 +45,10 @@ typedef struct ESCCChannelState {
ESCCChnType type;
uint8_t rx, tx;
QemuInputHandlerState *hs;
+ char *sunkbd_layout;
} ESCCChannelState;
-typedef struct ESCCState {
+struct ESCCState {
SysBusDevice parent_obj;
struct ESCCChannelState chn[2];
@@ -55,6 +57,6 @@ typedef struct ESCCState {
MemoryRegion mmio;
uint32_t disabled;
uint32_t frequency;
-} ESCCState;
+};
#endif
diff --git a/include/hw/char/goldfish_tty.h b/include/hw/char/goldfish_tty.h
new file mode 100644
index 0000000000..d59733e5ae
--- /dev/null
+++ b/include/hw/char/goldfish_tty.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Goldfish TTY
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#ifndef HW_CHAR_GOLDFISH_TTY_H
+#define HW_CHAR_GOLDFISH_TTY_H
+
+#include "qemu/fifo8.h"
+#include "chardev/char-fe.h"
+#include "hw/sysbus.h"
+
+#define TYPE_GOLDFISH_TTY "goldfish_tty"
+OBJECT_DECLARE_SIMPLE_TYPE(GoldfishTTYState, GOLDFISH_TTY)
+
+#define GOLFISH_TTY_BUFFER_SIZE 128
+
+struct GoldfishTTYState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+ CharBackend chr;
+
+ uint32_t data_len;
+ uint64_t data_ptr;
+ bool int_enabled;
+
+ Fifo8 rx_fifo;
+};
+
+#endif
diff --git a/include/hw/char/grlib_uart.h b/include/hw/char/grlib_uart.h
new file mode 100644
index 0000000000..7496f8fd5e
--- /dev/null
+++ b/include/hw/char/grlib_uart.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU GRLIB UART
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2024 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRLIB_UART_H
+#define GRLIB_UART_H
+
+#define TYPE_GRLIB_APB_UART "grlib-apbuart"
+
+#endif
diff --git a/include/hw/char/ibex_uart.h b/include/hw/char/ibex_uart.h
index b6bd5a6700..9deadf223b 100644
--- a/include/hw/char/ibex_uart.h
+++ b/include/hw/char/ibex_uart.h
@@ -26,53 +26,17 @@
#define HW_IBEX_UART_H
#include "hw/sysbus.h"
-#include "hw/registerfields.h"
#include "chardev/char-fe.h"
#include "qemu/timer.h"
-
-REG32(INTR_STATE, 0x00)
- FIELD(INTR_STATE, TX_WATERMARK, 0, 1)
- FIELD(INTR_STATE, RX_WATERMARK, 1, 1)
- FIELD(INTR_STATE, TX_EMPTY, 2, 1)
- FIELD(INTR_STATE, RX_OVERFLOW, 3, 1)
-REG32(INTR_ENABLE, 0x04)
-REG32(INTR_TEST, 0x08)
-REG32(CTRL, 0x0C)
- FIELD(CTRL, TX_ENABLE, 0, 1)
- FIELD(CTRL, RX_ENABLE, 1, 1)
- FIELD(CTRL, NF, 2, 1)
- FIELD(CTRL, SLPBK, 4, 1)
- FIELD(CTRL, LLPBK, 5, 1)
- FIELD(CTRL, PARITY_EN, 6, 1)
- FIELD(CTRL, PARITY_ODD, 7, 1)
- FIELD(CTRL, RXBLVL, 8, 2)
- FIELD(CTRL, NCO, 16, 16)
-REG32(STATUS, 0x10)
- FIELD(STATUS, TXFULL, 0, 1)
- FIELD(STATUS, RXFULL, 1, 1)
- FIELD(STATUS, TXEMPTY, 2, 1)
- FIELD(STATUS, RXIDLE, 4, 1)
- FIELD(STATUS, RXEMPTY, 5, 1)
-REG32(RDATA, 0x14)
-REG32(WDATA, 0x18)
-REG32(FIFO_CTRL, 0x1c)
- FIELD(FIFO_CTRL, RXRST, 0, 1)
- FIELD(FIFO_CTRL, TXRST, 1, 1)
- FIELD(FIFO_CTRL, RXILVL, 2, 3)
- FIELD(FIFO_CTRL, TXILVL, 5, 2)
-REG32(FIFO_STATUS, 0x20)
-REG32(OVRD, 0x24)
-REG32(VAL, 0x28)
-REG32(TIMEOUT_CTRL, 0x2c)
+#include "qom/object.h"
#define IBEX_UART_TX_FIFO_SIZE 16
#define IBEX_UART_CLOCK 50000000 /* 50MHz clock */
#define TYPE_IBEX_UART "ibex-uart"
-#define IBEX_UART(obj) \
- OBJECT_CHECK(IbexUartState, (obj), TYPE_IBEX_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(IbexUartState, IBEX_UART)
-typedef struct {
+struct IbexUartState {
/* <private> */
SysBusDevice parent_obj;
@@ -82,6 +46,8 @@ typedef struct {
uint8_t tx_fifo[IBEX_UART_TX_FIFO_SIZE];
uint32_t tx_level;
+ uint32_t rx_level;
+
QEMUTimer *fifo_trigger_handle;
uint64_t char_tx_time;
@@ -103,5 +69,5 @@ typedef struct {
qemu_irq rx_watermark;
qemu_irq tx_empty;
qemu_irq rx_overflow;
-} IbexUartState;
+};
#endif /* HW_IBEX_UART_H */
diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h
index c8b74284f8..65f0e97c76 100644
--- a/include/hw/char/imx_serial.h
+++ b/include/hw/char/imx_serial.h
@@ -20,12 +20,17 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
+#include "qemu/fifo32.h"
#define TYPE_IMX_SERIAL "imx.serial"
-#define IMX_SERIAL(obj) OBJECT_CHECK(IMXSerialState, (obj), TYPE_IMX_SERIAL)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXSerialState, IMX_SERIAL)
+
+#define FIFO_SIZE 32
#define URXD_CHARRDY (1<<15) /* character read is valid */
#define URXD_ERR (1<<14) /* Character has error */
+#define URXD_OVRRUN (1<<13) /* 32nd character in RX FIFO */
#define URXD_FRMERR (1<<12) /* Character has frame error */
#define URXD_BRK (1<<11) /* Break received */
@@ -64,25 +69,40 @@
#define UCR1_TXMPTYEN (1<<6) /* Tx Empty Interrupt Enable */
#define UCR1_UARTEN (1<<0) /* UART Enable */
+#define UCR2_ATEN (1<<3) /* Ageing Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enable */
#define UCR2_RXEN (1<<1) /* Receiver enable */
#define UCR2_SRST (1<<0) /* Reset complete */
#define UCR4_DREN BIT(0) /* Receive Data Ready interrupt enable */
+#define UCR4_OREN BIT(1) /* Overrun interrupt enable */
#define UCR4_TCEN BIT(3) /* TX complete interrupt enable */
+#define UCR4_WKEN BIT(7) /* WAKE interrupt enable */
#define UTS1_TXEMPTY (1<<6)
#define UTS1_RXEMPTY (1<<5)
#define UTS1_TXFULL (1<<4)
#define UTS1_RXFULL (1<<3)
-typedef struct IMXSerialState {
+#define TL_MASK 0x3f
+
+ /* Bit time in nanoseconds assuming maximum baud rate of 115200 */
+#define BIT_TIME_NS 8681
+
+/* Assume 8 bits per character */
+#define NUM_BITS 8
+
+/* Ageing timer triggers after 8 characters */
+#define AGE_DURATION_NS (8 * NUM_BITS * BIT_TIME_NS)
+
+struct IMXSerialState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
- int32_t readbuff;
+ QEMUTimer ageing_timer;
+ Fifo32 rx_fifo;
uint32_t usr1;
uint32_t usr2;
@@ -103,6 +123,6 @@ typedef struct IMXSerialState {
qemu_irq irq;
CharBackend chr;
-} IMXSerialState;
+};
#endif
diff --git a/include/hw/char/lm32_juart.h b/include/hw/char/lm32_juart.h
deleted file mode 100644
index 6fce278326..0000000000
--- a/include/hw/char/lm32_juart.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef QEMU_HW_CHAR_LM32_JUART_H
-#define QEMU_HW_CHAR_LM32_JUART_H
-
-#include "hw/qdev-core.h"
-
-#define TYPE_LM32_JUART "lm32-juart"
-
-uint32_t lm32_juart_get_jtx(DeviceState *d);
-uint32_t lm32_juart_get_jrx(DeviceState *d);
-void lm32_juart_set_jtx(DeviceState *d, uint32_t jtx);
-void lm32_juart_set_jrx(DeviceState *d, uint32_t jrx);
-
-#endif /* QEMU_HW_CHAR_LM32_JUART_H */
diff --git a/include/hw/char/mchp_pfsoc_mmuart.h b/include/hw/char/mchp_pfsoc_mmuart.h
new file mode 100644
index 0000000000..b0e14ca355
--- /dev/null
+++ b/include/hw/char/mchp_pfsoc_mmuart.h
@@ -0,0 +1,68 @@
+/*
+ * Microchip PolarFire SoC MMUART emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_MCHP_PFSOC_MMUART_H
+#define HW_MCHP_PFSOC_MMUART_H
+
+#include "hw/sysbus.h"
+#include "hw/char/serial.h"
+
+#define MCHP_PFSOC_MMUART_REG_COUNT 13
+
+#define TYPE_MCHP_PFSOC_UART "mchp.pfsoc.uart"
+OBJECT_DECLARE_SIMPLE_TYPE(MchpPfSoCMMUartState, MCHP_PFSOC_UART)
+
+typedef struct MchpPfSoCMMUartState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion container;
+ MemoryRegion iomem;
+
+ SerialMM serial_mm;
+
+ uint32_t reg[MCHP_PFSOC_MMUART_REG_COUNT];
+} MchpPfSoCMMUartState;
+
+/**
+ * mchp_pfsoc_mmuart_create - Create a Microchip PolarFire SoC MMUART
+ *
+ * This is a helper routine for boards to create an MMUART device that is
+ * compatible with the Microchip PolarFire SoC.
+ *
+ * @sysmem: system memory region to map
+ * @base: base address of the MMUART registers
+ * @irq: IRQ number of the MMUART device
+ * @chr: character device to associate to
+ *
+ * @return: a pointer to the device specific control structure
+ */
+MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem,
+ hwaddr base, qemu_irq irq, Chardev *chr);
+
+#endif /* HW_MCHP_PFSOC_MMUART_H */
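A board would typically call this helper roughly as follows; MMUART0_BASE, MMUART0_IRQ and plic are placeholder names used only in this sketch:

    MchpPfSoCMMUartState *uart;

    uart = mchp_pfsoc_mmuart_create(get_system_memory(), MMUART0_BASE,
                                    qdev_get_gpio_in(DEVICE(plic), MMUART0_IRQ),
                                    serial_hd(0));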
diff --git a/include/hw/char/nrf51_uart.h b/include/hw/char/nrf51_uart.h
index eb1c15b490..561b6383c4 100644
--- a/include/hw/char/nrf51_uart.h
+++ b/include/hw/char/nrf51_uart.h
@@ -14,12 +14,13 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
#include "hw/registerfields.h"
+#include "qom/object.h"
#define UART_FIFO_LENGTH 6
#define UART_SIZE 0x1000
#define TYPE_NRF51_UART "nrf51_soc.uart"
-#define NRF51_UART(obj) OBJECT_CHECK(NRF51UARTState, (obj), TYPE_NRF51_UART)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51UARTState, NRF51_UART)
REG32(UART_STARTRX, 0x000)
REG32(UART_STOPRX, 0x004)
@@ -54,7 +55,7 @@ REG32(UART_TXD, 0x51C)
REG32(UART_BAUDRATE, 0x524)
REG32(UART_CONFIG, 0x56C)
-typedef struct NRF51UARTState {
+struct NRF51UARTState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -72,6 +73,6 @@ typedef struct NRF51UARTState {
bool tx_started;
bool pending_tx_byte;
bool enabled;
-} NRF51UARTState;
+};
#endif
diff --git a/include/hw/char/parallel-isa.h b/include/hw/char/parallel-isa.h
new file mode 100644
index 0000000000..5284b2ffec
--- /dev/null
+++ b/include/hw/char/parallel-isa.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU ISA Parallel PORT emulation
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2007 Marko Kohtala
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef HW_PARALLEL_ISA_H
+#define HW_PARALLEL_ISA_H
+
+#include "parallel.h"
+
+#include "exec/ioport.h"
+#include "hw/isa/isa.h"
+#include "qom/object.h"
+
+#define TYPE_ISA_PARALLEL "isa-parallel"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAParallelState, ISA_PARALLEL)
+
+struct ISAParallelState {
+ ISADevice parent_obj;
+
+ uint32_t index;
+ uint32_t iobase;
+ uint32_t isairq;
+ ParallelState state;
+ PortioList portio_list;
+};
+
+void isa_parallel_set_iobase(ISADevice *parallel, hwaddr iobase);
+void isa_parallel_set_enabled(ISADevice *parallel, bool enabled);
+
+#endif /* HW_PARALLEL_ISA_H */
diff --git a/include/hw/char/parallel.h b/include/hw/char/parallel.h
index 0a23c0f57e..cfb97cc7cc 100644
--- a/include/hw/char/parallel.h
+++ b/include/hw/char/parallel.h
@@ -1,9 +1,28 @@
#ifndef HW_PARALLEL_H
#define HW_PARALLEL_H
+#include "exec/memory.h"
#include "hw/isa/isa.h"
+#include "hw/irq.h"
+#include "chardev/char-fe.h"
#include "chardev/char.h"
+typedef struct ParallelState {
+ MemoryRegion iomem;
+ uint8_t dataw;
+ uint8_t datar;
+ uint8_t status;
+ uint8_t control;
+ qemu_irq irq;
+ int irq_pending;
+ CharBackend chr;
+ int hw_driver;
+ int epp_timeout;
+ uint32_t last_read_offset; /* For debugging */
+ /* Memory-mapped interface */
+ int it_shift;
+} ParallelState;
+
void parallel_hds_isa_init(ISABus *bus, int n);
bool parallel_mm_init(MemoryRegion *address_space,
diff --git a/include/hw/char/pl011.h b/include/hw/char/pl011.h
index bed758350f..d853802132 100644
--- a/include/hw/char/pl011.h
+++ b/include/hw/char/pl011.h
@@ -15,18 +15,20 @@
#ifndef HW_PL011_H
#define HW_PL011_H
-#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
-#include "qapi/error.h"
+#include "qom/object.h"
#define TYPE_PL011 "pl011"
-#define PL011(obj) OBJECT_CHECK(PL011State, (obj), TYPE_PL011)
+OBJECT_DECLARE_SIMPLE_TYPE(PL011State, PL011)
/* This shares the same struct (and cast macro) as the base pl011 device */
#define TYPE_PL011_LUMINARY "pl011_luminary"
-typedef struct PL011State {
+/* Depth of UART FIFO in bytes, when FIFO mode is enabled (else depth == 1) */
+#define PL011_FIFO_DEPTH 16
+
+struct PL011State {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -38,7 +40,7 @@ typedef struct PL011State {
uint32_t dmacr;
uint32_t int_enabled;
uint32_t int_level;
- uint32_t read_fifo[16];
+ uint32_t read_fifo[PL011_FIFO_DEPTH];
uint32_t ilpr;
uint32_t ibrd;
uint32_t fbrd;
@@ -48,41 +50,11 @@ typedef struct PL011State {
int read_trigger;
CharBackend chr;
qemu_irq irq[6];
+ Clock *clk;
+ bool migrate_clk;
const unsigned char *id;
-} PL011State;
-
-static inline DeviceState *pl011_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("pl011");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
-
-static inline DeviceState *pl011_luminary_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("pl011_luminary");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
+};
- return dev;
-}
+DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr);
#endif
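With the inline helpers gone, board code calls the out-of-line constructor instead; a minimal sketch (the address and IRQ wiring below are placeholders, not taken from any real board):

    DeviceState *uart = pl011_create(0x09000000,
                                     qdev_get_gpio_in(DEVICE(gic), 33),
                                     serial_hd(0));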
diff --git a/include/hw/char/renesas_sci.h b/include/hw/char/renesas_sci.h
index efdebc620a..a4764e3eee 100644
--- a/include/hw/char/renesas_sci.h
+++ b/include/hw/char/renesas_sci.h
@@ -11,9 +11,12 @@
#include "chardev/char-fe.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_RENESAS_SCI "renesas-sci"
-#define RSCI(obj) OBJECT_CHECK(RSCIState, (obj), TYPE_RENESAS_SCI)
+typedef struct RSCIState RSCIState;
+DECLARE_INSTANCE_CHECKER(RSCIState, RSCI,
+ TYPE_RENESAS_SCI)
enum {
ERI = 0,
@@ -23,7 +26,7 @@ enum {
SCI_NR_IRQ = 4
};
-typedef struct {
+struct RSCIState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -46,6 +49,6 @@ typedef struct {
int64_t trtime;
int64_t rx_next;
uint64_t input_freq;
-} RSCIState;
+};
#endif
diff --git a/include/hw/riscv/riscv_htif.h b/include/hw/char/riscv_htif.h
index fb9452cf51..df493fdf6b 100644
--- a/include/hw/riscv/riscv_htif.h
+++ b/include/hw/char/riscv_htif.h
@@ -23,7 +23,6 @@
#include "chardev/char.h"
#include "chardev/char-fe.h"
#include "exec/memory.h"
-#include "target/riscv/cpu.h"
#define TYPE_HTIF_UART "riscv.htif.uart"
@@ -31,29 +30,25 @@ typedef struct HTIFState {
int allow_tohost;
int fromhost_inprogress;
+ uint64_t tohost;
+ uint64_t fromhost;
hwaddr tohost_offset;
hwaddr fromhost_offset;
- uint64_t tohost_size;
- uint64_t fromhost_size;
MemoryRegion mmio;
- MemoryRegion *address_space;
- MemoryRegion *main_mem;
- void *main_mem_ram_ptr;
- CPURISCVState *env;
CharBackend chr;
uint64_t pending_read;
} HTIFState;
-extern const VMStateDescription vmstate_htif;
-extern const MemoryRegionOps htif_io_ops;
+extern const char *sig_file;
+extern uint8_t line_size;
/* HTIF symbol callback */
void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value,
uint64_t st_size);
/* legacy pre qom */
-HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem,
- CPURISCVState *env, Chardev *chr);
+HTIFState *htif_mm_init(MemoryRegion *address_space, Chardev *chr,
+ uint64_t nonelf_base, bool custom_base);
#endif
diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h
index 535fa23a2b..6e14099ee7 100644
--- a/include/hw/char/serial.h
+++ b/include/hw/char/serial.h
@@ -31,10 +31,11 @@
#include "qemu/fifo8.h"
#include "chardev/char.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define UART_FIFO_LENGTH 16 /* 16550A Fifo Length */
-typedef struct SerialState {
+struct SerialState {
DeviceState parent;
uint16_t divider;
@@ -60,7 +61,7 @@ typedef struct SerialState {
uint32_t baudbase;
uint32_t tsr_retry;
guint watch_tag;
- uint32_t wakeup;
+ bool wakeup;
/* Time when the last byte was successfully sent out of the tsr */
uint64_t last_xmit_ts;
@@ -77,22 +78,17 @@ typedef struct SerialState {
QEMUTimer *modem_status_poll;
MemoryRegion io;
-} SerialState;
+};
+typedef struct SerialState SerialState;
-typedef struct SerialMM {
+struct SerialMM {
SysBusDevice parent;
SerialState serial;
uint8_t regshift;
uint8_t endianness;
-} SerialMM;
-
-typedef struct SerialIO {
- SysBusDevice parent;
-
- SerialState serial;
-} SerialIO;
+};
extern const VMStateDescription vmstate_serial;
extern const MemoryRegionOps serial_io_ops;
@@ -100,13 +96,10 @@ extern const MemoryRegionOps serial_io_ops;
void serial_set_frequency(SerialState *s, uint32_t frequency);
#define TYPE_SERIAL "serial"
-#define SERIAL(s) OBJECT_CHECK(SerialState, (s), TYPE_SERIAL)
+OBJECT_DECLARE_SIMPLE_TYPE(SerialState, SERIAL)
#define TYPE_SERIAL_MM "serial-mm"
-#define SERIAL_MM(s) OBJECT_CHECK(SerialMM, (s), TYPE_SERIAL_MM)
-
-#define TYPE_SERIAL_IO "serial-io"
-#define SERIAL_IO(s) OBJECT_CHECK(SerialIO, (s), TYPE_SERIAL_IO)
+OBJECT_DECLARE_SIMPLE_TYPE(SerialMM, SERIAL_MM)
SerialMM *serial_mm_init(MemoryRegion *address_space,
hwaddr base, int regshift,
@@ -119,5 +112,7 @@ SerialMM *serial_mm_init(MemoryRegion *address_space,
#define TYPE_ISA_SERIAL "isa-serial"
void serial_hds_isa_init(ISABus *bus, int from, int to);
+void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase);
+void isa_serial_set_enabled(ISADevice *serial, bool enabled);
#endif
diff --git a/include/hw/char/shakti_uart.h b/include/hw/char/shakti_uart.h
new file mode 100644
index 0000000000..526c408233
--- /dev/null
+++ b/include/hw/char/shakti_uart.h
@@ -0,0 +1,74 @@
+/*
+ * SHAKTI UART
+ *
+ * Copyright (c) 2021 Vijai Kumar K <vijai@behindbytes.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SHAKTI_UART_H
+#define HW_SHAKTI_UART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+
+#define SHAKTI_UART_BAUD 0x00
+#define SHAKTI_UART_TX 0x04
+#define SHAKTI_UART_RX 0x08
+#define SHAKTI_UART_STATUS 0x0C
+#define SHAKTI_UART_DELAY 0x10
+#define SHAKTI_UART_CONTROL 0x14
+#define SHAKTI_UART_INT_EN 0x18
+#define SHAKTI_UART_IQ_CYCLES 0x1C
+#define SHAKTI_UART_RX_THRES 0x20
+
+#define SHAKTI_UART_STATUS_TX_EMPTY (1 << 0)
+#define SHAKTI_UART_STATUS_TX_FULL (1 << 1)
+#define SHAKTI_UART_STATUS_RX_NOT_EMPTY (1 << 2)
+#define SHAKTI_UART_STATUS_RX_FULL (1 << 3)
+/* 9600 8N1 is the default setting */
+/* Reg value = (50000000 Hz)/(16 * 9600)*/
+#define SHAKTI_UART_BAUD_DEFAULT 0x0145
+#define SHAKTI_UART_CONTROL_DEFAULT 0x0100
+
+#define TYPE_SHAKTI_UART "shakti-uart"
+#define SHAKTI_UART(obj) \
+ OBJECT_CHECK(ShaktiUartState, (obj), TYPE_SHAKTI_UART)
+
+typedef struct {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ uint32_t uart_baud;
+ uint32_t uart_tx;
+ uint32_t uart_rx;
+ uint32_t uart_status;
+ uint32_t uart_delay;
+ uint32_t uart_control;
+ uint32_t uart_interrupt;
+ uint32_t uart_iq_cycles;
+ uint32_t uart_rx_threshold;
+
+ CharBackend chr;
+} ShaktiUartState;
+
+#endif /* HW_SHAKTI_UART_H */
diff --git a/include/hw/riscv/sifive_uart.h b/include/hw/char/sifive_uart.h
index 65668825a3..7f6c79f8bd 100644
--- a/include/hw/riscv/sifive_uart.h
+++ b/include/hw/char/sifive_uart.h
@@ -21,7 +21,9 @@
#define HW_SIFIVE_UART_H
#include "chardev/char-fe.h"
+#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
enum {
SIFIVE_UART_TXFIFO = 0,
@@ -48,13 +50,12 @@ enum {
#define SIFIVE_UART_GET_TXCNT(txctrl) ((txctrl >> 16) & 0x7)
#define SIFIVE_UART_GET_RXCNT(rxctrl) ((rxctrl >> 16) & 0x7)
+#define SIFIVE_UART_RX_FIFO_SIZE 8
#define TYPE_SIFIVE_UART "riscv.sifive.uart"
+OBJECT_DECLARE_SIMPLE_TYPE(SiFiveUARTState, SIFIVE_UART)
-#define SIFIVE_UART(obj) \
- OBJECT_CHECK(SiFiveUARTState, (obj), TYPE_SIFIVE_UART)
-
-typedef struct SiFiveUARTState {
+struct SiFiveUARTState {
/*< private >*/
SysBusDevice parent_obj;
@@ -62,14 +63,14 @@ typedef struct SiFiveUARTState {
qemu_irq irq;
MemoryRegion mmio;
CharBackend chr;
- uint8_t rx_fifo[8];
- unsigned int rx_fifo_len;
+ uint8_t rx_fifo[SIFIVE_UART_RX_FIFO_SIZE];
+ uint8_t rx_fifo_len;
uint32_t ie;
uint32_t ip;
uint32_t txctrl;
uint32_t rxctrl;
uint32_t div;
-} SiFiveUARTState;
+};
SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base,
Chardev *chr, qemu_irq irq);
diff --git a/include/hw/char/stm32f2xx_usart.h b/include/hw/char/stm32f2xx_usart.h
index 8e112671e3..fdfa7424a7 100644
--- a/include/hw/char/stm32f2xx_usart.h
+++ b/include/hw/char/stm32f2xx_usart.h
@@ -27,6 +27,7 @@
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define USART_SR 0x00
#define USART_DR 0x04
@@ -47,16 +48,17 @@
#define USART_SR_TC (1 << 6)
#define USART_SR_RXNE (1 << 5)
-#define USART_CR1_UE (1 << 13)
-#define USART_CR1_RXNEIE (1 << 5)
-#define USART_CR1_TE (1 << 3)
-#define USART_CR1_RE (1 << 2)
+#define USART_CR1_UE (1 << 13)
+#define USART_CR1_TXEIE (1 << 7)
+#define USART_CR1_TCEIE (1 << 6)
+#define USART_CR1_RXNEIE (1 << 5)
+#define USART_CR1_TE (1 << 3)
+#define USART_CR1_RE (1 << 2)
#define TYPE_STM32F2XX_USART "stm32f2xx-usart"
-#define STM32F2XX_USART(obj) \
- OBJECT_CHECK(STM32F2XXUsartState, (obj), TYPE_STM32F2XX_USART)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXUsartState, STM32F2XX_USART)
-typedef struct {
+struct STM32F2XXUsartState {
/* <private> */
SysBusDevice parent_obj;
@@ -73,5 +75,5 @@ typedef struct {
CharBackend chr;
qemu_irq irq;
-} STM32F2XXUsartState;
+};
#endif /* HW_STM32F2XX_USART_H */
diff --git a/include/hw/char/xilinx_uartlite.h b/include/hw/char/xilinx_uartlite.h
index bb32d0fcb3..36d4e8444d 100644
--- a/include/hw/char/xilinx_uartlite.h
+++ b/include/hw/char/xilinx_uartlite.h
@@ -15,24 +15,9 @@
#ifndef XILINX_UARTLITE_H
#define XILINX_UARTLITE_H
-#include "hw/qdev-properties.h"
-#include "hw/sysbus.h"
+#include "qom/object.h"
-static inline DeviceState *xilinx_uartlite_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("xlnx.xps-uartlite");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
+#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite"
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE)
#endif
diff --git a/include/hw/clock.h b/include/hw/clock.h
index f822a94220..eb58599131 100644
--- a/include/hw/clock.h
+++ b/include/hw/clock.h
@@ -16,11 +16,24 @@
#include "qom/object.h"
#include "qemu/queue.h"
+#include "qemu/host-utils.h"
+#include "qemu/bitops.h"
#define TYPE_CLOCK "clock"
-#define CLOCK(obj) OBJECT_CHECK(Clock, (obj), TYPE_CLOCK)
+OBJECT_DECLARE_SIMPLE_TYPE(Clock, CLOCK)
-typedef void ClockCallback(void *opaque);
+/*
+ * Argument to ClockCallback functions indicating why the callback
+ * has been called. A mask of these values logically ORed together
+ * is used to specify which events are interesting when the callback
+ * is registered, so these values must all be different bit values.
+ */
+typedef enum ClockEvent {
+ ClockUpdate = 1, /* Clock period has just updated */
+ ClockPreUpdate = 2, /* Clock period is about to update */
+} ClockEvent;
+
+typedef void ClockCallback(void *opaque, ClockEvent event);
/*
* clock store a value representing the clock's period in 2^-32ns unit.
@@ -38,7 +51,6 @@ typedef void ClockCallback(void *opaque);
* macro helpers to convert to hertz / nanosecond
*/
#define CLOCK_PERIOD_FROM_NS(ns) ((ns) * (CLOCK_PERIOD_1SEC / 1000000000llu))
-#define CLOCK_PERIOD_TO_NS(per) ((per) / (CLOCK_PERIOD_1SEC / 1000000000llu))
#define CLOCK_PERIOD_FROM_HZ(hz) (((hz) != 0) ? CLOCK_PERIOD_1SEC / (hz) : 0u)
#define CLOCK_PERIOD_TO_HZ(per) (((per) != 0) ? CLOCK_PERIOD_1SEC / (per) : 0u)
@@ -49,12 +61,12 @@ typedef void ClockCallback(void *opaque);
* @canonical_path: clock path string cache (used for trace purpose)
* @callback: called when clock changes
* @callback_opaque: argument for @callback
+ * @callback_events: mask of events when callback should be called
* @source: source (or parent in clock tree) of the clock
* @children: list of clocks connected to this one (it is their source)
* @sibling: structure used to form a clock list
*/
-typedef struct Clock Clock;
struct Clock {
/*< private >*/
@@ -67,6 +79,11 @@ struct Clock {
char *canonical_path;
ClockCallback *callback;
void *callback_opaque;
+ unsigned int callback_events;
+
+ /* Ratio of the parent clock to run the child clocks at */
+ uint32_t multiplier;
+ uint32_t divider;
/* Clocks are organized in a clock tree */
Clock *source;
@@ -82,6 +99,11 @@ extern const VMStateDescription vmstate_clock;
VMSTATE_CLOCK_V(field, state, 0)
#define VMSTATE_CLOCK_V(field, state, version) \
VMSTATE_STRUCT_POINTER_V(field, state, version, vmstate_clock, Clock)
+#define VMSTATE_ARRAY_CLOCK(field, state, num) \
+ VMSTATE_ARRAY_CLOCK_V(field, state, num, 0)
+#define VMSTATE_ARRAY_CLOCK_V(field, state, num, version) \
+ VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(field, state, num, version, \
+ vmstate_clock, Clock)
/**
* clock_setup_canonical_path:
@@ -92,14 +114,32 @@ extern const VMStateDescription vmstate_clock;
void clock_setup_canonical_path(Clock *clk);
/**
+ * clock_new:
+ * @parent: the clock parent
+ * @name: the clock object name
+ *
+ * Helper function to create a new clock and parent it to @parent. There is no
+ * need to call clock_setup_canonical_path on the returned clock as it is done
+ * by this function.
+ *
+ * @return the newly created clock
+ */
+Clock *clock_new(Object *parent, const char *name);
+
+/**
* clock_set_callback:
* @clk: the clock to register the callback into
* @cb: the callback function
* @opaque: the argument to the callback
+ * @events: the events the callback should be called for
+ * (logical OR of ClockEvent enum values)
*
* Register a callback called on every clock update.
+ * Note that a clock has only one callback: you cannot register
+ * different callback functions for different events.
*/
-void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque);
+void clock_set_callback(Clock *clk, ClockCallback *cb,
+ void *opaque, unsigned int events);
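A minimal sketch of registering for period updates under the new signature; MyDeviceState and mydev_clk_update are hypothetical names:

    static void mydev_clk_update(void *opaque, ClockEvent event)
    {
        MyDeviceState *s = opaque;   /* hypothetical device state */

        if (event == ClockUpdate) {
            /* the period has changed: recompute baud rate, deadlines, ... */
        }
    }

    /* typically from the device's instance_init or realize: */
    clock_set_callback(s->clk_in, mydev_clk_update, s, ClockUpdate);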
/**
* clock_clear_callback:
@@ -122,22 +162,39 @@ void clock_clear_callback(Clock *clk);
void clock_set_source(Clock *clk, Clock *src);
/**
+ * clock_has_source:
+ * @clk: the clock
+ *
+ * Returns true if the clock has a source clock connected to it.
+ * This is useful for devices which have input clocks which must
+ * be connected by the board/SoC code which creates them. The
+ * device code can use this to check in its realize method that
+ * the clock has been connected.
+ */
+static inline bool clock_has_source(const Clock *clk)
+{
+ return clk->source != NULL;
+}
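A device realize method might use it along these lines (MYDEV and the clk-in input are placeholders for the sketch):

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyDeviceState *s = MYDEV(dev);

        if (!clock_has_source(s->clk_in)) {
            error_setg(errp, "mydev: clk-in must be connected by the board");
            return;
        }
        /* ... */
    }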
+
+/**
* clock_set:
* @clk: the clock to initialize.
* @value: the clock's value, 0 means unclocked
*
* Set the local cached period value of @clk to @value.
+ *
+ * @return: true if the clock is changed.
*/
-void clock_set(Clock *clk, uint64_t value);
+bool clock_set(Clock *clk, uint64_t value);
-static inline void clock_set_hz(Clock *clk, unsigned hz)
+static inline bool clock_set_hz(Clock *clk, unsigned hz)
{
- clock_set(clk, CLOCK_PERIOD_FROM_HZ(hz));
+ return clock_set(clk, CLOCK_PERIOD_FROM_HZ(hz));
}
-static inline void clock_set_ns(Clock *clk, unsigned ns)
+static inline bool clock_set_ns(Clock *clk, unsigned ns)
{
- clock_set(clk, CLOCK_PERIOD_FROM_NS(ns));
+ return clock_set(clk, CLOCK_PERIOD_FROM_NS(ns));
}
/**
@@ -147,7 +204,7 @@ static inline void clock_set_ns(Clock *clk, unsigned ns)
* Propagate the clock period that has been previously configured using
* @clock_set(). This will update recursively all connected clocks.
* It is an error to call this function on a clock which has a source.
- * Note: this function must not be called during device inititialization
+ * Note: this function must not be called during device initialization
* or migration.
*/
void clock_propagate(Clock *clk);
@@ -163,8 +220,9 @@ void clock_propagate(Clock *clk);
*/
static inline void clock_update(Clock *clk, uint64_t value)
{
- clock_set(clk, value);
- clock_propagate(clk);
+ if (clock_set(clk, value)) {
+ clock_propagate(clk);
+ }
}
static inline void clock_update_hz(Clock *clk, unsigned hz)
@@ -193,9 +251,81 @@ static inline unsigned clock_get_hz(Clock *clk)
return CLOCK_PERIOD_TO_HZ(clock_get(clk));
}
-static inline unsigned clock_get_ns(Clock *clk)
+/**
+ * clock_ticks_to_ns:
+ * @clk: the clock to query
+ * @ticks: number of ticks
+ *
+ * Returns the length of time in nanoseconds for this clock
+ * to tick @ticks times. Because a clock can have a period
+ * which is not a whole number of nanoseconds, it is important
+ * to use this function when calculating things like timer
+ * expiry deadlines, rather than attempting to obtain a "period
+ * in nanoseconds" value and then multiplying that by a number
+ * of ticks.
+ *
+ * The result could in theory be too large to fit in a 64-bit
+ * value if the number of ticks and the clock period are both
+ * large; to avoid overflow the result will be saturated to INT64_MAX
+ * (because this is the largest valid input to the QEMUTimer APIs).
+ * Since INT64_MAX nanoseconds is almost 300 years, anything with
+ * an expiry later than that is in the "will never happen" category
+ * and callers can reasonably not special-case the saturated result.
+ */
+static inline uint64_t clock_ticks_to_ns(const Clock *clk, uint64_t ticks)
+{
+ uint64_t ns_low, ns_high;
+
+ /*
+ * clk->period is the period in units of 2^-32 ns, so
+ * (clk->period * ticks) is the required length of time in those
+ * units, and we can convert to nanoseconds by multiplying by
+ * 2^32, which is the same as shifting the 128-bit multiplication
+ * result right by 32.
+ */
+ mulu64(&ns_low, &ns_high, clk->period, ticks);
+ if (ns_high & MAKE_64BIT_MASK(31, 33)) {
+ return INT64_MAX;
+ }
+ return ns_low >> 32 | ns_high << 32;
+}
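For example, a timer deadline would normally be computed this way rather than via a "period in nanoseconds" value (s->timer and s->clk are hypothetical device fields):

    timer_mod(s->timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              clock_ticks_to_ns(s->clk, ticks));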
+
+/**
+ * clock_ns_to_ticks:
+ * @clk: the clock to query
+ * @ns: duration in nanoseconds
+ *
+ * Returns the number of ticks this clock would make in the given
+ * number of nanoseconds. Because a clock can have a period which
+ * is not a whole number of nanoseconds, it is important to use this
+ * function rather than attempting to obtain a "period in nanoseconds"
+ * value and then dividing the duration by that value.
+ *
+ * If the clock is stopped (ie it has period zero), returns 0.
+ *
+ * For some inputs the result could overflow a 64-bit value (because
+ * the clock's period is short and the duration is long). In these
+ * cases we truncate the result to a 64-bit value. This is on the
+ * assumption that generally the result is going to be used to report
+ * a 32-bit or 64-bit guest register value, so wrapping either cannot
+ * happen or is the desired behaviour.
+ */
+static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
{
- return CLOCK_PERIOD_TO_NS(clock_get(clk));
+ /*
+ * ticks = duration_in_ns / period_in_ns
+ * = ns / (period / 2^32)
+ * = (ns * 2^32) / period
+ * The hi, lo inputs to divu128() are (ns << 32) as a 128 bit value.
+ */
+ uint64_t lo = ns << 32;
+ uint64_t hi = ns >> 32;
+ if (clk->period == 0) {
+ return 0;
+ }
+
+ divu128(&lo, &hi, clk->period);
+ return lo;
}
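Conversely, a guest-visible tick counter could be derived from elapsed virtual time like this (start_ns and s->clk are assumed for the sketch):

    uint64_t now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_ticks = clock_ns_to_ticks(s->clk, now_ns - start_ns);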
/**
@@ -209,17 +339,43 @@ static inline bool clock_is_enabled(const Clock *clk)
return clock_get(clk) != 0;
}
-static inline void clock_init(Clock *clk, uint64_t value)
-{
- clock_set(clk, value);
-}
-static inline void clock_init_hz(Clock *clk, uint64_t value)
-{
- clock_set_hz(clk, value);
-}
-static inline void clock_init_ns(Clock *clk, uint64_t value)
-{
- clock_set_ns(clk, value);
-}
+/**
+ * clock_display_freq: return human-readable representation of clock frequency
+ * @clk: clock
+ *
+ * Return a string which has a human-readable representation of the
+ * clock's frequency, e.g. "33.3 MHz". This is intended for debug
+ * and display purposes.
+ *
+ * The caller is responsible for freeing the string with g_free().
+ */
+char *clock_display_freq(Clock *clk);
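A short usage sketch, showing the caller's responsibility to free the returned string:

    char *freq = clock_display_freq(s->clk);
    qemu_log("refclk running at %s\n", freq);
    g_free(freq);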
+
+/**
+ * clock_set_mul_div: set multiplier/divider for child clocks
+ * @clk: clock
+ * @multiplier: multiplier value
+ * @divider: divider value
+ *
+ * @return: true if the clock is changed.
+ *
+ * By default, a Clock's children will all run with the same period
+ * as their parent. This function allows you to adjust the multiplier
+ * and divider used to derive the child clock frequency.
+ * For example, setting a multiplier of 2 and a divider of 3
+ * will run child clocks with a period 2/3 of the parent clock,
+ * so if the parent clock is an 8MHz clock the children will
+ * be 12MHz.
+ *
+ * Setting the multiplier to 0 will stop the child clocks.
+ * Setting the divider to 0 is a programming error (diagnosed with
+ * an assertion failure).
+ * Setting a multiplier value that results in the child period
+ * overflowing is not diagnosed.
+ *
+ * Note that this function does not call clock_propagate(); the
+ * caller should do that if necessary.
+ */
+bool clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider);
#endif /* QEMU_HW_CLOCK_H */
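As an example of the clock_set_mul_div() semantics described above, running the child clocks with 2/3 of the parent period (i.e. 1.5x the frequency) might look like this sketch:

    if (clock_set_mul_div(s->clk, 2, 3)) {
        clock_propagate(s->clk);
    }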
diff --git a/include/hw/core/accel-cpu.h b/include/hw/core/accel-cpu.h
new file mode 100644
index 0000000000..24dad45ab9
--- /dev/null
+++ b/include/hw/core/accel-cpu.h
@@ -0,0 +1,38 @@
+/*
+ * Accelerator interface, specializes CPUClass
+ * This header is used only by target-specific code.
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_CPU_H
+#define ACCEL_CPU_H
+
+/*
+ * This header is used to define new accelerator-specific target-specific
+ * accelerator cpu subclasses.
+ * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific.
+ *
+ * Do not try to use for any other purpose than the implementation of new
+ * subclasses in target/, or the accel implementation itself in accel/
+ */
+
+#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
+#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
+typedef struct AccelCPUClass AccelCPUClass;
+DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
+
+typedef struct AccelCPUClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ void (*cpu_class_init)(CPUClass *cc);
+ void (*cpu_instance_init)(CPUState *cpu);
+ bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
+} AccelCPUClass;
+
+#endif /* ACCEL_CPU_H */
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 8f145733ce..ec14f74ce5 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -23,30 +23,20 @@
#include "hw/qdev-core.h"
#include "disas/dis-asm.h"
#include "exec/hwaddr.h"
+#include "exec/vaddr.h"
#include "exec/memattrs.h"
+#include "exec/tlb-common.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
-#include "qemu/plugin.h"
+#include "qom/object.h"
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
void *opaque);
/**
- * vaddr:
- * Type wide enough to contain any #target_ulong virtual address.
- */
-typedef uint64_t vaddr;
-#define VADDR_PRId PRId64
-#define VADDR_PRIu PRIu64
-#define VADDR_PRIo PRIo64
-#define VADDR_PRIx PRIx64
-#define VADDR_PRIX PRIX64
-#define VADDR_MAX UINT64_MAX
-
-/**
* SECTION:cpu
* @section_id: QEMU-cpu
* @title: CPU Class
@@ -61,41 +51,65 @@ typedef uint64_t vaddr;
*/
#define CPU(obj) ((CPUState *)(obj))
-#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
-#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
+/*
+ * The class checkers bring in CPU_GET_CLASS() which is potentially
+ * expensive given the eventual call to
+ * object_class_dynamic_cast_assert(). Because of this the CPUState
+ * has a cached value for the class in cs->cc which is set up in
+ * cpu_exec_realizefn() for use in hot code paths.
+ */
+typedef struct CPUClass CPUClass;
+DECLARE_CLASS_CHECKERS(CPUClass, CPU,
+ TYPE_CPU)
+
+/**
+ * OBJECT_DECLARE_CPU_TYPE:
+ * @CpuInstanceType: instance struct name
+ * @CpuClassType: class struct name
+ * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
+ *
+ * This macro is typically used in "cpu-qom.h" header file, and will:
+ *
+ * - create the typedefs for the CPU object and class structs
+ * - register the type for use with g_autoptr
+ * - provide three standard type cast functions
+ *
+ * The object struct and class struct need to be declared manually.
+ */
+#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
+ typedef struct ArchCPU CpuInstanceType; \
+ OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
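For instance, a target such as Arm declares its CPU type in its "cpu-qom.h" roughly as follows:

    #define TYPE_ARM_CPU "arm-cpu"
    OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)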
typedef enum MMUAccessType {
MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2
+#define MMU_ACCESS_COUNT 3
} MMUAccessType;
typedef struct CPUWatchpoint CPUWatchpoint;
-struct TranslationBlock;
+/* see accel-cpu.h */
+struct AccelCPUClass;
+
+/* see sysemu-cpu-ops.h */
+struct SysemuCPUOps;
/**
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
- * instantiatable CPU type.
+ * instantiatable CPU type.
* @parse_features: Callback to parse command line arguments.
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
* @has_work: Callback for checking if there is work to do.
- * @do_interrupt: Callback for interrupt handling.
- * @do_unaligned_access: Callback for unaligned access handling, if
- * the target defines #TARGET_ALIGNED_ONLY.
- * @do_transaction_failed: Callback for handling failed memory transactions
- * (ie bus faults or external aborts; not MMU faults)
- * @virtio_is_big_endian: Callback to return %true if a CPU which supports
- * runtime configurable endianness is currently big-endian. Non-configurable
- * CPUs can use the default implementation of this method. This method should
- * not be used by any callers other than the pre-1.0 virtio devices.
+ * @mmu_index: Callback for choosing softmmu mmu index;
+ * may be used internally by memory_rw_debug without TCG.
* @memory_rw_debug: Callback for GDB memory access.
* @dump_state: Callback for dumping state.
- * @dump_statistics: Callback for dumping statistics.
+ * @query_cpu_fast:
+ * Fill in target specific information for the "query-cpus-fast"
+ * QAPI call.
* @get_arch_id: Callback for getting architecture-dependent CPU ID.
- * @get_paging_enabled: Callback for inquiring whether paging is enabled.
- * @get_memory_mapping: Callback for obtaining the memory mappings.
* @set_pc: Callback for setting the Program Counter register. This
* should have the semantics used by the target architecture when
* setting the PC from a source such as an ELF file entry point;
@@ -104,59 +118,29 @@ struct TranslationBlock;
* If the target behaviour here is anything other than "set
* the PC register to the value passed in" then the target must
* also implement the synchronize_from_tb hook.
- * @synchronize_from_tb: Callback for synchronizing state from a TCG
- * #TranslationBlock. This is called when we abandon execution
- * of a TB before starting it, and must set all parts of the CPU
- * state which the previous TB in the chain may not have updated.
- * This always includes at least the program counter; some targets
- * will need to do more. If this hook is not implemented then the
- * default is to call @set_pc(tb->pc).
- * @tlb_fill: Callback for handling a softmmu tlb miss or user-only
- * address fault. For system mode, if the access is valid, call
- * tlb_set_page and return true; if the access is invalid, and
- * probe is true, return false; otherwise raise an exception and
- * do not return. For user-only mode, always raise an exception
- * and do not return.
- * @get_phys_page_debug: Callback for obtaining a physical address.
- * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
- * associated memory transaction attributes to use for the access.
- * CPUs which use memory transaction attributes should implement this
- * instead of get_phys_page_debug.
- * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
- * a memory access with the specified memory transaction attributes.
+ * @get_pc: Callback for getting the Program Counter register.
+ * As above, with the semantics of the target architecture.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
- * @debug_check_watchpoint: Callback: return true if the architectural
- * watchpoint whose address has matched should really fire.
- * @debug_excp_handler: Callback for handling debug exceptions.
- * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
- * 64-bit VM coredump.
- * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
- * note to a 32-bit VM coredump.
- * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
- * 32-bit VM coredump.
- * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
- * note to a 32-bit VM coredump.
- * @vmsd: State description for migration.
- * @gdb_num_core_regs: Number of core registers accessible to GDB.
+ * @gdb_adjust_breakpoint: Callback for adjusting the address of a
+ * breakpoint. Used by AVR to handle a gdb mis-feature with
+ * its Harvard architecture split code and data.
+ * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
+ * from @gdb_core_xml_file.
* @gdb_core_xml_file: File name for core registers GDB XML description.
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
* before the insn which triggers a watchpoint rather than after it.
* @gdb_arch_name: Optional callback that returns the architecture name known
* to GDB. The caller must free the returned string with g_free.
- * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
- * gdb stub. Returns a pointer to the XML contents for the specified XML file
- * or NULL if the CPU doesn't have a dynamically generated content for it.
- * @cpu_exec_enter: Callback for cpu_exec preparation.
- * @cpu_exec_exit: Callback for cpu_exec cleanup.
- * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
* @disas_set_info: Setup architecture specific components of disassembly info
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
+ * @deprecation_note: If this CPUClass is deprecated, this field provides
+ * related information.
*
* Represents a CPU family or model.
*/
-typedef struct CPUClass {
+struct CPUClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
@@ -164,65 +148,180 @@ typedef struct CPUClass {
ObjectClass *(*class_by_name)(const char *cpu_model);
void (*parse_features)(const char *typename, char *str, Error **errp);
- int reset_dump_flags;
bool (*has_work)(CPUState *cpu);
- void (*do_interrupt)(CPUState *cpu);
- void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr);
- void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
- unsigned size, MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr);
- bool (*virtio_is_big_endian)(CPUState *cpu);
+ int (*mmu_index)(CPUState *cpu, bool ifetch);
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
uint8_t *buf, int len, bool is_write);
void (*dump_state)(CPUState *cpu, FILE *, int flags);
- GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
- void (*dump_statistics)(CPUState *cpu, int flags);
+ void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
int64_t (*get_arch_id)(CPUState *cpu);
- bool (*get_paging_enabled)(const CPUState *cpu);
- void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
- Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
- void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
- bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
- hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
- hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs);
- int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
+ vaddr (*get_pc)(CPUState *cpu);
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
- bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
- void (*debug_excp_handler)(CPUState *cpu);
-
- int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
- int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
- int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
- int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
-
- const VMStateDescription *vmsd;
+ vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
+
const char *gdb_core_xml_file;
- gchar * (*gdb_arch_name)(CPUState *cpu);
- const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
- void (*cpu_exec_enter)(CPUState *cpu);
- void (*cpu_exec_exit)(CPUState *cpu);
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ const gchar * (*gdb_arch_name)(CPUState *cpu);
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
- vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
- void (*tcg_initialize)(void);
- /* Keep non-pointer data at the end to minimize holes. */
+ const char *deprecation_note;
+ struct AccelCPUClass *accel_cpu;
+
+ /* when system emulation is not available, this pointer is NULL */
+ const struct SysemuCPUOps *sysemu_ops;
+
+ /* when TCG is not available, this pointer is NULL */
+ const TCGCPUOps *tcg_ops;
+
+ /*
+ * if not NULL, this is called in order for the CPUClass to initialize
+ * class data that depends on the accelerator, see accel/accel-common.c.
+ */
+ void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
+
+ /*
+ * Keep non-pointer data at the end to minimize holes.
+ */
+ int reset_dump_flags;
int gdb_num_core_regs;
bool gdb_stop_before_watchpoint;
-} CPUClass;
+};
+
+/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
+
+/* Use a fully associative victim tlb of 8 entries. */
+#define CPU_VTLB_SIZE 8
+
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
+ */
+typedef struct CPUTLBEntryFull {
+ /*
+ * @xlat_section contains:
+ * - in the lower TARGET_PAGE_BITS, a physical section number
+ * - with the lower TARGET_PAGE_BITS masked off, an offset which
+ * must be added to the virtual address to obtain:
+ * + the ram_addr_t of the target RAM (if the physical section
+ * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+ * + the offset within the target MemoryRegion (otherwise)
+ */
+ hwaddr xlat_section;
+
+ /*
+ * @phys_addr contains the physical address in the address space
+ * given by cpu_asidx_from_attrs(cpu, @attrs).
+ */
+ hwaddr phys_addr;
+
+ /* @attrs contains the memory transaction attributes for the page. */
+ MemTxAttrs attrs;
+
+ /* @prot contains the complete protections for the page. */
+ uint8_t prot;
+
+ /* @lg_page_size contains the log2 of the page size. */
+ uint8_t lg_page_size;
+
+ /* Additional tlb flags requested by tlb_fill. */
+ uint8_t tlb_fill_flags;
+
+ /*
+ * Additional tlb flags for use by the slow path. If non-zero,
+ * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+ */
+ uint8_t slow_flags[MMU_ACCESS_COUNT];
+
+ /*
+ * Allow target-specific additions to this structure.
+ * This may be used to cache items from the guest cpu
+ * page tables for later use by the implementation.
+ */
+ union {
+ /*
+ * Cache the attrs and shareability fields from the page table entry.
+ *
+ * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+ * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+ * For shareability and guarded, as in the SH and GP fields respectively
+ * of the VMSAv8-64 PTEs.
+ */
+ struct {
+ uint8_t pte_attrs;
+ uint8_t shareability;
+ bool guarded;
+ } arm;
+ } extra;
+} CPUTLBEntryFull;
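As an illustrative sketch only (the names below are placeholders, not part of this header), the packing of @xlat_section documented above can be unpicked like this, where full is a CPUTLBEntryFull * and vaddr_page is the page-aligned guest virtual address:

/* Lower TARGET_PAGE_BITS: index into the physical section table. */
unsigned section_idx = full->xlat_section & ~TARGET_PAGE_MASK;
/* Remaining bits plus the virtual address: a ram_addr_t for NOTDIRTY/ROM
 * sections, otherwise the offset within the section's MemoryRegion. */
hwaddr xlat = (full->xlat_section & TARGET_PAGE_MASK) + vaddr_page;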
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+ /*
+ * Describe a region covering all of the large pages allocated
+ * into the tlb. When any page within this region is flushed,
+ * we must flush the entire tlb. The region is matched if
+ * (addr & large_page_mask) == large_page_addr.
+ */
+ vaddr large_page_addr;
+ vaddr large_page_mask;
+ /* host time (in ns) at the beginning of the time window */
+ int64_t window_begin_ns;
+ /* maximum number of entries observed in the window */
+ size_t window_max_entries;
+ size_t n_used_entries;
+ /* The next index to use in the tlb victim table. */
+ size_t vindex;
+ /* The tlb victim table, in two parts. */
+ CPUTLBEntry vtable[CPU_VTLB_SIZE];
+ CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+ CPUTLBEntryFull *fulltlb;
+} CPUTLBDesc;
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+ /* Serialize updates to f.table and d.vtable, and others as noted. */
+ QemuSpin lock;
+ /*
+ * Within dirty, for each bit N, modifications have been made to
+ * mmu_idx N since the last time that mmu_idx was flushed.
+ * Protected by tlb_c.lock.
+ */
+ uint16_t dirty;
+ /*
+ * Statistics. These are not lock protected, but are read and
+ * written atomically. This allows the monitor to print a snapshot
+ * of the stats without interfering with the cpu.
+ */
+ size_t full_flush_count;
+ size_t part_flush_count;
+ size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+typedef struct CPUTLB {
+#ifdef CONFIG_TCG
+ CPUTLBCommon c;
+ CPUTLBDesc d[NB_MMU_MODES];
+ CPUTLBDescFast f[NB_MMU_MODES];
+#endif
+} CPUTLB;
/*
* Low 16 bits: number of cycles left, used only in icount mode.
@@ -234,7 +333,7 @@ typedef struct CPUClass {
typedef union IcountDecr {
uint32_t u32;
struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint16_t high;
uint16_t low;
#else
@@ -244,6 +343,16 @@ typedef union IcountDecr {
} u16;
} IcountDecr;
+/*
+ * Elements of CPUState most efficiently accessed from CPUArchState,
+ * via small negative offsets.
+ */
+typedef struct CPUNegativeOffsetState {
+ CPUTLB tlb;
+ IcountDecr icount_decr;
+ bool can_do_io;
+} CPUNegativeOffsetState;
+
typedef struct CPUBreakpoint {
vaddr pc;
int flags; /* BP_* */
@@ -259,26 +368,9 @@ struct CPUWatchpoint {
QTAILQ_ENTRY(CPUWatchpoint) entry;
};
-#ifdef CONFIG_PLUGIN
-/*
- * For plugins we sometime need to save the resolved iotlb data before
- * the memory regions get moved around by io_writex.
- */
-typedef struct SavedIOTLB {
- hwaddr addr;
- MemoryRegionSection *section;
- hwaddr mr_offset;
-} SavedIOTLB;
-#endif
-
struct KVMState;
struct kvm_run;
-struct hax_vcpu_state;
-
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
/* work queue */
/* The union type allows passing of 64 bit target pointers on 32 bit
@@ -302,7 +394,6 @@ typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
struct qemu_work_item;
#define CPU_UNSET_NUMA_NODE_ID -1
-#define CPU_TRACE_DSTATE_MAX_EVENTS 32
/**
* CPUState:
@@ -312,8 +403,11 @@ struct qemu_work_item;
* to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
* be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
* QOM parent.
+ * Under TCG this value is propagated to @tcg_cflags.
+ * See the CF_CLUSTER_MASK bits in TranslationBlock::cflags.
+ * @tcg_cflags: Pre-computed cflags for this cpu.
* @nr_cores: Number of cores within this CPU package.
- * @nr_threads: Number of threads within this CPU.
+ * @nr_threads: Number of threads within this CPU core.
* @running: #true if CPU is currently running (lockless).
* @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
* valid under cpu_list_lock.
@@ -326,38 +420,42 @@ struct qemu_work_item;
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
- * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
- * requires that IO only be performed on the last instruction of a TB
- * so that interrupts take effect immediately.
+ * @neg.can_do_io: True if memory-mapped IO is allowed.
* @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
* AddressSpaces this CPU has)
* @num_ases: number of CPUAddressSpaces in @cpu_ases
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
- * @env_ptr: Pointer to subclass-specific CPUArchState field.
- * @icount_decr_ptr: Pointer to IcountDecr field within subclass.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
- * @next_cpu: Next CPU sharing TB cache.
+ * @node: QTAILQ of CPUs sharing TB cache.
* @opaque: User data.
* @mem_io_pc: Host Program Counter at which the memory was accessed.
+ * @accel: Pointer to accelerator specific state.
* @kvm_fd: vCPU file descriptor for KVM.
* @work_mutex: Lock to prevent multiple access to @work_list.
* @work_list: List of pending asynchronous work.
- * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
- * to @trace_dstate).
- * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
- * @plugin_mask: Plugin event bitmap. Modified only via async work.
+ * @plugin_mem_cbs: active plugin memory callbacks
+ * @plugin_state: per-CPU plugin state
* @ignore_memory_transaction_failures: Cached copy of the MachineState
* flag of the same name: allows the board to suppress calling of the
* CPU do_transaction_failed hook function.
+ * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
+ * ring is enabled.
+ * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
+ * dirty ring structure.
*
* State of one CPU core or thread.
+ *
+ * Align, in order to match possible alignment required by CPUArchState,
+ * and eliminate a hole between CPUState and CPUArchState within ArchCPU.
*/
struct CPUState {
/*< private >*/
DeviceState parent_obj;
+ /* cache to avoid expensive CPU_GET_CLASS */
+ CPUClass *cc;
/*< public >*/
int nr_cores;
@@ -365,7 +463,7 @@ struct CPUState {
struct QemuThread *thread;
#ifdef _WIN32
- HANDLE hThread;
+ QemuSemaphore sem;
#endif
int thread_id;
bool running, has_waiter;
@@ -374,10 +472,14 @@ struct CPUState {
bool created;
bool stop;
bool stopped;
+
+ /* Should CPU start in powered-off state? */
+ bool start_powered_off;
+
bool unplug;
bool crash_occurred;
bool exit_request;
- bool in_exclusive_context;
+ int exclusive_context_count;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
@@ -395,13 +497,9 @@ struct CPUState {
AddressSpace *as;
MemoryRegion *memory;
- void *env_ptr; /* CPUArchState */
- IcountDecr *icount_decr_ptr;
+ CPUJumpCache *tb_jmp_cache;
- /* Accessed in parallel; all accesses must be atomic */
- struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
-
- struct GDBRegisterState *gdb_regs;
+ GArray *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
QTAILQ_ENTRY(CPUState) node;
@@ -419,30 +517,36 @@ struct CPUState {
*/
uintptr_t mem_io_pc;
+ /* Only used in KVM */
int kvm_fd;
struct KVMState *kvm_state;
struct kvm_run *kvm_run;
+ struct kvm_dirty_gfn *kvm_dirty_gfns;
+ uint32_t kvm_fetch_index;
+ uint64_t dirty_pages;
+ int kvm_vcpu_stats_fd;
- /* Used for events with 'vcpu' and *without* the 'disabled' properties */
- DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
- DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);
-
- DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX);
+ /* Used by accel-block: CPU is executing an ioctl() */
+ QemuLockCnt in_ioctl_lock;
#ifdef CONFIG_PLUGIN
+ /*
+ * The callback pointer stays in the main CPUState as it is
+ * accessed via TCG (see gen_empty_mem_helper).
+ */
GArray *plugin_mem_cbs;
- /* saved iotlb data from io_writex */
- SavedIOTLB saved_iotlb;
+ CPUPluginState *plugin_state;
#endif
/* TODO Move common fields from CPUArchState here. */
int cpu_index;
int cluster_index;
+ uint32_t tcg_cflags;
uint32_t halted;
- uint32_t can_do_io;
int32_t exception_index;
- /* shared by kvm, hax and hvf */
+ AccelCPUState *accel;
+ /* shared by kvm and hvf */
bool vcpu_dirty;
/* Used to keep track of an outstanding cpu throttle thread for migration
@@ -450,36 +554,48 @@ struct CPUState {
*/
bool throttle_thread_scheduled;
- bool ignore_memory_transaction_failures;
+ /*
+ * Sleep throttle_us_per_full microseconds once dirty ring is full
+ * if dirty page rate limit is enabled.
+ */
+ int64_t throttle_us_per_full;
- struct hax_vcpu_state *hax_vcpu;
+ bool ignore_memory_transaction_failures;
- int hvf_fd;
+ /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
+ bool prctl_unalign_sigbus;
/* track IOMMUs whose translations we've cached in the TCG TLB */
GArray *iommu_notifiers;
+
+ /*
+ * MUST BE LAST in order to minimize the displacement to CPUArchState.
+ */
+ char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
+ CPUNegativeOffsetState neg;
};
+/* Validate placement of CPUNegativeOffsetState. */
+QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
+ sizeof(CPUState) - sizeof(CPUNegativeOffsetState));
+
+static inline CPUArchState *cpu_env(CPUState *cpu)
+{
+ /* We validate that CPUArchState follows CPUState in cpu-all.h. */
+ return (CPUArchState *)(cpu + 1);
+}
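A small usage sketch of the helper above; env_cpu() is the existing inverse accessor from exec/cpu-all.h, and foo() is a hypothetical caller:

static void foo(CPUState *cs)
{
    CPUArchState *env = cpu_env(cs);   /* CPUArchState follows CPUState */
    g_assert(env_cpu(env) == cs);      /* round trip back to the CPUState */
}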
+
typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
-extern CPUTailQ cpus;
+extern CPUTailQ cpus_queue;
-#define first_cpu QTAILQ_FIRST_RCU(&cpus)
+#define first_cpu QTAILQ_FIRST_RCU(&cpus_queue)
#define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node)
-#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
+#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
- QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
+ QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu)
extern __thread CPUState *current_cpu;
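For illustration, the vCPU list is walked with the macros above; the loop body here is just a placeholder:

CPUState *cpu;

CPU_FOREACH(cpu) {
    if (cpu != current_cpu) {
        cpu_exit(cpu);   /* e.g. kick every other vCPU out of its exec loop */
    }
}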
-static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
-{
- unsigned int i;
-
- for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
- atomic_set(&cpu->tb_jmp_cache[i], NULL);
- }
-}
-
/**
* qemu_tcg_mttcg_enabled:
* Check whether we are running MultiThread TCG or not.
@@ -502,8 +618,10 @@ bool cpu_paging_enabled(const CPUState *cpu);
* @cpu: The CPU whose memory mappings are to be obtained.
* @list: Where to write the memory mappings to.
* @errp: Pointer for reporting an #Error.
+ *
+ * Returns: %true on success, %false otherwise.
*/
-void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
#if !defined(CONFIG_USER_ONLY)
@@ -564,11 +682,13 @@ GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
* @CPU_DUMP_CODE:
* @CPU_DUMP_FPU: dump FPU register state, not just integer
* @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
+ * @CPU_DUMP_VPU: dump VPU registers
*/
enum CPUDumpFlags {
CPU_DUMP_CODE = 0x00010000,
CPU_DUMP_FPU = 0x00020000,
CPU_DUMP_CCOP = 0x00040000,
+ CPU_DUMP_VPU = 0x00080000,
};
/**
@@ -580,16 +700,6 @@ enum CPUDumpFlags {
*/
void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
-/**
- * cpu_dump_statistics:
- * @cpu: The CPU whose state is to be dumped.
- * @flags: Flags what to dump.
- *
- * Dump CPU statistics to the current monitor if we have one, else to
- * stdout.
- */
-void cpu_dump_statistics(CPUState *cpu, int flags);
-
#ifndef CONFIG_USER_ONLY
/**
* cpu_get_phys_page_attrs_debug:
@@ -604,18 +714,8 @@ void cpu_dump_statistics(CPUState *cpu, int flags);
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
-static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->get_phys_page_attrs_debug) {
- return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
- }
- /* Fallback for CPUs which don't implement the _attrs_ hook */
- *attrs = MEMTXATTRS_UNSPECIFIED;
- return cc->get_phys_page_debug(cpu, addr);
-}
+hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
/**
* cpu_get_phys_page_debug:
@@ -627,12 +727,7 @@ static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
-static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
-{
- MemTxAttrs attrs = {};
-
- return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
-}
+hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
/** cpu_asidx_from_attrs:
* @cpu: CPU
@@ -641,17 +736,16 @@ static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
* Returns the address space index specifying the CPU AddressSpace
* to use for a memory access with the given transaction attributes.
*/
-static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- int ret = 0;
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
- if (cc->asidx_from_attrs) {
- ret = cc->asidx_from_attrs(cpu, attrs);
- assert(ret < cpu->num_ases && ret >= 0);
- }
- return ret;
-}
+/**
+ * cpu_virtio_is_big_endian:
+ * @cpu: CPU
+ *
+ * Returns %true if a CPU which supports runtime configurable endianness
+ * is currently big-endian.
+ */
+bool cpu_virtio_is_big_endian(CPUState *cpu);
#endif /* CONFIG_USER_ONLY */
@@ -678,13 +772,27 @@ void cpu_reset(CPUState *cpu);
* @typename: The CPU base type.
* @cpu_model: The model string without any parameters.
*
- * Looks up a CPU #ObjectClass matching name @cpu_model.
+ * Looks up a concrete CPU #ObjectClass matching name @cpu_model.
*
- * Returns: A #CPUClass or %NULL if not matching class is found.
+ * Returns: A concrete #CPUClass or %NULL if no matching class is found
+ * or if the matching class is abstract.
*/
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
/**
+ * cpu_model_from_type:
+ * @typename: The CPU type name
+ *
+ * Extract the CPU model name from the CPU type name. The
+ * CPU type name is either the CPU model name combined with the
+ * type suffix, or the same as the CPU model name.
+ *
+ * Returns: CPU model name or NULL if the CPU class doesn't exist.
+ * The caller should g_free() the string once it is no longer needed.
+ */
+char *cpu_model_from_type(const char *typename);
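A brief usage sketch: the returned string is owned by the caller, so g_autofree (or an explicit g_free()) is expected; typename here is a placeholder:

g_autofree char *model = cpu_model_from_type(typename);
if (model) {
    qemu_printf("%s\n", model);   /* e.g. list the model name */
}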
+
+/**
* cpu_create:
* @typename: The CPU type.
*
@@ -805,7 +913,7 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data
*/
static inline bool cpu_in_exclusive_context(const CPUState *cpu)
{
- return cpu->in_exclusive_context;
+ return cpu->exclusive_context_count;
}
/**
@@ -838,12 +946,6 @@ bool cpu_exists(int64_t id);
*/
CPUState *cpu_by_arch_id(int64_t id);
-#ifndef CONFIG_USER_ONLY
-
-typedef void (*CPUInterruptHandler)(CPUState *, int);
-
-extern CPUInterruptHandler cpu_interrupt_handler;
-
/**
* cpu_interrupt:
* @cpu: The CPU to set an interrupt on.
@@ -851,47 +953,9 @@ extern CPUInterruptHandler cpu_interrupt_handler;
*
* Invokes the interrupt handler.
*/
-static inline void cpu_interrupt(CPUState *cpu, int mask)
-{
- cpu_interrupt_handler(cpu, mask);
-}
-
-#else /* USER_ONLY */
void cpu_interrupt(CPUState *cpu, int mask);
-#endif /* USER_ONLY */
-
-#ifdef NEED_CPU_H
-
-#ifdef CONFIG_SOFTMMU
-static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
-}
-
-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response,
- uintptr_t retaddr)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
- cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
- mmu_idx, attrs, response, retaddr);
- }
-}
-#endif
-
-#endif /* NEED_CPU_H */
-
/**
* cpu_set_pc:
* @cpu: The CPU to set the program counter for.
@@ -932,14 +996,6 @@ void cpu_exit(CPUState *cpu);
void cpu_resume(CPUState *cpu);
/**
- * cpu_remove:
- * @cpu: The CPU to remove.
- *
- * Requests the CPU to be removed.
- */
-void cpu_remove(CPUState *cpu);
-
- /**
* cpu_remove_sync:
* @cpu: The CPU to remove.
*
@@ -1020,9 +1076,10 @@ void cpu_single_step(CPUState *cpu, int enabled);
#define BP_GDB 0x10
#define BP_CPU 0x20
#define BP_ANY (BP_GDB | BP_CPU)
-#define BP_WATCHPOINT_HIT_READ 0x40
-#define BP_WATCHPOINT_HIT_WRITE 0x80
-#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
+#define BP_HIT_SHIFT 6
+#define BP_WATCHPOINT_HIT_READ (BP_MEM_READ << BP_HIT_SHIFT)
+#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
+#define BP_WATCHPOINT_HIT (BP_MEM_ACCESS << BP_HIT_SHIFT)
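Assuming the usual BP_MEM_* values defined earlier in the header (BP_MEM_READ 0x01, BP_MEM_WRITE 0x02, BP_MEM_ACCESS their OR), the shifted encoding keeps the historical constants:

/* BP_WATCHPOINT_HIT_READ  == 0x01 << 6 == 0x40
 * BP_WATCHPOINT_HIT_WRITE == 0x02 << 6 == 0x80
 * BP_WATCHPOINT_HIT       == 0x03 << 6 == 0xc0
 */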
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint);
@@ -1045,7 +1102,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}
-#ifdef CONFIG_USER_ONLY
+#if defined(CONFIG_USER_ONLY)
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
@@ -1066,17 +1123,6 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
-
-static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
- MemTxAttrs atr, int fl, uintptr_t ra)
-{
-}
-
-static inline int cpu_watchpoint_address_matches(CPUState *cpu,
- vaddr addr, vaddr len)
-{
- return 0;
-}
#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
@@ -1084,33 +1130,24 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
+#endif
/**
- * cpu_check_watchpoint:
- * @cpu: cpu context
- * @addr: guest virtual address
- * @len: access length
- * @attrs: memory access attributes
- * @flags: watchpoint access type
- * @ra: unwind return address
- *
- * Check for a watchpoint hit in [addr, addr+len) of the type
- * specified by @flags. Exit via exception with a hit.
- */
-void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
- MemTxAttrs attrs, int flags, uintptr_t ra);
-
-/**
- * cpu_watchpoint_address_matches:
- * @cpu: cpu context
- * @addr: guest virtual address
- * @len: access length
+ * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
+ * @cpu: CPUState pointer
*
- * Return the watchpoint flags that apply to [addr, addr+len).
- * If no watchpoint is registered for the range, the result is 0.
+ * The memory callbacks are installed if a plugin has instrumented an
+ * instruction for memory. This can be useful to know if you want to
+ * force a slow path for a series of memory accesses.
*/
-int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
+{
+#ifdef CONFIG_PLUGIN
+ return !!cpu->plugin_mem_cbs;
+#else
+ return false;
#endif
+}
/**
* cpu_get_address_space:
@@ -1122,31 +1159,34 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
*/
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
-void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
-extern Property cpu_common_props[];
+G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
+ G_GNUC_PRINTF(2, 3);
+
+/* $(top_srcdir)/cpu.c */
+void cpu_class_init_props(DeviceClass *dc);
void cpu_exec_initfn(CPUState *cpu);
-void cpu_exec_realizefn(CPUState *cpu, Error **errp);
+bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);
+void cpu_exec_reset_hold(CPUState *cpu);
/**
* target_words_bigendian:
* Returns true if the (default) endianness of the target is big endian,
* false otherwise. Note that in target-specific code, you can use
- * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
+ * TARGET_BIG_ENDIAN directly instead. On the other hand, common
* code should normally never need to know about the endianness of the
* target, so please do *not* use this function unless you know very well
* what you are doing!
*/
bool target_words_bigendian(void);
+const char *target_name(void);
+
#ifdef NEED_CPU_H
-#ifdef CONFIG_SOFTMMU
+#ifndef CONFIG_USER_ONLY
+
extern const VMStateDescription vmstate_cpu_common;
-#else
-#define vmstate_cpu_common vmstate_dummy
-#endif
#define VMSTATE_CPU() { \
.name = "parent_obj", \
@@ -1155,6 +1195,7 @@ extern const VMStateDescription vmstate_cpu_common;
.flags = VMS_STRUCT, \
.offset = 0, \
}
+#endif /* !CONFIG_USER_ONLY */
#endif /* NEED_CPU_H */
diff --git a/include/hw/core/generic-loader.h b/include/hw/core/generic-loader.h
index 9ffce1c5a3..19d87b39c8 100644
--- a/include/hw/core/generic-loader.h
+++ b/include/hw/core/generic-loader.h
@@ -20,8 +20,9 @@
#include "elf.h"
#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct GenericLoaderState {
+struct GenericLoaderState {
/* <private> */
DeviceState parent_obj;
@@ -38,10 +39,9 @@ typedef struct GenericLoaderState {
bool force_raw;
bool data_be;
bool set_pc;
-} GenericLoaderState;
+};
#define TYPE_GENERIC_LOADER "loader"
-#define GENERIC_LOADER(obj) OBJECT_CHECK(GenericLoaderState, (obj), \
- TYPE_GENERIC_LOADER)
+OBJECT_DECLARE_SIMPLE_TYPE(GenericLoaderState, GENERIC_LOADER)
#endif
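The OBJECT_DECLARE_SIMPLE_TYPE() conversion used here (and repeated for the MPCore, cluster and core headers below) keeps the usual checked-cast usage; a minimal, illustrative sketch:

DeviceState *dev = qdev_new(TYPE_GENERIC_LOADER);
GenericLoaderState *s = GENERIC_LOADER(dev);   /* checked downcast, as before */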
diff --git a/include/hw/core/resetcontainer.h b/include/hw/core/resetcontainer.h
new file mode 100644
index 0000000000..23db0c7a88
--- /dev/null
+++ b/include/hw/core/resetcontainer.h
@@ -0,0 +1,48 @@
+/*
+ * Reset container
+ *
+ * Copyright (c) 2024 Linaro, Ltd
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_RESETCONTAINER_H
+#define HW_RESETCONTAINER_H
+
+/*
+ * The "reset container" is an object which implements the Resettable
+ * interface. It contains a list of arbitrary other objects which also
+ * implement Resettable. Resetting the reset container resets all the
+ * objects in it.
+ */
+
+#include "qom/object.h"
+
+#define TYPE_RESETTABLE_CONTAINER "resettable-container"
+OBJECT_DECLARE_TYPE(ResettableContainer, ResettableContainerClass, RESETTABLE_CONTAINER)
+
+/**
+ * resettable_container_add: Add a resettable object to the container
+ * @rc: container
+ * @obj: object to add to the container
+ *
+ * Add @obj to the ResettableContainer @rc. @obj must implement the
+ * Resettable interface.
+ *
+ * When @rc is reset, it will reset every object that has been added
+ * to it, in the order they were added.
+ */
+void resettable_container_add(ResettableContainer *rc, Object *obj);
+
+/**
+ * resettable_container_remove: Remove an object from the container
+ * @rc: container
+ * @obj: object to remove from the container
+ *
+ * Remove @obj from the ResettableContainer @rc. @obj must have been
+ * previously added to this container.
+ */
+void resettable_container_remove(ResettableContainer *rc, Object *obj);
+
+#endif
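A hedged usage sketch, assuming the generic resettable_reset() entry point from hw/resettable.h and two already-created devices dev_a/dev_b (placeholders):

Object *rc = object_new(TYPE_RESETTABLE_CONTAINER);

resettable_container_add(RESETTABLE_CONTAINER(rc), OBJECT(dev_a));
resettable_container_add(RESETTABLE_CONTAINER(rc), OBJECT(dev_b));
resettable_reset(rc, RESET_TYPE_COLD);   /* resets dev_a, then dev_b */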
diff --git a/include/hw/core/split-irq.h b/include/hw/core/split-irq.h
index 872a39aa4f..ff8852f407 100644
--- a/include/hw/core/split-irq.h
+++ b/include/hw/core/split-irq.h
@@ -42,9 +42,8 @@
#define MAX_SPLIT_LINES 16
-typedef struct SplitIRQ SplitIRQ;
-#define SPLIT_IRQ(obj) OBJECT_CHECK(SplitIRQ, (obj), TYPE_SPLIT_IRQ)
+OBJECT_DECLARE_SIMPLE_TYPE(SplitIRQ, SPLIT_IRQ)
struct SplitIRQ {
DeviceState parent_obj;
diff --git a/include/hw/arm/sysbus-fdt.h b/include/hw/core/sysbus-fdt.h
index 340c382cdd..340c382cdd 100644
--- a/include/hw/arm/sysbus-fdt.h
+++ b/include/hw/core/sysbus-fdt.h
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
new file mode 100644
index 0000000000..24d003fe04
--- /dev/null
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -0,0 +1,92 @@
+/*
+ * CPU operations specific to system emulation
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_CPU_OPS_H
+#define SYSEMU_CPU_OPS_H
+
+#include "hw/core/cpu.h"
+
+/*
+ * struct SysemuCPUOps: System operations specific to a CPU class
+ */
+typedef struct SysemuCPUOps {
+ /**
+ * @get_memory_mapping: Callback for obtaining the memory mappings.
+ */
+ bool (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
+ Error **errp);
+ /**
+ * @get_paging_enabled: Callback for inquiring whether paging is enabled.
+ */
+ bool (*get_paging_enabled)(const CPUState *cpu);
+ /**
+ * @get_phys_page_debug: Callback for obtaining a physical address.
+ */
+ hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
+ /**
+ * @get_phys_page_attrs_debug: Callback for obtaining a physical address
+ * and the associated memory transaction attributes to use for the
+ * access.
+ * CPUs which use memory transaction attributes should implement this
+ * instead of get_phys_page_debug.
+ */
+ hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
+ /**
+ * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
+ * a memory access with the specified memory transaction attributes.
+ */
+ int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
+ /**
+ * @get_crash_info: Callback for reporting guest crash information in
+ * GUEST_PANICKED events.
+ */
+ GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
+ /**
+ * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
+ * 32-bit VM coredump.
+ */
+ int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, DumpState *s);
+ /**
+ * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
+ * 64-bit VM coredump.
+ */
+ int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, DumpState *s);
+ /**
+ * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
+ * note to a 32-bit VM coredump.
+ */
+ int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
+ DumpState *s);
+ /**
+ * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
+ * note to a 64-bit VM coredump.
+ */
+ int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
+ DumpState *s);
+ /**
+ * @virtio_is_big_endian: Callback to return %true if a CPU which supports
+ * runtime configurable endianness is currently big-endian.
+ * Non-configurable CPUs can use the default implementation of this method.
+ * This method should not be used by any callers other than the pre-1.0
+ * virtio devices.
+ */
+ bool (*virtio_is_big_endian)(CPUState *cpu);
+
+ /**
+ * @legacy_vmsd: Legacy state for migration.
+ * Do not use in new targets, use #DeviceClass::vmsd instead.
+ */
+ const VMStateDescription *legacy_vmsd;
+
+} SysemuCPUOps;
+
+#endif /* SYSEMU_CPU_OPS_H */
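As a rough sketch of how a target might wire this up (all foo_* names are hypothetical and only a couple of hooks are shown):

static const struct SysemuCPUOps foo_sysemu_ops = {
    .get_phys_page_debug = foo_cpu_get_phys_page_debug,
    .legacy_vmsd         = &vmstate_foo_cpu,
};

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->sysemu_ops = &foo_sysemu_ops;
}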
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
new file mode 100644
index 0000000000..bf8ff8e3ee
--- /dev/null
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -0,0 +1,224 @@
+/*
+ * TCG CPU-specific operations
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TCG_CPU_OPS_H
+#define TCG_CPU_OPS_H
+
+#include "hw/core/cpu.h"
+
+struct TCGCPUOps {
+ /**
+ * @initialize: Initialize TCG state
+ *
+ * Called when the first CPU is realized.
+ */
+ void (*initialize)(void);
+ /**
+ * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
+ *
+ * This is called when we abandon execution of a TB before starting it,
+ * and must set all parts of the CPU state which the previous TB in the
+ * chain may not have updated.
+ * By default, when this is NULL, a call is made to @set_pc(tb->pc).
+ *
+ * If more state needs to be restored, the target must implement a
+ * function to restore all the state, and register it here.
+ */
+ void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
+ /**
+ * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
+ *
+ * This is called when we unwind state in the middle of a TB,
+ * usually before raising an exception. Set all parts of the CPU
+ * state which are tracked insn-by-insn in the target-specific
+ * arguments to start_insn, passed as @data.
+ */
+ void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
+ const uint64_t *data);
+
+ /** @cpu_exec_enter: Callback for cpu_exec preparation */
+ void (*cpu_exec_enter)(CPUState *cpu);
+ /** @cpu_exec_exit: Callback for cpu_exec cleanup */
+ void (*cpu_exec_exit)(CPUState *cpu);
+ /** @debug_excp_handler: Callback for handling debug exceptions */
+ void (*debug_excp_handler)(CPUState *cpu);
+
+#ifdef NEED_CPU_H
+#ifdef CONFIG_USER_ONLY
+ /**
+ * @fake_user_interrupt: Callback for 'fake exception' handling.
+ *
+ * Simulate 'fake exception' which will be handled outside the
+ * cpu execution loop (hack for x86 user mode).
+ */
+ void (*fake_user_interrupt)(CPUState *cpu);
+
+ /**
+ * record_sigsegv:
+ * @cpu: cpu context
+ * @addr: faulting guest address
+ * @access_type: access was read/write/execute
+ * @maperr: true for invalid page, false for permission fault
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGSEGV with si_code set for @maperr,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
+ * so that a "normal" cpu exception can be raised. In this case,
+ * the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigsegv)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+ /**
+ * record_sigbus:
+ * @cpu: cpu context
+ * @addr: misaligned guest address
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * We are about to raise SIGBUS with si_code BUS_ADRALN,
+ * and si_addr set for @addr. Record anything further needed
+ * for the signal ucontext_t.
+ *
+ * If the emulated kernel does not provide the signal handler with
+ * anything besides the user context registers, and the siginfo_t,
+ * then this hook need do nothing and may be omitted.
+ * Otherwise, record the data and return; the caller will raise
+ * the signal, unwind the cpu state, and return to the main loop.
+ *
+ * If it is simpler to re-use the sysemu do_unaligned_access code,
+ * @ra is provided so that a "normal" cpu exception can be raised.
+ * In this case, the signal must be raised by the architecture cpu_loop.
+ */
+ void (*record_sigbus)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+#else
+ /** @do_interrupt: Callback for interrupt handling. */
+ void (*do_interrupt)(CPUState *cpu);
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ /** @cpu_exec_halt: Callback for handling halt in cpu_exec */
+ void (*cpu_exec_halt)(CPUState *cpu);
+ /**
+ * @tlb_fill: Handle a softmmu tlb miss
+ *
+ * If the access is valid, call tlb_set_page and return true;
+ * if the access is invalid and probe is true, return false;
+ * otherwise raise an exception and do not return.
+ */
+ bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+ /**
+ * @do_transaction_failed: Callback for handling failed memory transactions
+ * (i.e. bus faults or external aborts; not MMU faults)
+ */
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+ /**
+ * @do_unaligned_access: Callback for unaligned access handling
+ * The callback must exit via raising an exception.
+ */
+ G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+ /**
+ * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
+ */
+ vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+
+ /**
+ * @debug_check_watchpoint: return true if the architectural
+ * watchpoint whose address has matched should really fire, used by ARM
+ * and RISC-V
+ */
+ bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
+
+ /**
+ * @debug_check_breakpoint: return true if the architectural
+ * breakpoint whose PC has matched should really fire.
+ */
+ bool (*debug_check_breakpoint)(CPUState *cpu);
+
+ /**
+ * @io_recompile_replay_branch: Callback for cpu_io_recompile.
+ *
+ * The cpu has been stopped, and cpu_restore_state_from_tb has been
+ * called. If the faulting instruction is in a delay slot, and the
+ * target architecture requires re-execution of the branch, then
+ * adjust the cpu state as required and return true.
+ */
+ bool (*io_recompile_replay_branch)(CPUState *cpu,
+ const TranslationBlock *tb);
+ /**
+ * @need_replay_interrupt: Return %true if @interrupt_request
+ * needs to be recorded for replay purposes.
+ */
+ bool (*need_replay_interrupt)(int interrupt_request);
+#endif /* !CONFIG_USER_ONLY */
+#endif /* NEED_CPU_H */
+
+};
+
+#if defined(CONFIG_USER_ONLY)
+
+static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs atr, int fl, uintptr_t ra)
+{
+}
+
+static inline int cpu_watchpoint_address_matches(CPUState *cpu,
+ vaddr addr, vaddr len)
+{
+ return 0;
+}
+
+#else
+
+/**
+ * cpu_check_watchpoint:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ * @attrs: memory access attributes
+ * @flags: watchpoint access type
+ * @ra: unwind return address
+ *
+ * Check for a watchpoint hit in [addr, addr+len) of the type
+ * specified by @flags. Exit via exception with a hit.
+ */
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
+ MemTxAttrs attrs, int flags, uintptr_t ra);
+
+/**
+ * cpu_watchpoint_address_matches:
+ * @cpu: cpu context
+ * @addr: guest virtual address
+ * @len: access length
+ *
+ * Return the watchpoint flags that apply to [addr, addr+len).
+ * If no watchpoint is registered for the range, the result is 0.
+ */
+int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
+
+#endif
+
+#endif /* TCG_CPU_OPS_H */
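To illustrate the @tlb_fill contract documented above, a hypothetical target hook could look as follows; foo_translate() and foo_raise_mmu_exception() are placeholders for target-specific code:

static bool foo_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    if (foo_translate(cs, addr, access_type, mmu_idx, &phys, &prot)) {
        tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;                    /* valid access: TLB entry installed */
    }
    if (probe) {
        return false;                   /* probing only: report the miss */
    }
    /* Invalid access: raise the guest exception; this does not return. */
    foo_raise_mmu_exception(cs, addr, access_type, retaddr);
}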
diff --git a/include/hw/cpu/a15mpcore.h b/include/hw/cpu/a15mpcore.h
index b423533d20..75d39e5458 100644
--- a/include/hw/cpu/a15mpcore.h
+++ b/include/hw/cpu/a15mpcore.h
@@ -22,14 +22,14 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic.h"
+#include "qom/object.h"
/* A15MP private memory region. */
#define TYPE_A15MPCORE_PRIV "a15mpcore_priv"
-#define A15MPCORE_PRIV(obj) \
- OBJECT_CHECK(A15MPPrivState, (obj), TYPE_A15MPCORE_PRIV)
+OBJECT_DECLARE_SIMPLE_TYPE(A15MPPrivState, A15MPCORE_PRIV)
-typedef struct A15MPPrivState {
+struct A15MPPrivState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -39,6 +39,6 @@ typedef struct A15MPPrivState {
MemoryRegion container;
GICState gic;
-} A15MPPrivState;
+};
#endif
diff --git a/include/hw/cpu/a9mpcore.h b/include/hw/cpu/a9mpcore.h
index 5d67ca22c4..e0396ab6af 100644
--- a/include/hw/cpu/a9mpcore.h
+++ b/include/hw/cpu/a9mpcore.h
@@ -15,12 +15,12 @@
#include "hw/misc/a9scu.h"
#include "hw/timer/arm_mptimer.h"
#include "hw/timer/a9gtimer.h"
+#include "qom/object.h"
#define TYPE_A9MPCORE_PRIV "a9mpcore_priv"
-#define A9MPCORE_PRIV(obj) \
- OBJECT_CHECK(A9MPPrivState, (obj), TYPE_A9MPCORE_PRIV)
+OBJECT_DECLARE_SIMPLE_TYPE(A9MPPrivState, A9MPCORE_PRIV)
-typedef struct A9MPPrivState {
+struct A9MPPrivState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -34,6 +34,6 @@ typedef struct A9MPPrivState {
A9GTimerState gtimer;
ARMMPTimerState mptimer;
ARMMPTimerState wdt;
-} A9MPPrivState;
+};
#endif
diff --git a/include/hw/cpu/arm11mpcore.h b/include/hw/cpu/arm11mpcore.h
index 6196109ca2..2cac8c1232 100644
--- a/include/hw/cpu/arm11mpcore.h
+++ b/include/hw/cpu/arm11mpcore.h
@@ -14,12 +14,12 @@
#include "hw/misc/arm11scu.h"
#include "hw/intc/arm_gic.h"
#include "hw/timer/arm_mptimer.h"
+#include "qom/object.h"
#define TYPE_ARM11MPCORE_PRIV "arm11mpcore_priv"
-#define ARM11MPCORE_PRIV(obj) \
- OBJECT_CHECK(ARM11MPCorePriveState, (obj), TYPE_ARM11MPCORE_PRIV)
+OBJECT_DECLARE_SIMPLE_TYPE(ARM11MPCorePriveState, ARM11MPCORE_PRIV)
-typedef struct ARM11MPCorePriveState {
+struct ARM11MPCorePriveState {
SysBusDevice parent_obj;
uint32_t num_cpu;
@@ -30,6 +30,6 @@ typedef struct ARM11MPCorePriveState {
GICState gic;
ARMMPTimerState mptimer;
ARMMPTimerState wdtimer;
-} ARM11MPCorePriveState;
+};
#endif
diff --git a/include/hw/cpu/cluster.h b/include/hw/cpu/cluster.h
index a616501a55..53fbf36af5 100644
--- a/include/hw/cpu/cluster.h
+++ b/include/hw/cpu/cluster.h
@@ -21,6 +21,7 @@
#define HW_CPU_CLUSTER_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
/*
* CPU Cluster type
@@ -54,8 +55,7 @@
*/
#define TYPE_CPU_CLUSTER "cpu-cluster"
-#define CPU_CLUSTER(obj) \
- OBJECT_CHECK(CPUClusterState, (obj), TYPE_CPU_CLUSTER)
+OBJECT_DECLARE_SIMPLE_TYPE(CPUClusterState, CPU_CLUSTER)
/*
* This limit is imposed by TCG, which puts the cluster ID into an
@@ -70,12 +70,12 @@
*
* State of a CPU cluster.
*/
-typedef struct CPUClusterState {
+struct CPUClusterState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
uint32_t cluster_id;
-} CPUClusterState;
+};
#endif
diff --git a/include/hw/cpu/core.h b/include/hw/cpu/core.h
index 555ad831bb..98ab91647e 100644
--- a/include/hw/cpu/core.h
+++ b/include/hw/cpu/core.h
@@ -10,20 +10,20 @@
#define HW_CPU_CORE_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define TYPE_CPU_CORE "cpu-core"
-#define CPU_CORE(obj) \
- OBJECT_CHECK(CPUCore, (obj), TYPE_CPU_CORE)
+OBJECT_DECLARE_SIMPLE_TYPE(CPUCore, CPU_CORE)
-typedef struct CPUCore {
+struct CPUCore {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
int core_id;
int nr_threads;
-} CPUCore;
+};
/* Note: topology field names need to be kept in sync with
* 'CpuInstanceProperties' */
diff --git a/include/hw/cris/etraxfs.h b/include/hw/cris/etraxfs.h
index 9e99380e0c..012c4e9974 100644
--- a/include/hw/cris/etraxfs.h
+++ b/include/hw/cris/etraxfs.h
@@ -29,8 +29,9 @@
#include "hw/cris/etraxfs_dma.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
+#include "qapi/error.h"
-DeviceState *etraxfs_eth_init(NICInfo *nd, hwaddr base, int phyaddr,
+DeviceState *etraxfs_eth_init(hwaddr base, int phyaddr,
struct etraxfs_dma_client *dma_out,
struct etraxfs_dma_client *dma_in);
@@ -41,7 +42,7 @@ static inline DeviceState *etraxfs_ser_create(hwaddr addr,
DeviceState *dev;
SysBusDevice *s;
- dev = qdev_new("etraxfs,serial");
+ dev = qdev_new("etraxfs-serial");
s = SYS_BUS_DEVICE(dev);
qdev_prop_set_chr(dev, "chardev", chr);
sysbus_realize_and_unref(s, &error_fatal);
diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h
new file mode 100644
index 0000000000..75e47b6864
--- /dev/null
+++ b/include/hw/cxl/cxl.h
@@ -0,0 +1,70 @@
+/*
+ * QEMU CXL Support
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_H
+#define CXL_H
+
+
+#include "qapi/qapi-types-machine.h"
+#include "qapi/qapi-visit-machine.h"
+#include "hw/pci/pci_host.h"
+#include "cxl_pci.h"
+#include "cxl_component.h"
+#include "cxl_device.h"
+
+#define CXL_CACHE_LINE_SIZE 64
+#define CXL_COMPONENT_REG_BAR_IDX 0
+#define CXL_DEVICE_REG_BAR_IDX 2
+
+#define CXL_WINDOW_MAX 10
+
+typedef struct PXBCXLDev PXBCXLDev;
+
+typedef struct CXLFixedWindow {
+ uint64_t size;
+ char **targets;
+ PXBCXLDev *target_hbs[16];
+ uint8_t num_targets;
+ uint8_t enc_int_ways;
+ uint8_t enc_int_gran;
+ /* Todo: XOR based interleaving */
+ MemoryRegion mr;
+ hwaddr base;
+} CXLFixedWindow;
+
+typedef struct CXLState {
+ bool is_enabled;
+ MemoryRegion host_mr;
+ unsigned int next_mr_idx;
+ GList *fixed_windows;
+ CXLFixedMemoryWindowOptionsList *cfmw_list;
+} CXLState;
+
+struct CXLHost {
+ PCIHostState parent_obj;
+
+ CXLComponentState cxl_cstate;
+ bool passthrough;
+};
+
+#define TYPE_PXB_CXL_HOST "pxb-cxl-host"
+OBJECT_DECLARE_SIMPLE_TYPE(CXLHost, PXB_CXL_HOST)
+
+#define TYPE_CXL_USP "cxl-upstream"
+
+typedef struct CXLUpstreamPort CXLUpstreamPort;
+DECLARE_INSTANCE_CHECKER(CXLUpstreamPort, CXL_USP, TYPE_CXL_USP)
+CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp);
+
+#define TYPE_CXL_DSP "cxl-downstream"
+
+typedef struct CXLDownstreamPort CXLDownstreamPort;
+DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP)
+
+#endif
diff --git a/include/hw/cxl/cxl_cdat.h b/include/hw/cxl/cxl_cdat.h
new file mode 100644
index 0000000000..17a09066dc
--- /dev/null
+++ b/include/hw/cxl/cxl_cdat.h
@@ -0,0 +1,172 @@
+/*
+ * CXL CDAT Structure
+ *
+ * Copyright (C) 2021 Avery Design Systems, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_CDAT_H
+#define CXL_CDAT_H
+
+#include "hw/cxl/cxl_pci.h"
+#include "hw/pci/pcie_doe.h"
+
+/*
+ * Reference:
+ * Coherent Device Attribute Table (CDAT) Specification, Rev. 1.03, July. 2022
+ * Compute Express Link (CXL) Specification, Rev. 3.1, Aug. 2023
+ */
+/* Table Access DOE - CXL r3.1 8.1.11 */
+#define CXL_DOE_TABLE_ACCESS 2
+#define CXL_DOE_PROTOCOL_CDAT ((CXL_DOE_TABLE_ACCESS << 16) | CXL_VENDOR_ID)
+
+/* Read Entry - CXL r3.1 8.1.11.1 */
+#define CXL_DOE_TAB_TYPE_CDAT 0
+#define CXL_DOE_TAB_ENT_MAX 0xFFFF
+
+/* Read Entry Request - CXL r3.1 8.1.11.1 Table 8-13 */
+#define CXL_DOE_TAB_REQ 0
+typedef struct CDATReq {
+ DOEHeader header;
+ uint8_t req_code;
+ uint8_t table_type;
+ uint16_t entry_handle;
+} QEMU_PACKED CDATReq;
+
+/* Read Entry Response - CXL r3.1 8.1.11.1 Table 8-14 */
+#define CXL_DOE_TAB_RSP 0
+typedef struct CDATRsp {
+ DOEHeader header;
+ uint8_t rsp_code;
+ uint8_t table_type;
+ uint16_t entry_handle;
+} QEMU_PACKED CDATRsp;
+
+/* CDAT Table Format - CDAT Table 1 */
+#define CXL_CDAT_REV 2
+typedef struct CDATTableHeader {
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t reserved[6];
+ uint32_t sequence;
+} QEMU_PACKED CDATTableHeader;
+
+/* CDAT Structure Types - CDAT Table 2 */
+typedef enum {
+ CDAT_TYPE_DSMAS = 0,
+ CDAT_TYPE_DSLBIS = 1,
+ CDAT_TYPE_DSMSCIS = 2,
+ CDAT_TYPE_DSIS = 3,
+ CDAT_TYPE_DSEMTS = 4,
+ CDAT_TYPE_SSLBIS = 5,
+} CDATType;
+
+typedef struct CDATSubHeader {
+ uint8_t type;
+ uint8_t reserved;
+ uint16_t length;
+} CDATSubHeader;
+
+/* Device Scoped Memory Affinity Structure - CDAT Table 3 */
+typedef struct CDATDsmas {
+ CDATSubHeader header;
+ uint8_t DSMADhandle;
+ uint8_t flags;
+#define CDAT_DSMAS_FLAG_NV (1 << 2)
+#define CDAT_DSMAS_FLAG_SHAREABLE (1 << 3)
+#define CDAT_DSMAS_FLAG_HW_COHERENT (1 << 4)
+#define CDAT_DSMAS_FLAG_DYNAMIC_CAP (1 << 5)
+ uint16_t reserved;
+ uint64_t DPA_base;
+ uint64_t DPA_length;
+} CDATDsmas;
+QEMU_BUILD_BUG_ON(sizeof(CDATDsmas) != 24);
+
+/* Device Scoped Latency and Bandwidth Information Structure - CDAT Table 5 */
+typedef struct CDATDslbis {
+ CDATSubHeader header;
+ uint8_t handle;
+ /* Definitions of these fields refer directly to HMAT fields */
+ uint8_t flags;
+ uint8_t data_type;
+ uint8_t reserved;
+ uint64_t entry_base_unit;
+ uint16_t entry[3];
+ uint16_t reserved2;
+} CDATDslbis;
+QEMU_BUILD_BUG_ON(sizeof(CDATDslbis) != 24);
+
+/* Device Scoped Memory Side Cache Information Structure - CDAT Table 6 */
+typedef struct CDATDsmscis {
+ CDATSubHeader header;
+ uint8_t DSMAS_handle;
+ uint8_t reserved[3];
+ uint64_t memory_side_cache_size;
+ uint32_t cache_attributes;
+} QEMU_PACKED CDATDsmscis;
+
+/* Device Scoped Initiator Structure - CDAT Table 7 */
+typedef struct CDATDsis {
+ CDATSubHeader header;
+ uint8_t flags;
+ uint8_t handle;
+ uint16_t reserved;
+} QEMU_PACKED CDATDsis;
+
+/* Device Scoped EFI Memory Type Structure - CDAT Table 8 */
+typedef struct CDATDsemts {
+ CDATSubHeader header;
+ uint8_t DSMAS_handle;
+ uint8_t EFI_memory_type_attr;
+ uint16_t reserved;
+ uint64_t DPA_offset;
+ uint64_t DPA_length;
+} CDATDsemts;
+QEMU_BUILD_BUG_ON(sizeof(CDATDsemts) != 24);
+
+/* Switch Scoped Latency and Bandwidth Information Structure - CDAT Table 9 */
+typedef struct CDATSslbisHeader {
+ CDATSubHeader header;
+ uint8_t data_type;
+ uint8_t reserved[3];
+ uint64_t entry_base_unit;
+} CDATSslbisHeader;
+QEMU_BUILD_BUG_ON(sizeof(CDATSslbisHeader) != 16);
+
+#define CDAT_PORT_ID_USP 0x100
+/* Switch Scoped Latency and Bandwidth Entry - CDAT Table 10 */
+typedef struct CDATSslbe {
+ uint16_t port_x_id;
+ uint16_t port_y_id;
+ uint16_t latency_bandwidth;
+ uint16_t reserved;
+} CDATSslbe;
+QEMU_BUILD_BUG_ON(sizeof(CDATSslbe) != 8);
+
+typedef struct CDATSslbis {
+ CDATSslbisHeader sslbis_header;
+ CDATSslbe sslbe[];
+} CDATSslbis;
+
+typedef struct CDATEntry {
+ void *base;
+ uint32_t length;
+} CDATEntry;
+
+typedef struct CDATObject {
+ CDATEntry *entry;
+ int entry_len;
+
+ int (*build_cdat_table)(CDATSubHeader ***cdat_table, void *priv);
+ void (*free_cdat_table)(CDATSubHeader **cdat_table, int num, void *priv);
+ bool to_update;
+ void *private;
+ char *filename;
+ uint8_t *buf;
+ struct CDATSubHeader **built_buf;
+ int built_buf_len;
+} CDATObject;
+#endif /* CXL_CDAT_H */
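A hedged sketch of the Read Entry request described above; the DOE header fields (declared in hw/pci/pcie_doe.h) and the actual DOE mailbox submission are omitted:

CDATReq req = { 0 };

/* The DOE header selects CXL_DOE_PROTOCOL_CDAT, i.e. vendor CXL_VENDOR_ID
 * with data object type CXL_DOE_TABLE_ACCESS. */
req.req_code     = CXL_DOE_TAB_REQ;        /* Read Entry */
req.table_type   = CXL_DOE_TAB_TYPE_CDAT;
req.entry_handle = 0;                      /* start with the first CDAT entry */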
diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h
new file mode 100644
index 0000000000..5012fab6f7
--- /dev/null
+++ b/include/hw/cxl/cxl_component.h
@@ -0,0 +1,280 @@
+/*
+ * QEMU CXL Component
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_COMPONENT_H
+#define CXL_COMPONENT_H
+
+/* CXL r3.1 Section 8.2.4: CXL.cache and CXL.mem Registers */
+#define CXL2_COMPONENT_IO_REGION_SIZE 0x1000
+#define CXL2_COMPONENT_CM_REGION_SIZE 0x1000
+#define CXL2_COMPONENT_BLOCK_SIZE 0x10000
+
+#include "qemu/range.h"
+#include "hw/cxl/cxl_cdat.h"
+#include "hw/register.h"
+#include "qapi/error.h"
+
+enum reg_type {
+ CXL2_DEVICE,
+ CXL2_TYPE3_DEVICE,
+ CXL2_LOGICAL_DEVICE,
+ CXL2_ROOT_PORT,
+ CXL2_RC,
+ CXL2_UPSTREAM_PORT,
+ CXL2_DOWNSTREAM_PORT,
+ CXL3_SWITCH_MAILBOX_CCI,
+};
+
+/*
+ * Capability registers are defined at the top of the CXL.cache/mem region and
+ * are packed. For our purposes we will always define the caps in the same
+ * order.
+ * CXL r3.1 Table 8-22: CXL_CAPABILITY_ID Assignment for details.
+ */
+
+/* CXL r3.1 Section 8.2.4.1: CXL Capability Header Register */
+#define CXL_CAPABILITY_VERSION 1
+REG32(CXL_CAPABILITY_HEADER, 0)
+ FIELD(CXL_CAPABILITY_HEADER, ID, 0, 16)
+ FIELD(CXL_CAPABILITY_HEADER, VERSION, 16, 4)
+ FIELD(CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 20, 4)
+ FIELD(CXL_CAPABILITY_HEADER, ARRAY_SIZE, 24, 8)
+
+#define CXLx_CAPABILITY_HEADER(type, offset) \
+ REG32(CXL_##type##_CAPABILITY_HEADER, offset) \
+ FIELD(CXL_##type##_CAPABILITY_HEADER, ID, 0, 16) \
+ FIELD(CXL_##type##_CAPABILITY_HEADER, VERSION, 16, 4) \
+ FIELD(CXL_##type##_CAPABILITY_HEADER, PTR, 20, 12)
+CXLx_CAPABILITY_HEADER(RAS, 0x4)
+CXLx_CAPABILITY_HEADER(LINK, 0x8)
+CXLx_CAPABILITY_HEADER(HDM, 0xc)
+CXLx_CAPABILITY_HEADER(EXTSEC, 0x10)
+CXLx_CAPABILITY_HEADER(SNOOP, 0x14)
+
+/*
+ * Capability structures contain the actual registers that the CXL component
+ * implements. Some of these are specific to certain types of components, but
+ * this implementation leaves enough space regardless.
+ */
+
+/* CXL r3.1 Section 8.2.4.17: CXL RAS Capability Structure */
+#define CXL_RAS_CAPABILITY_VERSION 3
+/* Give ample space for caps before this */
+#define CXL_RAS_REGISTERS_OFFSET 0x80
+#define CXL_RAS_REGISTERS_SIZE 0x58
+REG32(CXL_RAS_UNC_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET)
+#define CXL_RAS_UNC_ERR_CACHE_DATA_PARITY 0
+#define CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY 1
+#define CXL_RAS_UNC_ERR_CACHE_BE_PARITY 2
+#define CXL_RAS_UNC_ERR_CACHE_DATA_ECC 3
+#define CXL_RAS_UNC_ERR_MEM_DATA_PARITY 4
+#define CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY 5
+#define CXL_RAS_UNC_ERR_MEM_BE_PARITY 6
+#define CXL_RAS_UNC_ERR_MEM_DATA_ECC 7
+#define CXL_RAS_UNC_ERR_REINIT_THRESHOLD 8
+#define CXL_RAS_UNC_ERR_RSVD_ENCODING 9
+#define CXL_RAS_UNC_ERR_POISON_RECEIVED 10
+#define CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW 11
+#define CXL_RAS_UNC_ERR_INTERNAL 14
+#define CXL_RAS_UNC_ERR_CXL_IDE_TX 15
+#define CXL_RAS_UNC_ERR_CXL_IDE_RX 16
+#define CXL_RAS_UNC_ERR_CXL_UNUSED 63 /* Magic value */
+REG32(CXL_RAS_UNC_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x4)
+REG32(CXL_RAS_UNC_ERR_SEVERITY, CXL_RAS_REGISTERS_OFFSET + 0x8)
+REG32(CXL_RAS_COR_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET + 0xc)
+#define CXL_RAS_COR_ERR_CACHE_DATA_ECC 0
+#define CXL_RAS_COR_ERR_MEM_DATA_ECC 1
+#define CXL_RAS_COR_ERR_CRC_THRESHOLD 2
+#define CXL_RAS_COR_ERR_RETRY_THRESHOLD 3
+#define CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED 4
+#define CXL_RAS_COR_ERR_MEM_POISON_RECEIVED 5
+#define CXL_RAS_COR_ERR_PHYSICAL 6
+REG32(CXL_RAS_COR_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x10)
+REG32(CXL_RAS_ERR_CAP_CTRL, CXL_RAS_REGISTERS_OFFSET + 0x14)
+ FIELD(CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER, 0, 6)
+ FIELD(CXL_RAS_ERR_CAP_CTRL, MULTIPLE_HEADER_RECORDING_CAP, 9, 1)
+ FIELD(CXL_RAS_ERR_POISON_ENABLED, POISON_ENABLED, 13, 1)
+REG32(CXL_RAS_ERR_HEADER0, CXL_RAS_REGISTERS_OFFSET + 0x18)
+#define CXL_RAS_ERR_HEADER_NUM 32
+/* Offset 0x18 - 0x58 reserved for RAS logs */
+
+/* CXL r3.1 Section 8.2.4.18: CXL Security Capability Structure */
+#define CXL_SEC_REGISTERS_OFFSET \
+ (CXL_RAS_REGISTERS_OFFSET + CXL_RAS_REGISTERS_SIZE)
+#define CXL_SEC_REGISTERS_SIZE 0 /* We don't implement 1.1 downstream ports */
+
+/* CXL r3.1 Section 8.2.4.19: CXL Link Capability Structure */
+#define CXL_LINK_CAPABILITY_VERSION 2
+#define CXL_LINK_REGISTERS_OFFSET \
+ (CXL_SEC_REGISTERS_OFFSET + CXL_SEC_REGISTERS_SIZE)
+#define CXL_LINK_REGISTERS_SIZE 0x50
+
+/* CXL r3.1 Section 8.2.4.20: CXL HDM Decoder Capability Structure */
+#define HDM_DECODE_MAX 10 /* Maximum decoders for Devices */
+#define CXL_HDM_CAPABILITY_VERSION 3
+#define CXL_HDM_REGISTERS_OFFSET \
+ (CXL_LINK_REGISTERS_OFFSET + CXL_LINK_REGISTERS_SIZE)
+#define CXL_HDM_REGISTERS_SIZE (0x10 + 0x20 * HDM_DECODE_MAX)
+#define HDM_DECODER_INIT(n) \
+ REG32(CXL_HDM_DECODER##n##_BASE_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x10) \
+ FIELD(CXL_HDM_DECODER##n##_BASE_LO, L, 28, 4) \
+ REG32(CXL_HDM_DECODER##n##_BASE_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x14) \
+ REG32(CXL_HDM_DECODER##n##_SIZE_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x18) \
+ REG32(CXL_HDM_DECODER##n##_SIZE_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x1C) \
+ REG32(CXL_HDM_DECODER##n##_CTRL, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x20) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, IG, 0, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, IW, 4, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, LOCK_ON_COMMIT, 8, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, COMMIT, 9, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, COMMITTED, 10, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, ERR, 11, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, TYPE, 12, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, BI, 13, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, UIO, 14, 1) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, UIG, 16, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, UIW, 20, 4) \
+ FIELD(CXL_HDM_DECODER##n##_CTRL, ISP, 24, 4) \
+ REG32(CXL_HDM_DECODER##n##_TARGET_LIST_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x24) \
+ REG32(CXL_HDM_DECODER##n##_TARGET_LIST_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x28) \
+ REG32(CXL_HDM_DECODER##n##_DPA_SKIP_LO, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x24) \
+ REG32(CXL_HDM_DECODER##n##_DPA_SKIP_HI, \
+ CXL_HDM_REGISTERS_OFFSET + (0x20 * n) + 0x28)
+
+REG32(CXL_HDM_DECODER_CAPABILITY, CXL_HDM_REGISTERS_OFFSET)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT, 0, 4)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 4, 4)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 8, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 9, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 10, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 11, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, 16_WAY, 12, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, UIO, 13, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, UIO_DECODER_COUNT, 16, 4)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 20, 1)
+ FIELD(CXL_HDM_DECODER_CAPABILITY, SUPPORTED_COHERENCY_MODEL, 21, 2)
+REG32(CXL_HDM_DECODER_GLOBAL_CONTROL, CXL_HDM_REGISTERS_OFFSET + 4)
+ FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, POISON_ON_ERR_EN, 0, 1)
+ FIELD(CXL_HDM_DECODER_GLOBAL_CONTROL, HDM_DECODER_ENABLE, 1, 1)
+
+/* Support 4 decoders at all levels of topology */
+#define CXL_HDM_DECODER_COUNT 4
+
+HDM_DECODER_INIT(0);
+HDM_DECODER_INIT(1);
+HDM_DECODER_INIT(2);
+HDM_DECODER_INIT(3);
+
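The FIELD() definitions above let decoder state be read back out of the cache_mem register backing store with QEMU's registerfields helpers. A minimal sketch, not part of this patch (the helper name is made up; cxl_decode_ig() is the granularity decode helper declared later in this header):

    static inline bool cxl_hdm_decoder0_committed(const uint32_t *cache_mem,
                                                  hwaddr *granularity)
    {
        uint32_t ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];

        /* IG holds the spec-encoded interleave granularity */
        *granularity = cxl_decode_ig(FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, IG));
        return FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED);
    }
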
+/*
+ * CXL r3.1 Section 8.2.4.21: CXL Extended Security Capability Structure
+ * (Root complex only)
+ */
+#define EXTSEC_ENTRY_MAX 256
+#define CXL_EXTSEC_CAP_VERSION 2
+#define CXL_EXTSEC_REGISTERS_OFFSET \
+ (CXL_HDM_REGISTERS_OFFSET + CXL_HDM_REGISTERS_SIZE)
+#define CXL_EXTSEC_REGISTERS_SIZE (8 * EXTSEC_ENTRY_MAX + 4)
+
+/* CXL r3.1 Section 8.2.4.22: CXL IDE Capability Structure */
+#define CXL_IDE_CAP_VERSION 2
+#define CXL_IDE_REGISTERS_OFFSET \
+ (CXL_EXTSEC_REGISTERS_OFFSET + CXL_EXTSEC_REGISTERS_SIZE)
+#define CXL_IDE_REGISTERS_SIZE 0x24
+
+/* CXL r3.1 Section 8.2.4.23 - CXL Snoop Filter Capability Structure */
+#define CXL_SNOOP_CAP_VERSION 1
+#define CXL_SNOOP_REGISTERS_OFFSET \
+ (CXL_IDE_REGISTERS_OFFSET + CXL_IDE_REGISTERS_SIZE)
+#define CXL_SNOOP_REGISTERS_SIZE 0x8
+
+QEMU_BUILD_BUG_MSG((CXL_SNOOP_REGISTERS_OFFSET +
+ CXL_SNOOP_REGISTERS_SIZE) >= 0x1000,
+ "No space for registers");
+
+typedef struct component_registers {
+ /*
+ * Main memory region to be registered with QEMU core.
+ */
+ MemoryRegion component_registers;
+
+ /*
+ * CXL r3.1 Table 8-21: CXL Subsystem Component Register Ranges
+ * 0x0000 - 0x0fff CXL.io registers
+ * 0x1000 - 0x1fff CXL.cache and CXL.mem
+ * 0x2000 - 0xdfff Implementation specific
+ * 0xe000 - 0xe3ff CXL ARB/MUX registers
+ * 0xe400 - 0xffff RSVD
+ */
+ uint32_t io_registers[CXL2_COMPONENT_IO_REGION_SIZE >> 2];
+ MemoryRegion io;
+
+ uint32_t cache_mem_registers[CXL2_COMPONENT_CM_REGION_SIZE >> 2];
+ uint32_t cache_mem_regs_write_mask[CXL2_COMPONENT_CM_REGION_SIZE >> 2];
+ MemoryRegion cache_mem;
+
+ MemoryRegion impl_specific;
+ MemoryRegion arb_mux;
+ MemoryRegion rsvd;
+
+ /* special_ops is used for any component that needs specific handling */
+ MemoryRegionOps *special_ops;
+} ComponentRegisters;
+
+/*
+ * A CXL component represents all entities in a CXL hierarchy. This includes
+ * host bridges, root ports, upstream/downstream switch ports, and devices.
+ */
+typedef struct cxl_component {
+ ComponentRegisters crb;
+ union {
+ struct {
+ Range dvsecs[CXL20_MAX_DVSEC];
+ uint16_t dvsec_offset;
+ struct PCIDevice *pdev;
+ };
+ };
+
+ CDATObject cdat;
+} CXLComponentState;
+
+void cxl_component_register_block_init(Object *obj,
+ CXLComponentState *cxl_cstate,
+ const char *type);
+void cxl_component_register_init_common(uint32_t *reg_state,
+ uint32_t *write_msk,
+ enum reg_type type);
+
+void cxl_component_create_dvsec(CXLComponentState *cxl_cstate,
+ enum reg_type cxl_dev_type, uint16_t length,
+ uint16_t type, uint8_t rev, uint8_t *body);
+
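As a rough usage sketch (not from this patch), a type 3 device model could add its GPF DVSEC using the structures from cxl_pci.h below. CXL2_TYPE3_DEVICE is assumed here to be one of the enum reg_type values, and the field values are purely illustrative:

    static void example_add_gpf_dvsec(CXLComponentState *cxl_cstate)
    {
        CXLDVSECDeviceGPF gpf = {
            .phase2_duration = 0x603, /* illustrative: 3 seconds */
            .phase2_power = 0x33,     /* illustrative: 0x33 mW */
        };

        cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                                   GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                                   GPF_DEVICE_DVSEC_REVID, (uint8_t *)&gpf);
    }
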
+int cxl_decoder_count_enc(int count);
+int cxl_decoder_count_dec(int enc_cnt);
+
+uint8_t cxl_interleave_ways_enc(int iw, Error **errp);
+int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp);
+uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp);
+
+hwaddr cxl_decode_ig(int ig);
+
+CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb);
+bool cxl_get_hb_passthrough(PCIHostState *hb);
+
+void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp);
+void cxl_doe_cdat_release(CXLComponentState *cxl_cstate);
+void cxl_doe_cdat_update(CXLComponentState *cxl_cstate, Error **errp);
+
+#endif
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
new file mode 100644
index 0000000000..279b276bda
--- /dev/null
+++ b/include/hw/cxl/cxl_device.h
@@ -0,0 +1,506 @@
+/*
+ * QEMU CXL Devices
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_DEVICE_H
+#define CXL_DEVICE_H
+
+#include "hw/cxl/cxl_component.h"
+#include "hw/pci/pci_device.h"
+#include "hw/register.h"
+#include "hw/cxl/cxl_events.h"
+
+/*
+ * The following is how a CXL device's Memory Device registers are laid out.
+ * The only requirement from the spec is that the capabilities array and the
+ * capability headers start at offset 0 and are contiguously packed. The headers
+ * themselves provide offsets to the register fields. For this emulation, the
+ * actual registers will start at offset 0x80 (m == 0x80). No secondary
+ * mailbox is implemented which means that the offset of the start of the
+ * mailbox payload (n) is given by
+ * n = m + sizeof(mailbox registers) + sizeof(device registers).
+ *
+ * +---------------------------------+
+ * | |
+ * | Memory Device Registers |
+ * | |
+ * n + PAYLOAD_SIZE_MAX -----------------------------------
+ * ^ | |
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | | Mailbox Payload |
+ * | | |
+ * | | |
+ * | | |
+ * n -----------------------------------
+ * ^ | Mailbox Registers |
+ * | | |
+ * | -----------------------------------
+ * | | |
+ * | | Device Registers |
+ * | | |
+ * m ---------------------------------->
+ * ^ | Memory Device Capability Header|
+ * | -----------------------------------
+ * | | Mailbox Capability Header |
+ * | -----------------------------------
+ * | | Device Capability Header |
+ * | -----------------------------------
+ * | | Device Cap Array Register |
+ * 0 +---------------------------------+
+ *
+ */
+
+/* CXL r3.1 Figure 8-12: CXL Device Registers */
+#define CXL_DEVICE_CAP_HDR1_OFFSET 0x10
+/* CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register */
+#define CXL_DEVICE_CAP_REG_SIZE 0x10
+
+/*
+ * CXL r3.1 Section 8.2.8.2.1: CXL Device Capabilities +
+ * CXL r3.1 Section 8.2.8.5: Memory Device Capabilities
+ */
+#define CXL_DEVICE_CAPS_MAX 4
+#define CXL_CAPS_SIZE \
+ (CXL_DEVICE_CAP_REG_SIZE * (CXL_DEVICE_CAPS_MAX + 1)) /* +1 for header */
+
+#define CXL_DEVICE_STATUS_REGISTERS_OFFSET 0x80 /* Read comment above */
+/*
+ * CXL r3.1 Section 8.2.8.3: Device Status Registers
+ * The Event Status register is the only Device Status Register defined in
+ * CXL r3.1, hence the 0x8 length below.
+ */
+#define CXL_DEVICE_STATUS_REGISTERS_LENGTH 0x8
+
+#define CXL_MAILBOX_REGISTERS_OFFSET \
+ (CXL_DEVICE_STATUS_REGISTERS_OFFSET + CXL_DEVICE_STATUS_REGISTERS_LENGTH)
+/* CXL r3.1 Figure 8-13: Mailbox Registers */
+#define CXL_MAILBOX_REGISTERS_SIZE 0x20
+#define CXL_MAILBOX_PAYLOAD_SHIFT 11
+#define CXL_MAILBOX_MAX_PAYLOAD_SIZE (1 << CXL_MAILBOX_PAYLOAD_SHIFT)
+#define CXL_MAILBOX_REGISTERS_LENGTH \
+ (CXL_MAILBOX_REGISTERS_SIZE + CXL_MAILBOX_MAX_PAYLOAD_SIZE)
+
+#define CXL_MEMORY_DEVICE_REGISTERS_OFFSET \
+ (CXL_MAILBOX_REGISTERS_OFFSET + CXL_MAILBOX_REGISTERS_LENGTH)
+#define CXL_MEMORY_DEVICE_REGISTERS_LENGTH 0x8
+
+#define CXL_MMIO_SIZE \
+ (CXL_DEVICE_CAP_REG_SIZE + CXL_DEVICE_STATUS_REGISTERS_LENGTH + \
+ CXL_MAILBOX_REGISTERS_LENGTH + CXL_MEMORY_DEVICE_REGISTERS_LENGTH)
+
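With the values above, the layout in the diagram resolves to concrete offsets: device status registers at m = 0x80, mailbox registers at 0x88, mailbox payload at n = 0xa8, and memory device registers at 0x8a8. A sketch of compile-time checks over those offsets (illustrative, not part of the patch):

    QEMU_BUILD_BUG_ON(CXL_MAILBOX_REGISTERS_OFFSET != 0x88);
    QEMU_BUILD_BUG_ON(CXL_MAILBOX_REGISTERS_OFFSET +
                      CXL_MAILBOX_REGISTERS_SIZE != 0xa8);
    QEMU_BUILD_BUG_ON(CXL_MEMORY_DEVICE_REGISTERS_OFFSET != 0x8a8);
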
+/* CXL r3.1 Table 8-34: Command Return Codes */
+typedef enum {
+ CXL_MBOX_SUCCESS = 0x0,
+ CXL_MBOX_BG_STARTED = 0x1,
+ CXL_MBOX_INVALID_INPUT = 0x2,
+ CXL_MBOX_UNSUPPORTED = 0x3,
+ CXL_MBOX_INTERNAL_ERROR = 0x4,
+ CXL_MBOX_RETRY_REQUIRED = 0x5,
+ CXL_MBOX_BUSY = 0x6,
+ CXL_MBOX_MEDIA_DISABLED = 0x7,
+ CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
+ CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
+ CXL_MBOX_FW_AUTH_FAILED = 0xa,
+ CXL_MBOX_FW_INVALID_SLOT = 0xb,
+ CXL_MBOX_FW_ROLLEDBACK = 0xc,
+ CXL_MBOX_FW_REST_REQD = 0xd,
+ CXL_MBOX_INVALID_HANDLE = 0xe,
+ CXL_MBOX_INVALID_PA = 0xf,
+ CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
+ CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
+ CXL_MBOX_ABORTED = 0x12,
+ CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
+ CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
+ CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
+ CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
+ CXL_MBOX_INVALID_LOG = 0x17,
+ CXL_MBOX_INTERRUPTED = 0x18,
+ CXL_MBOX_UNSUPPORTED_FEATURE_VERSION = 0x19,
+ CXL_MBOX_UNSUPPORTED_FEATURE_SELECTION_VALUE = 0x1a,
+ CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS = 0x1b,
+ CXL_MBOX_FEATURE_TRANSFER_OUT_OF_ORDER = 0x1c,
+ CXL_MBOX_RESOURCES_EXHAUSTED = 0x1d,
+ CXL_MBOX_INVALID_EXTENT_LIST = 0x1e,
+ CXL_MBOX_TRANSFER_OUT_OF_ORDER = 0x1f,
+ CXL_MBOX_REQUEST_ABORT_NOTSUP = 0x20,
+ CXL_MBOX_MAX = 0x20
+} CXLRetCode;
+
+typedef struct CXLCCI CXLCCI;
+typedef struct cxl_device_state CXLDeviceState;
+struct cxl_cmd;
+typedef CXLRetCode (*opcode_handler)(const struct cxl_cmd *cmd,
+ uint8_t *payload_in, size_t len_in,
+ uint8_t *payload_out, size_t *len_out,
+ CXLCCI *cci);
+struct cxl_cmd {
+ const char *name;
+ opcode_handler handler;
+ ssize_t in;
+ uint16_t effect; /* Reported in CEL */
+};
+
+typedef struct CXLEvent {
+ CXLEventRecordRaw data;
+ QSIMPLEQ_ENTRY(CXLEvent) node;
+} CXLEvent;
+
+typedef struct CXLEventLog {
+ uint16_t next_handle;
+ uint16_t overflow_err_count;
+ uint64_t first_overflow_timestamp;
+ uint64_t last_overflow_timestamp;
+ bool irq_enabled;
+ int irq_vec;
+ QemuMutex lock;
+ QSIMPLEQ_HEAD(, CXLEvent) events;
+} CXLEventLog;
+
+typedef struct CXLCCI {
+ const struct cxl_cmd (*cxl_cmd_set)[256];
+ struct cel_log {
+ uint16_t opcode;
+ uint16_t effect;
+ } cel_log[1 << 16];
+ size_t cel_size;
+
+ /* background command handling (times in ms) */
+ struct {
+ uint16_t opcode;
+ uint16_t complete_pct;
+ uint16_t ret_code; /* Current value of retcode */
+ uint64_t starttime;
+ /* set by each bg cmd, cleared by the bg_timer when complete */
+ uint64_t runtime;
+ QEMUTimer *timer;
+ } bg;
+ size_t payload_max;
+ /* Pointer to device hosting the CCI */
+ DeviceState *d;
+ /* Pointer to the device hosting the protocol conversion */
+ DeviceState *intf;
+} CXLCCI;
+
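The bg sub-struct drives long-running ("background") mailbox commands: a command records its opcode, start time, and expected runtime, and a timer callback can derive the completion percentage from them. A hedged sketch, assuming the timer was created at millisecond scale (function name is made up for illustration):

    static void example_bg_update(CXLCCI *cci)
    {
        uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        uint64_t elapsed = now - cci->bg.starttime;

        if (cci->bg.runtime && elapsed < cci->bg.runtime) {
            cci->bg.complete_pct = elapsed * 100 / cci->bg.runtime;
            timer_mod(cci->bg.timer, now + 100); /* poll again in 100 ms */
        } else {
            cci->bg.complete_pct = 100;
        }
    }
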
+typedef struct cxl_device_state {
+ MemoryRegion device_registers;
+
+ /* CXL r3.1 Section 8.2.8.3: Device Status Registers */
+ struct {
+ MemoryRegion device;
+ union {
+ uint8_t dev_reg_state[CXL_DEVICE_STATUS_REGISTERS_LENGTH];
+ uint16_t dev_reg_state16[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 2];
+ uint32_t dev_reg_state32[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 4];
+ uint64_t dev_reg_state64[CXL_DEVICE_STATUS_REGISTERS_LENGTH / 8];
+ };
+ uint64_t event_status;
+ };
+ MemoryRegion memory_device;
+ struct {
+ MemoryRegion caps;
+ union {
+ uint32_t caps_reg_state32[CXL_CAPS_SIZE / 4];
+ uint64_t caps_reg_state64[CXL_CAPS_SIZE / 8];
+ };
+ };
+
+ /* CXL r3.1 Section 8.2.8.4: Mailbox Registers */
+ struct {
+ MemoryRegion mailbox;
+ uint16_t payload_size;
+ uint8_t mbox_msi_n;
+ union {
+ uint8_t mbox_reg_state[CXL_MAILBOX_REGISTERS_LENGTH];
+ uint16_t mbox_reg_state16[CXL_MAILBOX_REGISTERS_LENGTH / 2];
+ uint32_t mbox_reg_state32[CXL_MAILBOX_REGISTERS_LENGTH / 4];
+ uint64_t mbox_reg_state64[CXL_MAILBOX_REGISTERS_LENGTH / 8];
+ };
+ };
+
+ /* Stash the memory device status value */
+ uint64_t memdev_status;
+
+ struct {
+ bool set;
+ uint64_t last_set;
+ uint64_t host_set;
+ } timestamp;
+
+ /* memory region size, HDM */
+ uint64_t mem_size;
+ uint64_t pmem_size;
+ uint64_t vmem_size;
+
+ const struct cxl_cmd (*cxl_cmd_set)[256];
+ CXLEventLog event_logs[CXL_EVENT_TYPE_MAX];
+} CXLDeviceState;
+
+/* Initialize the register block for a device */
+void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev,
+ CXLCCI *cci);
+
+typedef struct CXLType3Dev CXLType3Dev;
+typedef struct CSWMBCCIDev CSWMBCCIDev;
+/* Set up default values for the register block */
+void cxl_device_register_init_t3(CXLType3Dev *ct3d);
+void cxl_device_register_init_swcci(CSWMBCCIDev *sw);
+
+/*
+ * CXL r3.1 Section 8.2.8.1: CXL Device Capabilities Array Register
+ * Documented as a 128-bit register, but only 64-bit accesses are used and
+ * the second 64 bits are currently reserved.
+ */
+REG64(CXL_DEV_CAP_ARRAY, 0)
+ FIELD(CXL_DEV_CAP_ARRAY, CAP_ID, 0, 16)
+ FIELD(CXL_DEV_CAP_ARRAY, CAP_VERSION, 16, 8)
+ FIELD(CXL_DEV_CAP_ARRAY, CAP_COUNT, 32, 16)
+
+void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
+ bool available);
+
+/*
+ * Helper macro to initialize capability headers for CXL devices.
+ *
+ * In CXL r3.1 Section 8.2.8.2: CXL Device Capability Header Register, this is
+ * listed as a 128b register, but in CXL r3.1 Section 8.2.8: CXL Device Register
+ * Interface, it says:
+ * > No registers defined in Section 8.2.8 are larger than 64-bits wide so that
+ * > is the maximum access size allowed for these registers. If this rule is not
+ * > followed, the behavior is undefined.
+ *
+ * > To illustrate how the fields fit together, the layouts ... are shown as
+ * > wider than a 64 bit register. Implementations are expected to use any size
+ * > accesses for this information up to 64 bits without loss of functionality
+ *
+ * Here we've chosen to make it 4 dwords.
+ */
+#define CXL_DEVICE_CAPABILITY_HEADER_REGISTER(n, offset) \
+ REG32(CXL_DEV_##n##_CAP_HDR0, offset) \
+ FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_ID, 0, 16) \
+ FIELD(CXL_DEV_##n##_CAP_HDR0, CAP_VERSION, 16, 8) \
+ REG32(CXL_DEV_##n##_CAP_HDR1, offset + 4) \
+ FIELD(CXL_DEV_##n##_CAP_HDR1, CAP_OFFSET, 0, 32) \
+ REG32(CXL_DEV_##n##_CAP_HDR2, offset + 8) \
+ FIELD(CXL_DEV_##n##_CAP_HDR2, CAP_LENGTH, 0, 32)
+
+CXL_DEVICE_CAPABILITY_HEADER_REGISTER(DEVICE_STATUS, CXL_DEVICE_CAP_HDR1_OFFSET)
+CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MAILBOX, CXL_DEVICE_CAP_HDR1_OFFSET + \
+ CXL_DEVICE_CAP_REG_SIZE)
+CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE,
+ CXL_DEVICE_CAP_HDR1_OFFSET +
+ CXL_DEVICE_CAP_REG_SIZE * 2)
+
+void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max);
+void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
+ DeviceState *d, size_t payload_max);
+void cxl_init_cci(CXLCCI *cci, size_t payload_max);
+int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
+ size_t len_in, uint8_t *pl_in,
+ size_t *len_out, uint8_t *pl_out,
+ bool *bg_started);
+void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
+ DeviceState *intf,
+ size_t payload_max);
+
+void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d,
+ DeviceState *intf, size_t payload_max);
+
+#define cxl_device_cap_init(dstate, reg, cap_id, ver) \
+ do { \
+ uint32_t *cap_hdrs = dstate->caps_reg_state32; \
+ int which = R_CXL_DEV_##reg##_CAP_HDR0; \
+ cap_hdrs[which] = \
+ FIELD_DP32(cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, \
+ CAP_ID, cap_id); \
+ cap_hdrs[which] = FIELD_DP32( \
+ cap_hdrs[which], CXL_DEV_##reg##_CAP_HDR0, CAP_VERSION, ver); \
+ cap_hdrs[which + 1] = \
+ FIELD_DP32(cap_hdrs[which + 1], CXL_DEV_##reg##_CAP_HDR1, \
+ CAP_OFFSET, CXL_##reg##_REGISTERS_OFFSET); \
+ cap_hdrs[which + 2] = \
+ FIELD_DP32(cap_hdrs[which + 2], CXL_DEV_##reg##_CAP_HDR2, \
+ CAP_LENGTH, CXL_##reg##_REGISTERS_LENGTH); \
+ } while (0)
+
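For illustration only, a device model might populate its three capability headers with the macro above; the capability ID values follow the spec's ID assignment but should be treated as illustrative here:

    static void example_init_cap_headers(CXLDeviceState *cxl_dstate)
    {
        /* 1 = Device Status, 2 = Primary Mailbox, 0x4000 = Memory Device Status */
        cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
        cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
        cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    }
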
+/* CXL r3.1 Section 8.2.8.3.1: Event Status Register */
+#define CXL_DEVICE_STATUS_VERSION 2
+REG64(CXL_DEV_EVENT_STATUS, 0)
+ FIELD(CXL_DEV_EVENT_STATUS, EVENT_STATUS, 0, 32)
+
+#define CXL_DEV_MAILBOX_VERSION 1
+/* CXL r3.1 Section 8.2.8.4.3: Mailbox Capabilities Register */
+REG32(CXL_DEV_MAILBOX_CAP, 0)
+ FIELD(CXL_DEV_MAILBOX_CAP, PAYLOAD_SIZE, 0, 5)
+ FIELD(CXL_DEV_MAILBOX_CAP, INT_CAP, 5, 1)
+ FIELD(CXL_DEV_MAILBOX_CAP, BG_INT_CAP, 6, 1)
+ FIELD(CXL_DEV_MAILBOX_CAP, MSI_N, 7, 4)
+ FIELD(CXL_DEV_MAILBOX_CAP, MBOX_READY_TIME, 11, 8)
+ FIELD(CXL_DEV_MAILBOX_CAP, TYPE, 19, 4)
+
+/* CXL r3.1 Section 8.2.8.4.4: Mailbox Control Register */
+REG32(CXL_DEV_MAILBOX_CTRL, 4)
+ FIELD(CXL_DEV_MAILBOX_CTRL, DOORBELL, 0, 1)
+ FIELD(CXL_DEV_MAILBOX_CTRL, INT_EN, 1, 1)
+ FIELD(CXL_DEV_MAILBOX_CTRL, BG_INT_EN, 2, 1)
+
+/* CXL r3.1 Section 8.2.8.4.5: Command Register */
+REG64(CXL_DEV_MAILBOX_CMD, 8)
+ FIELD(CXL_DEV_MAILBOX_CMD, COMMAND, 0, 8)
+ FIELD(CXL_DEV_MAILBOX_CMD, COMMAND_SET, 8, 8)
+ FIELD(CXL_DEV_MAILBOX_CMD, LENGTH, 16, 20)
+
+/* CXL r3.1 Section 8.2.8.4.6: Mailbox Status Register */
+REG64(CXL_DEV_MAILBOX_STS, 0x10)
+ FIELD(CXL_DEV_MAILBOX_STS, BG_OP, 0, 1)
+ FIELD(CXL_DEV_MAILBOX_STS, ERRNO, 32, 16)
+ FIELD(CXL_DEV_MAILBOX_STS, VENDOR_ERRNO, 48, 16)
+
+/* CXL r3.1 Section 8.2.8.4.7: Background Command Status Register */
+REG64(CXL_DEV_BG_CMD_STS, 0x18)
+ FIELD(CXL_DEV_BG_CMD_STS, OP, 0, 16)
+ FIELD(CXL_DEV_BG_CMD_STS, PERCENTAGE_COMP, 16, 7)
+ FIELD(CXL_DEV_BG_CMD_STS, RET_CODE, 32, 16)
+ FIELD(CXL_DEV_BG_CMD_STS, VENDOR_RET_CODE, 48, 16)
+
+/* CXL r3.1 Section 8.2.8.4.8: Command Payload Registers */
+REG32(CXL_DEV_CMD_PAYLOAD, 0x20)
+
+/* CXL r3.1 Section 8.2.8.4.1: Memory Device Status Registers */
+#define CXL_MEM_DEV_STATUS_VERSION 1
+REG64(CXL_MEM_DEV_STS, 0)
+ FIELD(CXL_MEM_DEV_STS, FATAL, 0, 1)
+ FIELD(CXL_MEM_DEV_STS, FW_HALT, 1, 1)
+ FIELD(CXL_MEM_DEV_STS, MEDIA_STATUS, 2, 2)
+ FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1)
+ FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3)
+
+static inline void __toggle_media(CXLDeviceState *cxl_dstate, int val)
+{
+ uint64_t dev_status_reg;
+
+ dev_status_reg = cxl_dstate->memdev_status;
+ dev_status_reg = FIELD_DP64(dev_status_reg, CXL_MEM_DEV_STS, MEDIA_STATUS,
+ val);
+ cxl_dstate->memdev_status = dev_status_reg;
+}
+#define cxl_dev_disable_media(cxlds) \
+ do { __toggle_media((cxlds), 0x3); } while (0)
+#define cxl_dev_enable_media(cxlds) \
+ do { __toggle_media((cxlds), 0x1); } while (0)
+
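A short sketch (not part of the patch) of how the helpers above combine with the CXL_MEM_DEV_STS fields: enable the media, then read MEDIA_STATUS back out of the stashed status value:

    static bool example_media_ready(CXLDeviceState *cxl_dstate)
    {
        cxl_dev_enable_media(cxl_dstate); /* MEDIA_STATUS <- 0x1 (ready) */
        return FIELD_EX64(cxl_dstate->memdev_status,
                          CXL_MEM_DEV_STS, MEDIA_STATUS) == 0x1;
    }
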
+static inline bool sanitize_running(CXLCCI *cci)
+{
+ return !!cci->bg.runtime && cci->bg.opcode == 0x4400;
+}
+
+typedef struct CXLError {
+ QTAILQ_ENTRY(CXLError) node;
+ int type; /* Error code as per FE definition */
+ uint32_t header[CXL_RAS_ERR_HEADER_NUM];
+} CXLError;
+
+typedef QTAILQ_HEAD(, CXLError) CXLErrorList;
+
+typedef struct CXLPoison {
+ uint64_t start, length;
+ uint8_t type;
+#define CXL_POISON_TYPE_EXTERNAL 0x1
+#define CXL_POISON_TYPE_INTERNAL 0x2
+#define CXL_POISON_TYPE_INJECTED 0x3
+ QLIST_ENTRY(CXLPoison) node;
+} CXLPoison;
+
+typedef QLIST_HEAD(, CXLPoison) CXLPoisonList;
+#define CXL_POISON_LIST_LIMIT 256
+
+struct CXLType3Dev {
+ /* Private */
+ PCIDevice parent_obj;
+
+ /* Properties */
+ HostMemoryBackend *hostmem; /* deprecated */
+ HostMemoryBackend *hostvmem;
+ HostMemoryBackend *hostpmem;
+ HostMemoryBackend *lsa;
+ uint64_t sn;
+
+ /* State */
+ AddressSpace hostvmem_as;
+ AddressSpace hostpmem_as;
+ CXLComponentState cxl_cstate;
+ CXLDeviceState cxl_dstate;
+ CXLCCI cci; /* Primary PCI mailbox CCI */
+ /* Always initialized since there is no way to know if a VDM might show up */
+ CXLCCI vdm_fm_owned_ld_mctp_cci;
+ CXLCCI ld0_cci;
+
+ /* DOE */
+ DOECap doe_cdat;
+
+ /* Error injection */
+ CXLErrorList error_list;
+
+ /* Poison Injection - cache */
+ CXLPoisonList poison_list;
+ unsigned int poison_list_cnt;
+ bool poison_list_overflowed;
+ uint64_t poison_list_overflow_ts;
+};
+
+#define TYPE_CXL_TYPE3 "cxl-type3"
+OBJECT_DECLARE_TYPE(CXLType3Dev, CXLType3Class, CXL_TYPE3)
+
+struct CXLType3Class {
+ /* Private */
+ PCIDeviceClass parent_class;
+
+ /* public */
+ uint64_t (*get_lsa_size)(CXLType3Dev *ct3d);
+
+ uint64_t (*get_lsa)(CXLType3Dev *ct3d, void *buf, uint64_t size,
+ uint64_t offset);
+ void (*set_lsa)(CXLType3Dev *ct3d, const void *buf, uint64_t size,
+ uint64_t offset);
+ bool (*set_cacheline)(CXLType3Dev *ct3d, uint64_t dpa_offset,
+ uint8_t *data);
+};
+
+struct CSWMBCCIDev {
+ PCIDevice parent_obj;
+ PCIDevice *target;
+ CXLComponentState cxl_cstate;
+ CXLDeviceState cxl_dstate;
+ CXLCCI *cci;
+};
+
+#define TYPE_CXL_SWITCH_MAILBOX_CCI "cxl-switch-mailbox-cci"
+OBJECT_DECLARE_TYPE(CSWMBCCIDev, CSWMBCCIClass, CXL_SWITCH_MAILBOX_CCI)
+
+MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
+ unsigned size, MemTxAttrs attrs);
+
+uint64_t cxl_device_get_timestamp(CXLDeviceState *cxlds);
+
+void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num);
+bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
+ CXLEventRecordRaw *event);
+CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
+ uint8_t log_type, int max_recs,
+ size_t *len);
+CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
+ CXLClearEventPayload *pl);
+
+void cxl_event_irq_assert(CXLType3Dev *ct3d);
+
+void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d);
+
+#endif
diff --git a/include/hw/cxl/cxl_events.h b/include/hw/cxl/cxl_events.h
new file mode 100644
index 0000000000..5170b8dbf8
--- /dev/null
+++ b/include/hw/cxl/cxl_events.h
@@ -0,0 +1,169 @@
+/*
+ * QEMU CXL Events
+ *
+ * Copyright (c) 2022 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_EVENTS_H
+#define CXL_EVENTS_H
+
+#include "qemu/uuid.h"
+
+/*
+ * CXL r3.1 section 8.2.9.2.2: Get Event Records (Opcode 0100h); Table 8-52
+ *
+ * Define these as the bit position for the event status register for ease of
+ * setting the status.
+ */
+typedef enum CXLEventLogType {
+ CXL_EVENT_TYPE_INFO = 0,
+ CXL_EVENT_TYPE_WARN = 1,
+ CXL_EVENT_TYPE_FAIL = 2,
+ CXL_EVENT_TYPE_FATAL = 3,
+ CXL_EVENT_TYPE_DYNAMIC_CAP = 4,
+ CXL_EVENT_TYPE_MAX
+} CXLEventLogType;
+
+/*
+ * Common Event Record Format
+ * CXL r3.1 section 8.2.9.2.1: Event Records; Table 8-43
+ */
+#define CXL_EVENT_REC_HDR_RES_LEN 0xf
+typedef struct CXLEventRecordHdr {
+ QemuUUID id;
+ uint8_t length;
+ uint8_t flags[3];
+ uint16_t handle;
+ uint16_t related_handle;
+ uint64_t timestamp;
+ uint8_t maint_op_class;
+ uint8_t reserved[CXL_EVENT_REC_HDR_RES_LEN];
+} QEMU_PACKED CXLEventRecordHdr;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+typedef struct CXLEventRecordRaw {
+ CXLEventRecordHdr hdr;
+ uint8_t data[CXL_EVENT_RECORD_DATA_LENGTH];
+} QEMU_PACKED CXLEventRecordRaw;
+#define CXL_EVENT_RECORD_SIZE (sizeof(CXLEventRecordRaw))
+
+/*
+ * Get Event Records output payload
+ * CXL r3.1 section 8.2.9.2.2; Table 8-53
+ */
+#define CXL_GET_EVENT_FLAG_OVERFLOW BIT(0)
+#define CXL_GET_EVENT_FLAG_MORE_RECORDS BIT(1)
+typedef struct CXLGetEventPayload {
+ uint8_t flags;
+ uint8_t reserved1;
+ uint16_t overflow_err_count;
+ uint64_t first_overflow_timestamp;
+ uint64_t last_overflow_timestamp;
+ uint16_t record_count;
+ uint8_t reserved2[0xa];
+ CXLEventRecordRaw records[];
+} QEMU_PACKED CXLGetEventPayload;
+#define CXL_EVENT_PAYLOAD_HDR_SIZE (sizeof(CXLGetEventPayload))
+
+/*
+ * Clear Event Records input payload
+ * CXL r3.1 section 8.2.9.2.3; Table 8-54
+ */
+typedef struct CXLClearEventPayload {
+ uint8_t event_log; /* CXLEventLogType */
+ uint8_t clear_flags;
+ uint8_t nr_recs;
+ uint8_t reserved[3];
+ uint16_t handle[];
+} CXLClearEventPayload;
+
+/*
+ * Event Interrupt Policy
+ *
+ * CXL r3.1 section 8.2.9.2.4; Table 8-55
+ */
+typedef enum CXLEventIntMode {
+ CXL_INT_NONE = 0x00,
+ CXL_INT_MSI_MSIX = 0x01,
+ CXL_INT_FW = 0x02,
+ CXL_INT_RES = 0x03,
+} CXLEventIntMode;
+#define CXL_EVENT_INT_MODE_MASK 0x3
+#define CXL_EVENT_INT_SETTING(vector) \
+ ((((uint8_t)vector & 0xf) << 4) | CXL_INT_MSI_MSIX)
+typedef struct CXLEventInterruptPolicy {
+ uint8_t info_settings;
+ uint8_t warn_settings;
+ uint8_t failure_settings;
+ uint8_t fatal_settings;
+ uint8_t dyn_cap_settings;
+} QEMU_PACKED CXLEventInterruptPolicy;
+/* DCD is optional but other fields are not */
+#define CXL_EVENT_INT_SETTING_MIN_LEN 4
+
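Sketch of building an Event Interrupt Policy payload with the macro above, routing only the FATAL log to MSI/MSI-X vector 2 (illustrative, not part of the patch):

    static CXLEventInterruptPolicy example_event_int_policy(void)
    {
        return (CXLEventInterruptPolicy) {
            .info_settings = CXL_INT_NONE,
            .warn_settings = CXL_INT_NONE,
            .failure_settings = CXL_INT_NONE,
            .fatal_settings = CXL_EVENT_INT_SETTING(2), /* vector 2, MSI/MSI-X */
        };
    }
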
+/*
+ * General Media Event Record
+ * CXL r3.1 Section 8.2.9.2.1.1; Table 8-45
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+#define CXL_EVENT_GEN_MED_RES_SIZE 0x2e
+typedef struct CXLEventGenMedia {
+ CXLEventRecordHdr hdr;
+ uint64_t phys_addr;
+ uint8_t descriptor;
+ uint8_t type;
+ uint8_t transaction_type;
+ uint16_t validity_flags;
+ uint8_t channel;
+ uint8_t rank;
+ uint8_t device[3];
+ uint8_t component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ uint8_t reserved[CXL_EVENT_GEN_MED_RES_SIZE];
+} QEMU_PACKED CXLEventGenMedia;
+
+/*
+ * DRAM Event Record
+ * CXL r3.1 Section 8.2.9.2.1.2: Table 8-46
+ * All fields little endian.
+ */
+typedef struct CXLEventDram {
+ CXLEventRecordHdr hdr;
+ uint64_t phys_addr;
+ uint8_t descriptor;
+ uint8_t type;
+ uint8_t transaction_type;
+ uint16_t validity_flags;
+ uint8_t channel;
+ uint8_t rank;
+ uint8_t nibble_mask[3];
+ uint8_t bank_group;
+ uint8_t bank;
+ uint8_t row[3];
+ uint16_t column;
+ uint64_t correction_mask[4];
+ uint8_t reserved[0x17];
+} QEMU_PACKED CXLEventDram;
+
+/*
+ * Memory Module Event Record
+ * CXL r3.1 Section 8.2.9.2.1.3: Table 8-47
+ * All fields little endian.
+ */
+typedef struct CXLEventMemoryModule {
+ CXLEventRecordHdr hdr;
+ uint8_t type;
+ uint8_t health_status;
+ uint8_t media_status;
+ uint8_t additional_status;
+ uint8_t life_used;
+ int16_t temperature;
+ uint32_t dirty_shutdown_count;
+ uint32_t corrected_volatile_error_count;
+ uint32_t corrected_persistent_error_count;
+ uint8_t reserved[0x3d];
+} QEMU_PACKED CXLEventMemoryModule;
+
+#endif /* CXL_EVENTS_H */
diff --git a/include/hw/cxl/cxl_host.h b/include/hw/cxl/cxl_host.h
new file mode 100644
index 0000000000..c9bc9c7c50
--- /dev/null
+++ b/include/hw/cxl/cxl_host.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU CXL Host Setup
+ *
+ * Copyright (c) 2022 Huawei
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "hw/cxl/cxl.h"
+#include "hw/boards.h"
+
+#ifndef CXL_HOST_H
+#define CXL_HOST_H
+
+void cxl_machine_init(Object *obj, CXLState *state);
+void cxl_fmws_link_targets(CXLState *stat, Error **errp);
+void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp);
+
+extern const MemoryRegionOps cfmws_ops;
+
+#endif
diff --git a/include/hw/cxl/cxl_pci.h b/include/hw/cxl/cxl_pci.h
new file mode 100644
index 0000000000..d0855ed78b
--- /dev/null
+++ b/include/hw/cxl/cxl_pci.h
@@ -0,0 +1,194 @@
+/*
+ * QEMU CXL PCI interfaces
+ *
+ * Copyright (c) 2020 Intel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef CXL_PCI_H
+#define CXL_PCI_H
+
+
+#define CXL_VENDOR_ID 0x1e98
+
+#define PCIE_DVSEC_HEADER1_OFFSET 0x4 /* Offset from start of extended cap */
+#define PCIE_DVSEC_ID_OFFSET 0x8
+
+#define PCIE_CXL_DEVICE_DVSEC_LENGTH 0x3C
+#define PCIE_CXL31_DEVICE_DVSEC_REVID 3
+
+#define EXTENSIONS_PORT_DVSEC_LENGTH 0x28
+#define EXTENSIONS_PORT_DVSEC_REVID 0
+
+#define GPF_PORT_DVSEC_LENGTH 0x10
+#define GPF_PORT_DVSEC_REVID 0
+
+#define GPF_DEVICE_DVSEC_LENGTH 0x10
+#define GPF_DEVICE_DVSEC_REVID 0
+
+#define PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH 0x20
+#define PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID 2
+
+#define REG_LOC_DVSEC_LENGTH 0x24
+#define REG_LOC_DVSEC_REVID 0
+
+enum {
+ PCIE_CXL_DEVICE_DVSEC = 0,
+ NON_CXL_FUNCTION_MAP_DVSEC = 2,
+ EXTENSIONS_PORT_DVSEC = 3,
+ GPF_PORT_DVSEC = 4,
+ GPF_DEVICE_DVSEC = 5,
+ PCIE_FLEXBUS_PORT_DVSEC = 7,
+ REG_LOC_DVSEC = 8,
+ MLD_DVSEC = 9,
+ CXL20_MAX_DVSEC
+};
+
+typedef struct DVSECHeader {
+ uint32_t cap_hdr;
+ uint32_t dv_hdr1;
+ uint16_t dv_hdr2;
+} QEMU_PACKED DVSECHeader;
+QEMU_BUILD_BUG_ON(sizeof(DVSECHeader) != 10);
+
+/*
+ * CXL r3.1 Table 8-2: CXL DVSEC ID Assignment
+ * Devices must implement certain DVSEC IDs, and can [optionally]
+ * implement others.
+ * (x) - IDs in Table 8-2.
+ *
+ * CXL RCD (D1): 0, [2], [5], 7, [8], A - Not emulated yet
+ * CXL RCD USP (UP1): 7, [8] - Not emulated yet
+ * CXL RCH DSP (DP1): 7, [8]
+ * CXL SLD (D2): 0, [2], 5, 7, 8, [A]
+ * CXL LD (LD): 0, [2], 5, 7, 8
+ * CXL RP (R): 3, 4, 7, 8
+ * CXL Switch USP (USP): [2], 7, 8
+ * CXL Switch DSP (DSP): 3, 4, 7, 8
+ * FM-Owned LD (FMLD): 0, [2], 7, 8, 9
+ */
+
+/*
+ * CXL r3.1 Section 8.1.3: PCIe DVSEC for Devices
+ * DVSEC ID: 0, Revision: 3
+ */
+typedef struct CXLDVSECDevice {
+ DVSECHeader hdr;
+ uint16_t cap;
+ uint16_t ctrl;
+ uint16_t status;
+ uint16_t ctrl2;
+ uint16_t status2;
+ uint16_t lock;
+ uint16_t cap2;
+ uint32_t range1_size_hi;
+ uint32_t range1_size_lo;
+ uint32_t range1_base_hi;
+ uint32_t range1_base_lo;
+ uint32_t range2_size_hi;
+ uint32_t range2_size_lo;
+ uint32_t range2_base_hi;
+ uint32_t range2_base_lo;
+ uint16_t cap3;
+ uint16_t resv;
+} QEMU_PACKED CXLDVSECDevice;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDevice) != PCIE_CXL_DEVICE_DVSEC_LENGTH);
+
+/*
+ * CXL r3.1 Section 8.1.5: CXL Extensions DVSEC for Ports
+ * DVSEC ID: 3, Revision: 0
+ */
+typedef struct CXLDVSECPortExt {
+ DVSECHeader hdr;
+ uint16_t status;
+ uint16_t control;
+ uint8_t alt_bus_base;
+ uint8_t alt_bus_limit;
+ uint16_t alt_memory_base;
+ uint16_t alt_memory_limit;
+ uint16_t alt_prefetch_base;
+ uint16_t alt_prefetch_limit;
+ uint32_t alt_prefetch_base_high;
+ uint32_t alt_prefetch_limit_high;
+ uint32_t rcrb_base;
+ uint32_t rcrb_base_high;
+} CXLDVSECPortExt;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortExt) != 0x28);
+
+#define PORT_CONTROL_OFFSET 0xc
+#define PORT_CONTROL_UNMASK_SBR 1
+#define PORT_CONTROL_ALT_MEMID_EN 4
+
+/*
+ * CXL r3.1 Section 8.1.6: GPF DVSEC for CXL Port
+ * DVSEC ID: 4, Revision: 0
+ */
+typedef struct CXLDVSECPortGPF {
+ DVSECHeader hdr;
+ uint16_t rsvd;
+ uint16_t phase1_ctrl;
+ uint16_t phase2_ctrl;
+} CXLDVSECPortGPF;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortGPF) != 0x10);
+
+/*
+ * CXL r3.1 Section 8.1.7: GPF DVSEC for CXL Device
+ * DVSEC ID: 5, Revision 0
+ */
+typedef struct CXLDVSECDeviceGPF {
+ DVSECHeader hdr;
+ uint16_t phase2_duration;
+ uint32_t phase2_power;
+} CXLDVSECDeviceGPF;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECDeviceGPF) != 0x10);
+
+/*
+ * CXL r3.1 Section 8.1.8: PCIe DVSEC for Flex Bus Port
+ * CXL r3.1 Section 8.2.1.3: Flex Bus Port DVSEC
+ * DVSEC ID: 7, Revision 2
+ */
+typedef struct CXLDVSECPortFlexBus {
+ DVSECHeader hdr;
+ uint16_t cap;
+ uint16_t ctrl;
+ uint16_t status;
+ uint32_t rcvd_mod_ts_data_phase1;
+ uint32_t cap2;
+ uint32_t ctrl2;
+ uint32_t status2;
+} CXLDVSECPortFlexBus;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECPortFlexBus) != 0x20);
+
+/*
+ * CXL r3.1 Section 8.1.9: Register Locator DVSEC
+ * DVSEC ID: 8, Revision 0
+ */
+typedef struct CXLDVSECRegisterLocator {
+ DVSECHeader hdr;
+ uint16_t rsvd;
+ uint32_t reg0_base_lo;
+ uint32_t reg0_base_hi;
+ uint32_t reg1_base_lo;
+ uint32_t reg1_base_hi;
+ uint32_t reg2_base_lo;
+ uint32_t reg2_base_hi;
+} CXLDVSECRegisterLocator;
+QEMU_BUILD_BUG_ON(sizeof(CXLDVSECRegisterLocator) != 0x24);
+
+/* BAR Equivalence Indicator */
+#define BEI_BAR_10H 0
+#define BEI_BAR_14H 1
+#define BEI_BAR_18H 2
+#define BEI_BAR_1cH 3
+#define BEI_BAR_20H 4
+#define BEI_BAR_24H 5
+
+/* Register Block Identifier */
+#define RBI_EMPTY 0
+#define RBI_COMPONENT_REG (1 << 8)
+#define RBI_BAR_VIRT_ACL (2 << 8)
+#define RBI_CXL_DEVICE_REG (3 << 8)
+
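A register locator entry combines a BAR Equivalence Indicator in the low bits with a Register Block Identifier at bit 8; the 64 KiB-aligned block offset lives in the upper bits of base_lo. A hedged sketch (not part of the patch) placing component registers in BAR0 and device registers in BAR2, both at offset 0:

    static void example_fill_reg_locator(CXLDVSECRegisterLocator *regloc)
    {
        *regloc = (CXLDVSECRegisterLocator) {
            .reg0_base_lo = RBI_COMPONENT_REG | BEI_BAR_10H,  /* BAR0 */
            .reg1_base_lo = RBI_CXL_DEVICE_REG | BEI_BAR_18H, /* BAR2 */
        };
    }
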
+#endif
diff --git a/include/hw/display/bcm2835_fb.h b/include/hw/display/bcm2835_fb.h
index 2246be74d8..49541bf08f 100644
--- a/include/hw/display/bcm2835_fb.h
+++ b/include/hw/display/bcm2835_fb.h
@@ -14,9 +14,12 @@
#include "hw/sysbus.h"
#include "ui/console.h"
+#include "qom/object.h"
+
+#define UPPER_RAM_BASE 0x40000000
#define TYPE_BCM2835_FB "bcm2835-fb"
-#define BCM2835_FB(obj) OBJECT_CHECK(BCM2835FBState, (obj), TYPE_BCM2835_FB)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835FBState, BCM2835_FB)
/*
* Configuration information about the fb which the guest can program
@@ -32,7 +35,7 @@ typedef struct {
uint32_t alpha;
} BCM2835FBConfig;
-typedef struct {
+struct BCM2835FBState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -49,7 +52,7 @@ typedef struct {
BCM2835FBConfig config;
BCM2835FBConfig initial_config;
-} BCM2835FBState;
+};
void bcm2835_fb_reconfigure(BCM2835FBState *s, BCM2835FBConfig *newconfig);
diff --git a/include/hw/display/dpcd.h b/include/hw/display/dpcd.h
index 6880ee36a3..a4e37abf6f 100644
--- a/include/hw/display/dpcd.h
+++ b/include/hw/display/dpcd.h
@@ -24,11 +24,11 @@
#ifndef DPCD_H
#define DPCD_H
+#include "qom/object.h"
-typedef struct DPCDState DPCDState;
#define TYPE_DPCD "dpcd"
-#define DPCD(obj) OBJECT_CHECK(DPCDState, (obj), TYPE_DPCD)
+OBJECT_DECLARE_SIMPLE_TYPE(DPCDState, DPCD)
/* DCPD Revision. */
#define DPCD_REVISION 0x00
diff --git a/include/hw/display/edid.h b/include/hw/display/edid.h
index 5b1de57f24..520f8ec202 100644
--- a/include/hw/display/edid.h
+++ b/include/hw/display/edid.h
@@ -5,11 +5,13 @@ typedef struct qemu_edid_info {
const char *vendor; /* http://www.uefi.org/pnp_id_list */
const char *name;
const char *serial;
- uint32_t dpi;
+ uint16_t width_mm;
+ uint16_t height_mm;
uint32_t prefx;
uint32_t prefy;
uint32_t maxx;
uint32_t maxy;
+ uint32_t refresh_rate;
} qemu_edid_info;
void qemu_edid_generate(uint8_t *edid, size_t size,
@@ -18,10 +20,13 @@ size_t qemu_edid_size(uint8_t *edid);
void qemu_edid_region_io(MemoryRegion *region, Object *owner,
uint8_t *edid, size_t size);
-#define DEFINE_EDID_PROPERTIES(_state, _edid_info) \
- DEFINE_PROP_UINT32("xres", _state, _edid_info.prefx, 0), \
- DEFINE_PROP_UINT32("yres", _state, _edid_info.prefy, 0), \
- DEFINE_PROP_UINT32("xmax", _state, _edid_info.maxx, 0), \
- DEFINE_PROP_UINT32("ymax", _state, _edid_info.maxy, 0)
+uint32_t qemu_edid_dpi_to_mm(uint32_t dpi, uint32_t res);
+
+#define DEFINE_EDID_PROPERTIES(_state, _edid_info) \
+ DEFINE_PROP_UINT32("xres", _state, _edid_info.prefx, 0), \
+ DEFINE_PROP_UINT32("yres", _state, _edid_info.prefy, 0), \
+ DEFINE_PROP_UINT32("xmax", _state, _edid_info.maxx, 0), \
+ DEFINE_PROP_UINT32("ymax", _state, _edid_info.maxy, 0), \
+ DEFINE_PROP_UINT32("refresh_rate", _state, _edid_info.refresh_rate, 0)
#endif /* EDID_H */
diff --git a/include/hw/display/i2c-ddc.h b/include/hw/display/i2c-ddc.h
index 1cf53a0c8d..94b5880587 100644
--- a/include/hw/display/i2c-ddc.h
+++ b/include/hw/display/i2c-ddc.h
@@ -21,6 +21,7 @@
#include "hw/display/edid.h"
#include "hw/i2c/i2c.h"
+#include "qom/object.h"
/* A simple I2C slave which just returns the contents of its EDID blob. */
struct I2CDDCState {
@@ -33,9 +34,8 @@ struct I2CDDCState {
uint8_t edid_blob[128];
};
-typedef struct I2CDDCState I2CDDCState;
#define TYPE_I2CDDC "i2c-ddc"
-#define I2CDDC(obj) OBJECT_CHECK(I2CDDCState, (obj), TYPE_I2CDDC)
+OBJECT_DECLARE_SIMPLE_TYPE(I2CDDCState, I2CDDC)
#endif /* I2C_DDC_H */
diff --git a/include/hw/display/macfb.h b/include/hw/display/macfb.h
index 26367ae2c4..27cebefc9e 100644
--- a/include/hw/display/macfb.h
+++ b/include/hw/display/macfb.h
@@ -13,9 +13,44 @@
#ifndef MACFB_H
#define MACFB_H
-#include "qemu/osdep.h"
#include "exec/memory.h"
+#include "hw/irq.h"
+#include "hw/nubus/nubus.h"
+#include "hw/sysbus.h"
#include "ui/console.h"
+#include "qemu/timer.h"
+
+typedef enum {
+ MACFB_DISPLAY_APPLE_21_COLOR = 0,
+ MACFB_DISPLAY_APPLE_PORTRAIT = 1,
+ MACFB_DISPLAY_APPLE_12_RGB = 2,
+ MACFB_DISPLAY_APPLE_2PAGE_MONO = 3,
+ MACFB_DISPLAY_NTSC_UNDERSCAN = 4,
+ MACFB_DISPLAY_NTSC_OVERSCAN = 5,
+ MACFB_DISPLAY_APPLE_12_MONO = 6,
+ MACFB_DISPLAY_APPLE_13_RGB = 7,
+ MACFB_DISPLAY_16_COLOR = 8,
+ MACFB_DISPLAY_PAL1_UNDERSCAN = 9,
+ MACFB_DISPLAY_PAL1_OVERSCAN = 10,
+ MACFB_DISPLAY_PAL2_UNDERSCAN = 11,
+ MACFB_DISPLAY_PAL2_OVERSCAN = 12,
+ MACFB_DISPLAY_VGA = 13,
+ MACFB_DISPLAY_SVGA = 14,
+} MacfbDisplayType;
+
+typedef struct MacFbMode {
+ uint8_t type;
+ uint8_t depth;
+ uint32_t mode_ctrl1;
+ uint32_t mode_ctrl2;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t offset;
+} MacFbMode;
+
+#define MACFB_CTRL_TOPADDR 0x200
+#define MACFB_NUM_REGS (MACFB_CTRL_TOPADDR / sizeof(uint32_t))
typedef struct MacfbState {
MemoryRegion mem_vram;
@@ -28,37 +63,39 @@ typedef struct MacfbState {
uint8_t color_palette[256 * 3];
uint32_t width, height; /* in pixels */
uint8_t depth;
+ uint8_t type;
+
+ uint32_t regs[MACFB_NUM_REGS];
+ MacFbMode *mode;
+
+ QEMUTimer *vbl_timer;
+ qemu_irq irq;
} MacfbState;
#define TYPE_MACFB "sysbus-macfb"
-#define MACFB(obj) \
- OBJECT_CHECK(MacfbSysBusState, (obj), TYPE_MACFB)
+OBJECT_DECLARE_SIMPLE_TYPE(MacfbSysBusState, MACFB)
-typedef struct {
+struct MacfbSysBusState {
SysBusDevice busdev;
MacfbState macfb;
-} MacfbSysBusState;
+};
-#define MACFB_NUBUS_DEVICE_CLASS(class) \
- OBJECT_CLASS_CHECK(MacfbNubusDeviceClass, (class), TYPE_NUBUS_MACFB)
-#define MACFB_NUBUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MacfbNubusDeviceClass, (obj), TYPE_NUBUS_MACFB)
+#define TYPE_NUBUS_MACFB "nubus-macfb"
+OBJECT_DECLARE_TYPE(MacfbNubusState, MacfbNubusDeviceClass, NUBUS_MACFB)
-typedef struct MacfbNubusDeviceClass {
+struct MacfbNubusDeviceClass {
DeviceClass parent_class;
DeviceRealize parent_realize;
-} MacfbNubusDeviceClass;
+ DeviceUnrealize parent_unrealize;
+};
-#define TYPE_NUBUS_MACFB "nubus-macfb"
-#define NUBUS_MACFB(obj) \
- OBJECT_CHECK(MacfbNubusState, (obj), TYPE_NUBUS_MACFB)
-typedef struct {
+struct MacfbNubusState {
NubusDevice busdev;
MacfbState macfb;
-} MacfbNubusState;
+};
#endif
diff --git a/include/hw/display/milkymist_tmu2.h b/include/hw/display/milkymist_tmu2.h
deleted file mode 100644
index e3394ff158..0000000000
--- a/include/hw/display/milkymist_tmu2.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * QEMU model of the Milkymist texture mapping unit.
- *
- * Copyright (c) 2010 Michael Walle <michael@walle.cc>
- * Copyright (c) 2010 Sebastien Bourdeauducq
- * <sebastien.bourdeauducq@lekernel.net>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- *
- *
- * Specification available at:
- * http://milkymist.walle.cc/socdoc/tmu2.pdf
- *
- */
-
-#ifndef HW_DISPLAY_MILKYMIST_TMU2_H
-#define HW_DISPLAY_MILKYMIST_TMU2_H
-
-#include "exec/hwaddr.h"
-#include "hw/qdev-core.h"
-
-#if defined(CONFIG_X11) && defined(CONFIG_OPENGL)
-DeviceState *milkymist_tmu2_create(hwaddr base, qemu_irq irq);
-#else
-static inline DeviceState *milkymist_tmu2_create(hwaddr base, qemu_irq irq)
-{
- return NULL;
-}
-#endif
-
-#endif /* HW_DISPLAY_MILKYMIST_TMU2_H */
diff --git a/include/hw/display/ramfb.h b/include/hw/display/ramfb.h
index b33a2c467b..a7e0019144 100644
--- a/include/hw/display/ramfb.h
+++ b/include/hw/display/ramfb.h
@@ -1,11 +1,15 @@
#ifndef RAMFB_H
#define RAMFB_H
+#include "migration/vmstate.h"
+
/* ramfb.c */
typedef struct RAMFBState RAMFBState;
void ramfb_display_update(QemuConsole *con, RAMFBState *s);
RAMFBState *ramfb_setup(Error **errp);
+extern const VMStateDescription ramfb_vmstate;
+
/* ramfb-standalone.c */
#define TYPE_RAMFB_DEVICE "ramfb"
diff --git a/include/hw/display/vga.h b/include/hw/display/vga.h
index ca0003dbfd..a79aa2909b 100644
--- a/include/hw/display/vga.h
+++ b/include/hw/display/vga.h
@@ -9,7 +9,11 @@
#ifndef QEMU_HW_DISPLAY_VGA_H
#define QEMU_HW_DISPLAY_VGA_H
-#include "exec/hwaddr.h"
+/*
+ * modules can reference this symbol to avoid being loaded
+ * into system emulators without vga support
+ */
+extern bool have_vga;
enum vga_retrace_method {
VGA_RETRACE_DUMB,
@@ -18,8 +22,6 @@ enum vga_retrace_method {
extern enum vga_retrace_method vga_retrace_method;
-int isa_vga_mm_init(hwaddr vram_base,
- hwaddr ctrl_base, int it_shift,
- MemoryRegion *address_space);
+#define TYPE_VGA_MMIO "vga-mmio"
#endif
diff --git a/include/hw/display/xlnx_dp.h b/include/hw/display/xlnx_dp.h
index ab0dd250cc..e86a87f235 100644
--- a/include/hw/display/xlnx_dp.h
+++ b/include/hw/display/xlnx_dp.h
@@ -34,21 +34,28 @@
#include "qemu/units.h"
#include "hw/dma/xlnx_dpdma.h"
#include "audio/audio.h"
+#include "qom/object.h"
+#include "hw/ptimer.h"
#define AUD_CHBUF_MAX_DEPTH (32 * KiB)
#define MAX_QEMU_BUFFER_SIZE (4 * KiB)
-#define DP_CORE_REG_ARRAY_SIZE (0x3AF >> 2)
+#define DP_CORE_REG_OFFSET (0x0000)
+#define DP_CORE_REG_ARRAY_SIZE (0x3B0 >> 2)
+#define DP_AVBUF_REG_OFFSET (0xB000)
#define DP_AVBUF_REG_ARRAY_SIZE (0x238 >> 2)
-#define DP_VBLEND_REG_ARRAY_SIZE (0x1DF >> 2)
+#define DP_VBLEND_REG_OFFSET (0xA000)
+#define DP_VBLEND_REG_ARRAY_SIZE (0x1E0 >> 2)
+#define DP_AUDIO_REG_OFFSET (0xC000)
#define DP_AUDIO_REG_ARRAY_SIZE (0x50 >> 2)
+#define DP_CONTAINER_SIZE (0xC050)
struct PixmanPlane {
pixman_format_code_t format;
DisplaySurface *surface;
};
-typedef struct XlnxDPState {
+struct XlnxDPState {
/*< private >*/
SysBusDevice parent_obj;
@@ -101,9 +108,11 @@ typedef struct XlnxDPState {
*/
DPCDState *dpcd;
I2CDDCState *edid;
-} XlnxDPState;
+
+ ptimer_state *vblank;
+};
#define TYPE_XLNX_DP "xlnx.v-dp"
-#define XLNX_DP(obj) OBJECT_CHECK(XlnxDPState, (obj), TYPE_XLNX_DP)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxDPState, XLNX_DP)
#endif
diff --git a/include/hw/dma/bcm2835_dma.h b/include/hw/dma/bcm2835_dma.h
index a6747842b7..1d26b1d8d0 100644
--- a/include/hw/dma/bcm2835_dma.h
+++ b/include/hw/dma/bcm2835_dma.h
@@ -9,6 +9,7 @@
#define BCM2835_DMA_H
#include "hw/sysbus.h"
+#include "qom/object.h"
typedef struct {
uint32_t cs;
@@ -25,12 +26,11 @@ typedef struct {
} BCM2835DMAChan;
#define TYPE_BCM2835_DMA "bcm2835-dma"
-#define BCM2835_DMA(obj) \
- OBJECT_CHECK(BCM2835DMAState, (obj), TYPE_BCM2835_DMA)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835DMAState, BCM2835_DMA)
#define BCM2835_DMA_NCHANS 16
-typedef struct {
+struct BCM2835DMAState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -42,6 +42,6 @@ typedef struct {
BCM2835DMAChan chan[BCM2835_DMA_NCHANS];
uint32_t int_status;
uint32_t enable;
-} BCM2835DMAState;
+};
#endif
diff --git a/include/hw/dma/i8257.h b/include/hw/dma/i8257.h
index 03e2c166be..4342e4a91e 100644
--- a/include/hw/dma/i8257.h
+++ b/include/hw/dma/i8257.h
@@ -3,8 +3,10 @@
#include "hw/isa/isa.h"
#include "exec/ioport.h"
+#include "qom/object.h"
#define TYPE_I8257 "i8257"
+OBJECT_DECLARE_SIMPLE_TYPE(I8257State, I8257)
typedef struct I8257Regs {
int now[2];
@@ -18,7 +20,7 @@ typedef struct I8257Regs {
void *opaque;
} I8257Regs;
-typedef struct I8257State {
+struct I8257State {
/* <private> */
ISADevice parent_obj;
@@ -41,8 +43,8 @@ typedef struct I8257State {
int running;
PortioList portio_page;
PortioList portio_pageh;
-} I8257State;
+};
-void i8257_dma_init(ISABus *bus, bool high_page_enable);
+void i8257_dma_init(Object *parent, ISABus *bus, bool high_page_enable);
#endif
diff --git a/include/hw/dma/pl080.h b/include/hw/dma/pl080.h
index 9d4b3df143..3c9659e438 100644
--- a/include/hw/dma/pl080.h
+++ b/include/hw/dma/pl080.h
@@ -10,11 +10,12 @@
* (at your option) any later version.
*/
-/* This is a model of the Arm PrimeCell PL080/PL081 DMA controller:
+/*
+ * This is a model of the Arm PrimeCell PL080/PL081 DMA controller:
* The PL080 TRM is:
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0196g/DDI0196.pdf
+ * https://developer.arm.com/documentation/ddi0196/latest
* and the PL081 TRM is:
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0218e/DDI0218.pdf
+ * https://developer.arm.com/documentation/ddi0218/latest
*
* QEMU interface:
* + sysbus IRQ 0: DMACINTR combined interrupt line
@@ -29,6 +30,7 @@
#define HW_DMA_PL080_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define PL080_MAX_CHANNELS 8
@@ -42,9 +44,9 @@ typedef struct {
#define TYPE_PL080 "pl080"
#define TYPE_PL081 "pl081"
-#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080)
+OBJECT_DECLARE_SIMPLE_TYPE(PL080State, PL080)
-typedef struct PL080State {
+struct PL080State {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -66,6 +68,6 @@ typedef struct PL080State {
MemoryRegion *downstream;
AddressSpace downstream_as;
-} PL080State;
+};
#endif
diff --git a/include/hw/dma/sifive_pdma.h b/include/hw/dma/sifive_pdma.h
new file mode 100644
index 0000000000..8c6cfa7f32
--- /dev/null
+++ b/include/hw/dma/sifive_pdma.h
@@ -0,0 +1,59 @@
+/*
+ * SiFive Platform DMA emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SIFIVE_PDMA_H
+#define SIFIVE_PDMA_H
+
+#include "hw/sysbus.h"
+
+struct sifive_pdma_chan {
+ uint32_t control;
+ uint32_t next_config;
+ uint64_t next_bytes;
+ uint64_t next_dst;
+ uint64_t next_src;
+ uint32_t exec_config;
+ uint64_t exec_bytes;
+ uint64_t exec_dst;
+ uint64_t exec_src;
+ int state;
+};
+
+#define SIFIVE_PDMA_CHANS 4
+#define SIFIVE_PDMA_IRQS (SIFIVE_PDMA_CHANS * 2)
+#define SIFIVE_PDMA_REG_SIZE 0x100000
+#define SIFIVE_PDMA_CHAN_NO(reg) ((reg & (SIFIVE_PDMA_REG_SIZE - 1)) >> 12)
+
+typedef struct SiFivePDMAState {
+ SysBusDevice parent;
+ MemoryRegion iomem;
+ qemu_irq irq[SIFIVE_PDMA_IRQS];
+
+ struct sifive_pdma_chan chan[SIFIVE_PDMA_CHANS];
+} SiFivePDMAState;
+
+#define TYPE_SIFIVE_PDMA "sifive.pdma"
+
+#define SIFIVE_PDMA(obj) \
+ OBJECT_CHECK(SiFivePDMAState, (obj), TYPE_SIFIVE_PDMA)
+
+#endif /* SIFIVE_PDMA_H */
diff --git a/include/hw/dma/xlnx-zdma.h b/include/hw/dma/xlnx-zdma.h
index 0b240b4c3c..efc75217d5 100644
--- a/include/hw/dma/xlnx-zdma.h
+++ b/include/hw/dma/xlnx-zdma.h
@@ -32,6 +32,7 @@
#include "hw/sysbus.h"
#include "hw/register.h"
#include "sysemu/dma.h"
+#include "qom/object.h"
#define ZDMA_R_MAX (0x204 / 4)
@@ -50,12 +51,12 @@ typedef union {
uint32_t words[4];
} XlnxZDMADescr;
-typedef struct XlnxZDMA {
+struct XlnxZDMA {
SysBusDevice parent_obj;
MemoryRegion iomem;
MemTxAttrs attr;
MemoryRegion *dma_mr;
- AddressSpace *dma_as;
+ AddressSpace dma_as;
qemu_irq irq_zdma_ch_imr;
struct {
@@ -74,11 +75,10 @@ typedef struct XlnxZDMA {
/* We don't model the common bufs. Must be at least 16 bytes
to model write only mode. */
uint8_t buf[2048];
-} XlnxZDMA;
+};
#define TYPE_XLNX_ZDMA "xlnx.zdma"
-#define XLNX_ZDMA(obj) \
- OBJECT_CHECK(XlnxZDMA, (obj), TYPE_XLNX_ZDMA)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZDMA, XLNX_ZDMA)
#endif /* XLNX_ZDMA_H */
diff --git a/include/hw/dma/xlnx-zynq-devcfg.h b/include/hw/dma/xlnx-zynq-devcfg.h
index 1d3969d91f..e4cf085d70 100644
--- a/include/hw/dma/xlnx-zynq-devcfg.h
+++ b/include/hw/dma/xlnx-zynq-devcfg.h
@@ -29,11 +29,11 @@
#include "hw/register.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_XLNX_ZYNQ_DEVCFG "xlnx.ps7-dev-cfg"
-#define XLNX_ZYNQ_DEVCFG(obj) \
- OBJECT_CHECK(XlnxZynqDevcfg, (obj), TYPE_XLNX_ZYNQ_DEVCFG)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG)
#define XLNX_ZYNQ_DEVCFG_R_MAX (0x100 / 4)
@@ -46,7 +46,7 @@ typedef struct XlnxZynqDevcfgDMACmd {
uint32_t dest_len;
} XlnxZynqDevcfgDMACmd;
-typedef struct XlnxZynqDevcfg {
+struct XlnxZynqDevcfg {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -57,6 +57,6 @@ typedef struct XlnxZynqDevcfg {
uint32_t regs[XLNX_ZYNQ_DEVCFG_R_MAX];
RegisterInfo regs_info[XLNX_ZYNQ_DEVCFG_R_MAX];
-} XlnxZynqDevcfg;
+};
#endif
diff --git a/include/hw/dma/xlnx_csu_dma.h b/include/hw/dma/xlnx_csu_dma.h
new file mode 100644
index 0000000000..922ab80eb6
--- /dev/null
+++ b/include/hw/dma/xlnx_csu_dma.h
@@ -0,0 +1,72 @@
+/*
+ * Xilinx Platform CSU Stream DMA emulation
+ *
+ * This implementation is based on
+ * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XLNX_CSU_DMA_H
+#define XLNX_CSU_DMA_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/ptimer.h"
+#include "hw/stream.h"
+
+#define TYPE_XLNX_CSU_DMA "xlnx.csu_dma"
+
+#define XLNX_CSU_DMA_R_MAX (0x2c / 4)
+
+typedef struct XlnxCSUDMA {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ MemTxAttrs attr;
+ MemoryRegion *dma_mr;
+ AddressSpace dma_as;
+ qemu_irq irq;
+ StreamSink *tx_dev; /* Used as generic StreamSink */
+ ptimer_state *src_timer;
+
+ uint16_t width;
+ bool is_dst;
+ bool r_size_last_word;
+
+ StreamCanPushNotifyFn notify;
+ void *notify_opaque;
+
+ uint32_t regs[XLNX_CSU_DMA_R_MAX];
+ RegisterInfo regs_info[XLNX_CSU_DMA_R_MAX];
+} XlnxCSUDMA;
+
+OBJECT_DECLARE_TYPE(XlnxCSUDMA, XlnxCSUDMAClass, XLNX_CSU_DMA)
+
+struct XlnxCSUDMAClass {
+ SysBusDeviceClass parent_class;
+
+ /*
+ * read: Start a read transfer on a Xilinx CSU DMA engine
+ *
+ * @s: the Xilinx CSU DMA engine to start the transfer on
+ * @addr: the address to read
+ * @len: the number of bytes to read at 'addr'
+ *
+ * @return a MemTxResult indicating whether the operation succeeded ('len'
+ * bytes were read) or failed.
+ */
+ MemTxResult (*read)(XlnxCSUDMA *s, hwaddr addr, uint32_t len);
+};
+
+#endif
diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h
index 7a304a5bb4..40537a848b 100644
--- a/include/hw/dma/xlnx_dpdma.h
+++ b/include/hw/dma/xlnx_dpdma.h
@@ -28,6 +28,7 @@
#include "hw/sysbus.h"
#include "ui/console.h"
#include "sysemu/dma.h"
+#include "qom/object.h"
#define XLNX_DPDMA_REG_ARRAY_SIZE (0x1000 >> 2)
@@ -42,10 +43,9 @@ struct XlnxDPDMAState {
qemu_irq irq;
};
-typedef struct XlnxDPDMAState XlnxDPDMAState;
#define TYPE_XLNX_DPDMA "xlnx.dpdma"
-#define XLNX_DPDMA(obj) OBJECT_CHECK(XlnxDPDMAState, (obj), TYPE_XLNX_DPDMA)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxDPDMAState, XLNX_DPDMA)
/*
* xlnx_dpdma_start_operation: Start the operation on the specified channel. The
diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h
index 6fdff3dced..9c35d1b9da 100644
--- a/include/hw/elf_ops.h
+++ b/include/hw/elf_ops.h
@@ -1,30 +1,30 @@
static void glue(bswap_ehdr, SZ)(struct elfhdr *ehdr)
{
- bswap16s(&ehdr->e_type); /* Object file type */
- bswap16s(&ehdr->e_machine); /* Architecture */
- bswap32s(&ehdr->e_version); /* Object file version */
- bswapSZs(&ehdr->e_entry); /* Entry point virtual address */
- bswapSZs(&ehdr->e_phoff); /* Program header table file offset */
- bswapSZs(&ehdr->e_shoff); /* Section header table file offset */
- bswap32s(&ehdr->e_flags); /* Processor-specific flags */
- bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
- bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
- bswap16s(&ehdr->e_phnum); /* Program header table entry count */
- bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
- bswap16s(&ehdr->e_shnum); /* Section header table entry count */
- bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
+ bswap16s(&ehdr->e_type); /* Object file type */
+ bswap16s(&ehdr->e_machine); /* Architecture */
+ bswap32s(&ehdr->e_version); /* Object file version */
+ bswapSZs(&ehdr->e_entry); /* Entry point virtual address */
+ bswapSZs(&ehdr->e_phoff); /* Program header table file offset */
+ bswapSZs(&ehdr->e_shoff); /* Section header table file offset */
+ bswap32s(&ehdr->e_flags); /* Processor-specific flags */
+ bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
+ bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
+ bswap16s(&ehdr->e_phnum); /* Program header table entry count */
+ bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
+ bswap16s(&ehdr->e_shnum); /* Section header table entry count */
+ bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
}
static void glue(bswap_phdr, SZ)(struct elf_phdr *phdr)
{
- bswap32s(&phdr->p_type); /* Segment type */
- bswapSZs(&phdr->p_offset); /* Segment file offset */
- bswapSZs(&phdr->p_vaddr); /* Segment virtual address */
- bswapSZs(&phdr->p_paddr); /* Segment physical address */
- bswapSZs(&phdr->p_filesz); /* Segment size in file */
- bswapSZs(&phdr->p_memsz); /* Segment size in memory */
- bswap32s(&phdr->p_flags); /* Segment flags */
- bswapSZs(&phdr->p_align); /* Segment alignment */
+ bswap32s(&phdr->p_type); /* Segment type */
+ bswapSZs(&phdr->p_offset); /* Segment file offset */
+ bswapSZs(&phdr->p_vaddr); /* Segment virtual address */
+ bswapSZs(&phdr->p_paddr); /* Segment physical address */
+ bswapSZs(&phdr->p_filesz); /* Segment size in file */
+ bswapSZs(&phdr->p_memsz); /* Segment size in memory */
+ bswap32s(&phdr->p_flags); /* Segment flags */
+ bswapSZs(&phdr->p_align); /* Segment alignment */
}
static void glue(bswap_shdr, SZ)(struct elf_shdr *shdr)
@@ -117,7 +117,7 @@ static void glue(load_symbols, SZ)(struct elfhdr *ehdr, int fd, int must_swab,
shdr_table = load_at(fd, ehdr->e_shoff,
sizeof(struct elf_shdr) * ehdr->e_shnum);
if (!shdr_table) {
- return ;
+ return;
}
if (must_swab) {
@@ -312,26 +312,26 @@ static struct elf_note *glue(get_elf_note_type, SZ)(struct elf_note *nhdr,
return nhdr;
}
-static int glue(load_elf, SZ)(const char *name, int fd,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque,
- int must_swab, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr,
- uint32_t *pflags, int elf_machine,
- int clear_lsb, int data_swab,
- AddressSpace *as, bool load_rom,
- symbol_fn_t sym_cb)
+static ssize_t glue(load_elf, SZ)(const char *name, int fd,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque,
+ int must_swab, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr,
+ uint32_t *pflags, int elf_machine,
+ int clear_lsb, int data_swab,
+ AddressSpace *as, bool load_rom,
+ symbol_fn_t sym_cb)
{
struct elfhdr ehdr;
struct elf_phdr *phdr = NULL, *ph;
- int size, i, total_size;
+ int size, i;
+ ssize_t total_size;
elf_word mem_size, file_size, data_offset;
uint64_t addr, low = (uint64_t)-1, high = 0;
GMappedFile *mapped_file = NULL;
uint8_t *data = NULL;
- char label[128];
- int ret = ELF_LOAD_FAILED;
+ ssize_t ret = ELF_LOAD_FAILED;
if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
goto fail;
@@ -369,14 +369,6 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
}
break;
- case EM_MOXIE:
- if (ehdr.e_machine != EM_MOXIE) {
- if (ehdr.e_machine != EM_MOXIE_OLD) {
- ret = ELF_LOAD_WRONG_ARCH;
- goto fail;
- }
- }
- break;
case EM_MIPS:
case EM_NANOMIPS:
if ((ehdr.e_machine != EM_MIPS) &&
@@ -393,10 +385,11 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
if (pflags) {
- *pflags = (elf_word)ehdr.e_flags;
+ *pflags = ehdr.e_flags;
+ }
+ if (pentry) {
+ *pentry = ehdr.e_entry;
}
- if (pentry)
- *pentry = (uint64_t)(elf_sword)ehdr.e_entry;
glue(load_symbols, SZ)(&ehdr, fd, must_swab, clear_lsb, sym_cb);
@@ -418,7 +411,7 @@ static int glue(load_elf, SZ)(const char *name, int fd,
/*
* Since we want to be able to modify the mapped buffer, we set the
- * 'writeble' parameter to 'true'. Modifications to the buffer are not
+ * 'writable' parameter to 'true'. Modifications to the buffer are not
* written back to the file.
*/
mapped_file = g_mapped_file_new_from_fd(fd, true, NULL);
@@ -491,7 +484,7 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
}
- if (mem_size > INT_MAX - total_size) {
+ if (mem_size > SSIZE_MAX - total_size) {
ret = ELF_LOAD_TOO_BIG;
goto fail;
}
@@ -507,7 +500,7 @@ static int glue(load_elf, SZ)(const char *name, int fd,
}
if (data_swab) {
- int j;
+ elf_word j;
for (j = 0; j < file_size; j += (1 << data_swab)) {
uint8_t *dp = data + j;
switch (data_swab) {
@@ -544,7 +537,9 @@ static int glue(load_elf, SZ)(const char *name, int fd,
*/
if (mem_size != 0) {
if (load_rom) {
- snprintf(label, sizeof(label), "phdr #%d: %s", i, name);
+ g_autofree char *label =
+ g_strdup_printf("%s ELF program header segment %d",
+ name, i);
/*
* rom_add_elf_program() takes its own reference to
@@ -561,6 +556,19 @@ static int glue(load_elf, SZ)(const char *name, int fd,
if (res != MEMTX_OK) {
goto fail;
}
+ /*
+ * We need to zero'ify the space that is not copied
+ * from file
+ */
+ if (file_size < mem_size) {
+ res = address_space_set(as ? as : &address_space_memory,
+ addr + file_size, 0,
+ mem_size - file_size,
+ MEMTXATTRS_UNSPECIFIED);
+ if (res != MEMTX_OK) {
+ goto fail;
+ }
+ }
}
}
@@ -597,18 +605,18 @@ static int glue(load_elf, SZ)(const char *name, int fd,
nhdr = glue(get_elf_note_type, SZ)(nhdr, file_size, ph->p_align,
*(uint64_t *)translate_opaque);
if (nhdr != NULL) {
- bool is64 =
- sizeof(struct elf_note) == sizeof(struct elf64_note);
- elf_note_fn((void *)nhdr, (void *)&ph->p_align, is64);
+ elf_note_fn((void *)nhdr, (void *)&ph->p_align, SZ == 64);
}
data = NULL;
}
}
- if (lowaddr)
- *lowaddr = (uint64_t)(elf_sword)low;
- if (highaddr)
- *highaddr = (uint64_t)(elf_sword)high;
+ if (lowaddr) {
+ *lowaddr = low;
+ }
+ if (highaddr) {
+ *highaddr = high;
+ }
ret = total_size;
fail:
if (mapped_file) {
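For illustration of the zero-fill logic added above: an ELF segment's p_memsz may exceed its p_filesz, the difference being the zero-initialised (.bss-style) tail, so after copying p_filesz bytes the loader clears the remainder. A minimal standalone sketch of that rule, not the QEMU code itself:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: load one segment into a buffer already sized >= mem_size. */
    static void load_segment(uint8_t *dst, const uint8_t *src,
                             uint64_t file_size, uint64_t mem_size)
    {
        memcpy(dst, src, file_size);          /* bytes present in the file */
        if (file_size < mem_size) {
            /* zero the .bss-style tail that has no file backing */
            memset(dst + file_size, 0, mem_size - file_size);
        }
    }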
diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h
index 02a0ced0a0..8d3fb2fb3b 100644
--- a/include/hw/firmware/smbios.h
+++ b/include/hw/firmware/smbios.h
@@ -1,6 +1,9 @@
#ifndef QEMU_SMBIOS_H
#define QEMU_SMBIOS_H
+#include "qapi/qapi-types-machine.h"
+#include "qemu/bitmap.h"
+
/*
* SMBIOS Support
*
@@ -14,8 +17,28 @@
*
*/
+extern uint8_t *usr_blobs;
+extern GArray *usr_blobs_sizes;
+
+typedef struct {
+ const char *vendor, *version, *date;
+ bool have_major_minor, uefi;
+ uint8_t major, minor;
+} smbios_type0_t;
+extern smbios_type0_t smbios_type0;
+
+typedef struct {
+ const char *manufacturer, *product, *version, *serial, *sku, *family;
+ /* uuid is in qemu_uuid */
+} smbios_type1_t;
+extern smbios_type1_t smbios_type1;
#define SMBIOS_MAX_TYPE 127
+extern DECLARE_BITMAP(smbios_have_binfile_bitmap, SMBIOS_MAX_TYPE + 1);
+extern DECLARE_BITMAP(smbios_have_fields_bitmap, SMBIOS_MAX_TYPE + 1);
+
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
/* memory area description, used by type 19 table */
struct smbios_phys_mem_area {
@@ -23,14 +46,6 @@ struct smbios_phys_mem_area {
uint64_t length;
};
-/*
- * SMBIOS spec defined tables
- */
-typedef enum SmbiosEntryPointType {
- SMBIOS_ENTRY_POINT_21,
- SMBIOS_ENTRY_POINT_30,
-} SmbiosEntryPointType;
-
/* SMBIOS Entry Point
* There are two types of entry points defined in the SMBIOS specification
* (see below). BIOS must place the entry point(s) at a 16-byte-aligned
@@ -193,6 +208,43 @@ struct smbios_type_4 {
uint8_t thread_count;
uint16_t processor_characteristics;
uint16_t processor_family2;
+ /* SMBIOS spec 3.0.0, Table 21 */
+ uint16_t core_count2;
+ uint16_t core_enabled2;
+ uint16_t thread_count2;
+} QEMU_PACKED;
+
+typedef enum smbios_type_4_len_ver {
+ SMBIOS_TYPE_4_LEN_V28 = offsetofend(struct smbios_type_4,
+ processor_family2),
+ SMBIOS_TYPE_4_LEN_V30 = offsetofend(struct smbios_type_4, thread_count2),
+} smbios_type_4_len_ver;
+
+/* SMBIOS type 8 - Port Connector Information */
+struct smbios_type_8 {
+ struct smbios_structure_header header;
+ uint8_t internal_reference_str;
+ uint8_t internal_connector_type;
+ uint8_t external_reference_str;
+ uint8_t external_connector_type;
+ uint8_t port_type;
+} QEMU_PACKED;
+
+/* SMBIOS type 9 - System Slots (v2.1+) */
+struct smbios_type_9 {
+ struct smbios_structure_header header;
+ uint8_t slot_designation;
+ uint8_t slot_type;
+ uint8_t slot_data_bus_width;
+ uint8_t current_usage;
+ uint8_t slot_length;
+ uint16_t slot_id;
+ uint8_t slot_characteristics1;
+ uint8_t slot_characteristics2;
+ /* SMBIOS spec v2.6+ */
+ uint16_t segment_group_number;
+ uint8_t bus_number;
+ uint8_t device_number;
} QEMU_PACKED;
/* SMBIOS type 11 - OEM strings */
@@ -258,20 +310,36 @@ struct smbios_type_32 {
uint8_t boot_status;
} QEMU_PACKED;
+/* SMBIOS type 41 - Onboard Devices Extended Information */
+struct smbios_type_41 {
+ struct smbios_structure_header header;
+ uint8_t reference_designation_str;
+ uint8_t device_type;
+ uint8_t device_type_instance;
+ uint16_t segment_group_number;
+ uint8_t bus_number;
+ uint8_t device_number;
+} QEMU_PACKED;
+
/* SMBIOS type 127 -- End-of-table */
struct smbios_type_127 {
struct smbios_structure_header header;
} QEMU_PACKED;
+bool smbios_validate_table(SmbiosEntryPointType ep_type, Error **errp);
+void smbios_add_usr_blob_size(size_t size);
void smbios_entry_add(QemuOpts *opts, Error **errp);
void smbios_set_cpuid(uint32_t version, uint32_t features);
void smbios_set_defaults(const char *manufacturer, const char *product,
- const char *version, bool legacy_mode,
- bool uuid_encoded, SmbiosEntryPointType ep_type);
-uint8_t *smbios_get_table_legacy(MachineState *ms, size_t *length);
+ const char *version,
+ bool uuid_encoded);
+void smbios_set_default_processor_family(uint16_t processor_family);
+uint8_t *smbios_get_table_legacy(size_t *length, Error **errp);
void smbios_get_tables(MachineState *ms,
+ SmbiosEntryPointType ep_type,
const struct smbios_phys_mem_area *mem_array,
const unsigned int mem_array_size,
uint8_t **tables, size_t *tables_len,
- uint8_t **anchor, size_t *anchor_len);
+ uint8_t **anchor, size_t *anchor_len,
+ Error **errp);
#endif /* QEMU_SMBIOS_H */
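For illustration of the offsetofend() helper introduced above, which is how the SMBIOS_TYPE_4_LEN_V28/V30 values are derived: it yields the first byte past a member, i.e. the structure length when that member is the last field of a given spec revision. A standalone sketch, assuming sizeof_field() follows QEMU's usual definition:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed equivalents of the QEMU helpers, for a standalone build. */
    #define sizeof_field(TYPE, MEMBER)  sizeof(((TYPE *)0)->MEMBER)
    #define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

    struct demo {
        uint8_t  a;   /* offset 0 */
        uint16_t b;   /* offset 2 (after 1 byte of padding) */
        uint32_t c;   /* offset 4 */
    };

    int main(void)
    {
        printf("%zu\n", offsetofend(struct demo, b));   /* prints 4 */
        return 0;
    }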
diff --git a/include/hw/fsi/aspeed_apb2opb.h b/include/hw/fsi/aspeed_apb2opb.h
new file mode 100644
index 0000000000..f6a2387abf
--- /dev/null
+++ b/include/hw/fsi/aspeed_apb2opb.h
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * ASPEED APB2OPB Bridge
+ * IBM On-Chip Peripheral Bus
+ */
+#ifndef FSI_ASPEED_APB2OPB_H
+#define FSI_ASPEED_APB2OPB_H
+
+#include "exec/memory.h"
+#include "hw/fsi/fsi-master.h"
+#include "hw/sysbus.h"
+
+#define TYPE_FSI_OPB "fsi.opb"
+
+#define TYPE_OP_BUS "opb"
+OBJECT_DECLARE_SIMPLE_TYPE(OPBus, OP_BUS)
+
+typedef struct OPBus {
+ BusState bus;
+
+ MemoryRegion mr;
+ AddressSpace as;
+} OPBus;
+
+#define TYPE_ASPEED_APB2OPB "aspeed.apb2opb"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedAPB2OPBState, ASPEED_APB2OPB)
+
+#define ASPEED_APB2OPB_NR_REGS ((0xe8 >> 2) + 1)
+
+#define ASPEED_FSI_NUM 2
+
+typedef struct AspeedAPB2OPBState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[ASPEED_APB2OPB_NR_REGS];
+ qemu_irq irq;
+
+ OPBus opb[ASPEED_FSI_NUM];
+ FSIMasterState fsi[ASPEED_FSI_NUM];
+} AspeedAPB2OPBState;
+
+#endif /* FSI_ASPEED_APB2OPB_H */
diff --git a/include/hw/fsi/cfam.h b/include/hw/fsi/cfam.h
new file mode 100644
index 0000000000..7abc3b287b
--- /dev/null
+++ b/include/hw/fsi/cfam.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Common FRU Access Macro
+ */
+#ifndef FSI_CFAM_H
+#define FSI_CFAM_H
+
+#include "exec/memory.h"
+
+#include "hw/fsi/fsi.h"
+#include "hw/fsi/lbus.h"
+
+#define TYPE_FSI_CFAM "cfam"
+#define FSI_CFAM(obj) OBJECT_CHECK(FSICFAMState, (obj), TYPE_FSI_CFAM)
+
+/* P9-ism */
+#define CFAM_CONFIG_NR_REGS 0x28
+
+typedef struct FSICFAMState {
+ /* < private > */
+ FSISlaveState parent;
+
+ /* CFAM config address space */
+ MemoryRegion config_iomem;
+
+ MemoryRegion mr;
+
+ FSILBus lbus;
+ FSIScratchPad scratchpad;
+} FSICFAMState;
+
+#endif /* FSI_CFAM_H */
diff --git a/include/hw/fsi/fsi-master.h b/include/hw/fsi/fsi-master.h
new file mode 100644
index 0000000000..68e5f56db2
--- /dev/null
+++ b/include/hw/fsi/fsi-master.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Flexible Service Interface Master
+ */
+#ifndef FSI_FSI_MASTER_H
+#define FSI_FSI_MASTER_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+#include "hw/fsi/fsi.h"
+#include "hw/fsi/cfam.h"
+
+#define TYPE_FSI_MASTER "fsi.master"
+OBJECT_DECLARE_SIMPLE_TYPE(FSIMasterState, FSI_MASTER)
+
+#define FSI_MASTER_NR_REGS ((0x2e0 >> 2) + 1)
+
+typedef struct FSIMasterState {
+ DeviceState parent;
+ MemoryRegion iomem;
+ MemoryRegion opb2fsi;
+
+ FSIBus bus;
+
+ uint32_t regs[FSI_MASTER_NR_REGS];
+ FSICFAMState cfam;
+} FSIMasterState;
+
+
+#endif /* FSI_FSI_MASTER_H */
diff --git a/include/hw/fsi/fsi.h b/include/hw/fsi/fsi.h
new file mode 100644
index 0000000000..e00f6ef078
--- /dev/null
+++ b/include/hw/fsi/fsi.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Flexible Service Interface
+ */
+#ifndef FSI_FSI_H
+#define FSI_FSI_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+#include "hw/fsi/lbus.h"
+#include "qemu/bitops.h"
+
+/* Bitwise operations at the word level. */
+#define BE_GENMASK(hb, lb) MAKE_64BIT_MASK((lb), ((hb) - (lb) + 1))
+
+#define TYPE_FSI_BUS "fsi.bus"
+OBJECT_DECLARE_SIMPLE_TYPE(FSIBus, FSI_BUS)
+
+typedef struct FSIBus {
+ BusState bus;
+} FSIBus;
+
+#define TYPE_FSI_SLAVE "fsi.slave"
+OBJECT_DECLARE_SIMPLE_TYPE(FSISlaveState, FSI_SLAVE)
+
+#define FSI_SLAVE_CONTROL_NR_REGS ((0x40 >> 2) + 1)
+
+typedef struct FSISlaveState {
+ DeviceState parent;
+
+ MemoryRegion iomem;
+ uint32_t regs[FSI_SLAVE_CONTROL_NR_REGS];
+} FSISlaveState;
+
+#endif /* FSI_FSI_H */
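For illustration of BE_GENMASK() above, which builds a contiguous mask covering bits lb..hb inclusive. A standalone sketch, assuming MAKE_64BIT_MASK(shift, length) keeps its usual qemu/bitops.h shift-then-length semantics:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: mirrors qemu/bitops.h -- 'length' bits starting at 'shift'. */
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))
    #define BE_GENMASK(hb, lb)  MAKE_64BIT_MASK((lb), ((hb) - (lb) + 1))

    int main(void)
    {
        /* bits 7..4 set -> 0x00000000000000f0 */
        printf("0x%016" PRIx64 "\n", (uint64_t)BE_GENMASK(7, 4));
        return 0;
    }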
diff --git a/include/hw/fsi/lbus.h b/include/hw/fsi/lbus.h
new file mode 100644
index 0000000000..558268c013
--- /dev/null
+++ b/include/hw/fsi/lbus.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (C) 2024 IBM Corp.
+ *
+ * IBM Local bus and connected device structures.
+ */
+#ifndef FSI_LBUS_H
+#define FSI_LBUS_H
+
+#include "hw/qdev-core.h"
+#include "qemu/units.h"
+#include "exec/memory.h"
+
+#define TYPE_FSI_LBUS_DEVICE "fsi.lbus.device"
+OBJECT_DECLARE_SIMPLE_TYPE(FSILBusDevice, FSI_LBUS_DEVICE)
+
+typedef struct FSILBusDevice {
+ DeviceState parent;
+
+ MemoryRegion iomem;
+} FSILBusDevice;
+
+#define TYPE_FSI_LBUS "fsi.lbus"
+OBJECT_DECLARE_SIMPLE_TYPE(FSILBus, FSI_LBUS)
+
+typedef struct FSILBus {
+ BusState bus;
+
+ MemoryRegion mr;
+} FSILBus;
+
+#define TYPE_FSI_SCRATCHPAD "fsi.scratchpad"
+#define SCRATCHPAD(obj) OBJECT_CHECK(FSIScratchPad, (obj), TYPE_FSI_SCRATCHPAD)
+
+#define FSI_SCRATCHPAD_NR_REGS 4
+
+typedef struct FSIScratchPad {
+ FSILBusDevice parent;
+
+ uint32_t regs[FSI_SCRATCHPAD_NR_REGS];
+} FSIScratchPad;
+
+#endif /* FSI_LBUS_H */
diff --git a/include/hw/fw-path-provider.h b/include/hw/fw-path-provider.h
index 10d1bd4959..8e1d45651c 100644
--- a/include/hw/fw-path-provider.h
+++ b/include/hw/fw-path-provider.h
@@ -22,20 +22,19 @@
#define TYPE_FW_PATH_PROVIDER "fw-path-provider"
-#define FW_PATH_PROVIDER_CLASS(klass) \
- OBJECT_CLASS_CHECK(FWPathProviderClass, (klass), TYPE_FW_PATH_PROVIDER)
-#define FW_PATH_PROVIDER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(FWPathProviderClass, (obj), TYPE_FW_PATH_PROVIDER)
+typedef struct FWPathProviderClass FWPathProviderClass;
+DECLARE_CLASS_CHECKERS(FWPathProviderClass, FW_PATH_PROVIDER,
+ TYPE_FW_PATH_PROVIDER)
#define FW_PATH_PROVIDER(obj) \
INTERFACE_CHECK(FWPathProvider, (obj), TYPE_FW_PATH_PROVIDER)
typedef struct FWPathProvider FWPathProvider;
-typedef struct FWPathProviderClass {
+struct FWPathProviderClass {
InterfaceClass parent_class;
char *(*get_dev_path)(FWPathProvider *p, BusState *bus, DeviceState *dev);
-} FWPathProviderClass;
+};
char *fw_path_provider_get_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);
diff --git a/include/hw/gpio/aspeed_gpio.h b/include/hw/gpio/aspeed_gpio.h
index a2deac046a..904eecf62c 100644
--- a/include/hw/gpio/aspeed_gpio.h
+++ b/include/hw/gpio/aspeed_gpio.h
@@ -11,18 +11,15 @@
#define ASPEED_GPIO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_GPIO "aspeed.gpio"
-#define ASPEED_GPIO(obj) OBJECT_CHECK(AspeedGPIOState, (obj), TYPE_ASPEED_GPIO)
-#define ASPEED_GPIO_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedGPIOClass, (klass), TYPE_ASPEED_GPIO)
-#define ASPEED_GPIO_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedGPIOClass, (obj), TYPE_ASPEED_GPIO)
+OBJECT_DECLARE_TYPE(AspeedGPIOState, AspeedGPIOClass, ASPEED_GPIO)
#define ASPEED_GPIO_MAX_NR_SETS 8
+#define ASPEED_GPIOS_PER_SET 32
#define ASPEED_REGS_PER_BANK 14
#define ASPEED_GPIO_MAX_NR_REGS (ASPEED_REGS_PER_BANK * ASPEED_GPIO_MAX_NR_SETS)
-#define ASPEED_GPIO_NR_PINS 228
#define ASPEED_GROUPS_PER_SET 4
#define ASPEED_GPIO_NR_DEBOUNCE_REGS 3
#define ASPEED_CHARS_PER_GROUP_LABEL 4
@@ -53,21 +50,34 @@ enum GPIORegType {
gpio_reg_input_mask,
};
+/* GPIO index mode */
+enum GPIORegIndexType {
+ gpio_reg_idx_data = 0,
+ gpio_reg_idx_direction,
+ gpio_reg_idx_interrupt,
+ gpio_reg_idx_debounce,
+ gpio_reg_idx_tolerance,
+ gpio_reg_idx_cmd_src,
+ gpio_reg_idx_input_mask,
+ gpio_reg_idx_reserved,
+ gpio_reg_idx_new_w_cmd_src,
+ gpio_reg_idx_new_r_cmd_src,
+};
+
typedef struct AspeedGPIOReg {
uint16_t set_idx;
enum GPIORegType type;
- } AspeedGPIOReg;
+} AspeedGPIOReg;
-typedef struct AspeedGPIOClass {
+struct AspeedGPIOClass {
SysBusDevice parent_obj;
const GPIOSetProperties *props;
uint32_t nr_gpio_pins;
uint32_t nr_gpio_sets;
- uint32_t gap;
const AspeedGPIOReg *reg_table;
-} AspeedGPIOClass;
+};
-typedef struct AspeedGPIOState {
+struct AspeedGPIOState {
/* <private> */
SysBusDevice parent;
@@ -75,7 +85,7 @@ typedef struct AspeedGPIOState {
MemoryRegion iomem;
int pending;
qemu_irq irq;
- qemu_irq gpios[ASPEED_GPIO_NR_PINS];
+ qemu_irq gpios[ASPEED_GPIO_MAX_NR_SETS][ASPEED_GPIOS_PER_SET];
/* Parallel GPIO Registers */
uint32_t debounce_regs[ASPEED_GPIO_NR_DEBOUNCE_REGS];
@@ -95,6 +105,6 @@ typedef struct AspeedGPIOState {
uint32_t debounce_2;
uint32_t input_mask;
} sets[ASPEED_GPIO_MAX_NR_SETS];
-} AspeedGPIOState;
+};
-#endif /* _ASPEED_GPIO_H_ */
+#endif /* ASPEED_GPIO_H */
diff --git a/include/hw/gpio/bcm2835_gpio.h b/include/hw/gpio/bcm2835_gpio.h
index b0de0a3c74..1c53a05090 100644
--- a/include/hw/gpio/bcm2835_gpio.h
+++ b/include/hw/gpio/bcm2835_gpio.h
@@ -16,8 +16,9 @@
#include "hw/sd/sd.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
-typedef struct BCM2835GpioState {
+struct BCM2835GpioState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -31,10 +32,9 @@ typedef struct BCM2835GpioState {
uint32_t lev0, lev1;
uint8_t sd_fsel;
qemu_irq out[54];
-} BCM2835GpioState;
+};
#define TYPE_BCM2835_GPIO "bcm2835_gpio"
-#define BCM2835_GPIO(obj) \
- OBJECT_CHECK(BCM2835GpioState, (obj), TYPE_BCM2835_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835GpioState, BCM2835_GPIO)
#endif
diff --git a/include/hw/gpio/bcm2838_gpio.h b/include/hw/gpio/bcm2838_gpio.h
new file mode 100644
index 0000000000..f2a57a697f
--- /dev/null
+++ b/include/hw/gpio/bcm2838_gpio.h
@@ -0,0 +1,45 @@
+/*
+ * Raspberry Pi (BCM2838) GPIO Controller
+ * This implementation is based on bcm2835_gpio (hw/gpio/bcm2835_gpio.c)
+ *
+ * Copyright (c) 2022 Auriga LLC
+ *
+ * Authors:
+ * Lotosh, Aleksey <aleksey.lotosh@auriga.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2838_GPIO_H
+#define BCM2838_GPIO_H
+
+#include "hw/sd/sd.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2838_GPIO "bcm2838-gpio"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2838GpioState, BCM2838_GPIO)
+
+#define BCM2838_GPIO_REGS_SIZE 0x1000
+#define BCM2838_GPIO_NUM 58
+#define GPIO_PUP_PDN_CNTRL_NUM 4
+
+struct BCM2838GpioState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ /* SDBus selector */
+ SDBus sdbus;
+ SDBus *sdbus_sdhci;
+ SDBus *sdbus_sdhost;
+
+ uint8_t fsel[BCM2838_GPIO_NUM];
+ uint32_t lev0, lev1;
+ uint8_t sd_fsel;
+ qemu_irq out[BCM2838_GPIO_NUM];
+ uint32_t pup_cntrl_reg[GPIO_PUP_PDN_CNTRL_NUM];
+};
+
+#endif
diff --git a/include/hw/gpio/imx_gpio.h b/include/hw/gpio/imx_gpio.h
index ffab437f23..227860b9f0 100644
--- a/include/hw/gpio/imx_gpio.h
+++ b/include/hw/gpio/imx_gpio.h
@@ -21,9 +21,10 @@
#define IMX_GPIO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_GPIO "imx.gpio"
-#define IMX_GPIO(obj) OBJECT_CHECK(IMXGPIOState, (obj), TYPE_IMX_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXGPIOState, IMX_GPIO)
#define IMX_GPIO_MEM_SIZE 0x20
@@ -39,7 +40,7 @@
#define IMX_GPIO_PIN_COUNT 32
-typedef struct IMXGPIOState {
+struct IMXGPIOState {
/*< private >*/
SysBusDevice parent_obj;
@@ -58,6 +59,6 @@ typedef struct IMXGPIOState {
qemu_irq irq[2];
qemu_irq output[IMX_GPIO_PIN_COUNT];
-} IMXGPIOState;
+};
#endif /* IMX_GPIO_H */
diff --git a/include/hw/gpio/npcm7xx_gpio.h b/include/hw/gpio/npcm7xx_gpio.h
new file mode 100644
index 0000000000..b1d771bd77
--- /dev/null
+++ b/include/hw/gpio/npcm7xx_gpio.h
@@ -0,0 +1,55 @@
+/*
+ * Nuvoton NPCM7xx General Purpose Input / Output (GPIO)
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef NPCM7XX_GPIO_H
+#define NPCM7XX_GPIO_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/* Number of pins managed by each controller. */
+#define NPCM7XX_GPIO_NR_PINS (32)
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_GPIO_NR_REGS (0x80 / sizeof(uint32_t))
+
+typedef struct NPCM7xxGPIOState {
+ SysBusDevice parent;
+
+ /* Properties to be defined by the SoC */
+ uint32_t reset_pu;
+ uint32_t reset_pd;
+ uint32_t reset_osrc;
+ uint32_t reset_odsc;
+
+ MemoryRegion mmio;
+
+ qemu_irq irq;
+ qemu_irq output[NPCM7XX_GPIO_NR_PINS];
+
+ uint32_t pin_level;
+ uint32_t ext_level;
+ uint32_t ext_driven;
+
+ uint32_t regs[NPCM7XX_GPIO_NR_REGS];
+} NPCM7xxGPIOState;
+
+#define TYPE_NPCM7XX_GPIO "npcm7xx-gpio"
+#define NPCM7XX_GPIO(obj) \
+ OBJECT_CHECK(NPCM7xxGPIOState, (obj), TYPE_NPCM7XX_GPIO)
+
+#endif /* NPCM7XX_GPIO_H */
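For illustration: NPCM7XX_GPIO_NR_REGS above sizes the register array from the 0x80-byte register window, which a compile-time check can make explicit. A small sketch (assumes C11 _Static_assert; not part of the header):

    #include <stdint.h>

    #define NPCM7XX_GPIO_NR_REGS (0x80 / sizeof(uint32_t))

    /* 32 word-sized registers cover the 0x80-byte window. */
    _Static_assert(NPCM7XX_GPIO_NR_REGS == 32,
                   "register array must span the 0x80-byte window");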
diff --git a/include/hw/gpio/nrf51_gpio.h b/include/hw/gpio/nrf51_gpio.h
index 1d62bbc928..fcfa2bac17 100644
--- a/include/hw/gpio/nrf51_gpio.h
+++ b/include/hw/gpio/nrf51_gpio.h
@@ -27,8 +27,9 @@
#define NRF51_GPIO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_NRF51_GPIO "nrf51_soc.gpio"
-#define NRF51_GPIO(obj) OBJECT_CHECK(NRF51GPIOState, (obj), TYPE_NRF51_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51GPIOState, NRF51_GPIO)
#define NRF51_GPIO_PINS 32
@@ -47,7 +48,7 @@
#define NRF51_GPIO_PULLDOWN 1
#define NRF51_GPIO_PULLUP 3
-typedef struct NRF51GPIOState {
+struct NRF51GPIOState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -63,7 +64,8 @@ typedef struct NRF51GPIOState {
uint32_t old_out_connected;
qemu_irq output[NRF51_GPIO_PINS];
-} NRF51GPIOState;
+ qemu_irq detect;
+};
#endif
diff --git a/include/hw/misc/pca9552.h b/include/hw/gpio/pca9552.h
index 600356fbf9..c36525f0c3 100644
--- a/include/hw/misc/pca9552.h
+++ b/include/hw/gpio/pca9552.h
@@ -10,15 +10,18 @@
#define PCA9552_H
#include "hw/i2c/i2c.h"
+#include "qom/object.h"
#define TYPE_PCA9552 "pca9552"
#define TYPE_PCA955X "pca955x"
-#define PCA955X(obj) OBJECT_CHECK(PCA955xState, (obj), TYPE_PCA955X)
+typedef struct PCA955xState PCA955xState;
+DECLARE_INSTANCE_CHECKER(PCA955xState, PCA955X,
+ TYPE_PCA955X)
#define PCA955X_NR_REGS 10
#define PCA955X_PIN_COUNT_MAX 16
-typedef struct PCA955xState {
+struct PCA955xState {
/*< private >*/
I2CSlave i2c;
/*< public >*/
@@ -27,8 +30,9 @@ typedef struct PCA955xState {
uint8_t pointer;
uint8_t regs[PCA955X_NR_REGS];
- qemu_irq gpio[PCA955X_PIN_COUNT_MAX];
+ qemu_irq gpio_out[PCA955X_PIN_COUNT_MAX];
+ uint8_t ext_state[PCA955X_PIN_COUNT_MAX];
char *description; /* For debugging purpose only */
-} PCA955xState;
+};
#endif
diff --git a/include/hw/misc/pca9552_regs.h b/include/hw/gpio/pca9552_regs.h
index d8051cfbd6..d8051cfbd6 100644
--- a/include/hw/misc/pca9552_regs.h
+++ b/include/hw/gpio/pca9552_regs.h
diff --git a/include/hw/gpio/pca9554.h b/include/hw/gpio/pca9554.h
new file mode 100644
index 0000000000..54bfc4c4c7
--- /dev/null
+++ b/include/hw/gpio/pca9554.h
@@ -0,0 +1,36 @@
+/*
+ * PCA9554 I/O port
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef PCA9554_H
+#define PCA9554_H
+
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
+
+#define TYPE_PCA9554 "pca9554"
+typedef struct PCA9554State PCA9554State;
+DECLARE_INSTANCE_CHECKER(PCA9554State, PCA9554,
+ TYPE_PCA9554)
+
+#define PCA9554_NR_REGS 4
+#define PCA9554_PIN_COUNT 8
+
+struct PCA9554State {
+ /*< private >*/
+ I2CSlave i2c;
+ /*< public >*/
+
+ uint8_t len;
+ uint8_t pointer;
+
+ uint8_t regs[PCA9554_NR_REGS];
+ qemu_irq gpio_out[PCA9554_PIN_COUNT];
+ uint8_t ext_state[PCA9554_PIN_COUNT];
+ char *description; /* For debugging purpose only */
+};
+
+#endif
diff --git a/include/hw/gpio/pca9554_regs.h b/include/hw/gpio/pca9554_regs.h
new file mode 100644
index 0000000000..602c4a90e0
--- /dev/null
+++ b/include/hw/gpio/pca9554_regs.h
@@ -0,0 +1,19 @@
+/*
+ * PCA9554 I/O port registers
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef PCA9554_REGS_H
+#define PCA9554_REGS_H
+
+/*
+ * Bits [0:1] are used to address a specific register.
+ */
+#define PCA9554_INPUT 0 /* read only input register */
+#define PCA9554_OUTPUT 1 /* read/write pin output state */
+#define PCA9554_POLARITY 2 /* Set polarity of input register */
+#define PCA9554_CONFIG 3 /* Set pins as inputs or outputs */
+
+#endif
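For illustration of the register addressing described above, where only bits [1:0] of the command byte select a register, a model or driver would typically mask the pointer before indexing. A minimal sketch using a hypothetical helper, not taken from the patch:

    #include <stdint.h>

    #define PCA9554_INPUT    0   /* read only input register */
    #define PCA9554_OUTPUT   1   /* read/write pin output state */
    #define PCA9554_POLARITY 2   /* set polarity of input register */
    #define PCA9554_CONFIG   3   /* set pins as inputs or outputs */
    #define PCA9554_NR_REGS  4

    /* Hypothetical helper: decode the register addressed by a command byte. */
    static inline uint8_t pca9554_reg_from_cmd(uint8_t cmd)
    {
        return cmd & (PCA9554_NR_REGS - 1);   /* keep bits [1:0] only */
    }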
diff --git a/include/hw/gpio/pcf8574.h b/include/hw/gpio/pcf8574.h
new file mode 100644
index 0000000000..3291d7dbbc
--- /dev/null
+++ b/include/hw/gpio/pcf8574.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * NXP PCF8574 8-port I2C GPIO expansion chip.
+ *
+ * Copyright (c) 2024 KNS Group (YADRO).
+ * Written by Dmitrii Sharikhin <d.sharikhin@yadro.com>
+ */
+
+#ifndef _HW_GPIO_PCF8574
+#define _HW_GPIO_PCF8574
+
+#define TYPE_PCF8574 "pcf8574"
+
+#endif /* _HW_GPIO_PCF8574 */
diff --git a/include/hw/riscv/sifive_gpio.h b/include/hw/gpio/sifive_gpio.h
index cf12fcfd62..fc53785c9d 100644
--- a/include/hw/riscv/sifive_gpio.h
+++ b/include/hw/gpio/sifive_gpio.h
@@ -15,9 +15,12 @@
#define SIFIVE_GPIO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_SIFIVE_GPIO "sifive_soc.gpio"
-#define SIFIVE_GPIO(obj) OBJECT_CHECK(SIFIVEGPIOState, (obj), TYPE_SIFIVE_GPIO)
+typedef struct SIFIVEGPIOState SIFIVEGPIOState;
+DECLARE_INSTANCE_CHECKER(SIFIVEGPIOState, SIFIVE_GPIO,
+ TYPE_SIFIVE_GPIO)
#define SIFIVE_GPIO_PINS 32
@@ -41,7 +44,7 @@
#define SIFIVE_GPIO_REG_IOF_SEL 0x03C
#define SIFIVE_GPIO_REG_OUT_XOR 0x040
-typedef struct SIFIVEGPIOState {
+struct SIFIVEGPIOState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -71,6 +74,6 @@ typedef struct SIFIVEGPIOState {
/* config */
uint32_t ngpio;
-} SIFIVEGPIOState;
+};
#endif /* SIFIVE_GPIO_H */
diff --git a/include/hw/gpio/stm32l4x5_gpio.h b/include/hw/gpio/stm32l4x5_gpio.h
new file mode 100644
index 0000000000..878bd19fc9
--- /dev/null
+++ b/include/hw/gpio/stm32l4x5_gpio.h
@@ -0,0 +1,71 @@
+/*
+ * STM32L4x5 GPIO (General Purpose Input/Output)
+ *
+ * Copyright (c) 2024 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2024 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_STM32L4X5_GPIO_H
+#define HW_STM32L4X5_GPIO_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_GPIO "stm32l4x5-gpio"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5GpioState, STM32L4X5_GPIO)
+
+#define NUM_GPIOS 8
+#define GPIO_NUM_PINS 16
+
+struct Stm32l4x5GpioState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ /* GPIO registers */
+ uint32_t moder;
+ uint32_t otyper;
+ uint32_t ospeedr;
+ uint32_t pupdr;
+ uint32_t idr;
+ uint32_t odr;
+ uint32_t lckr;
+ uint32_t afrl;
+ uint32_t afrh;
+ uint32_t ascr;
+
+ /* GPIO registers reset values */
+ uint32_t moder_reset;
+ uint32_t ospeedr_reset;
+ uint32_t pupdr_reset;
+
+ /*
+ * External driving of pins.
+ * The pins can be set externally through the device's
+ * anonymous input GPIO lines under certain conditions.
+ * The pin must not be in push-pull output mode,
+ * and can't be set high in open-drain mode.
+ * Pins driven externally and configured to
+ * output mode will in general be "disconnected"
+ * (see `get_gpio_pinmask_to_disconnect()`)
+ */
+ uint16_t disconnected_pins;
+ uint16_t pins_connected_high;
+
+ char *name;
+ Clock *clk;
+ qemu_irq pin[GPIO_NUM_PINS];
+};
+
+#endif
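For illustration of the "external driving of pins" comment above: on STM32 GPIOs, MODER carries two bits per pin (0b01 = general-purpose output) and OTYPER one bit per pin (0 = push-pull), so the set of pins that cannot be driven externally because they are push-pull outputs can be derived from those two registers. The sketch below only decodes that register encoding; it is not the device model's actual get_gpio_pinmask_to_disconnect() logic:

    #include <stdint.h>

    #define GPIO_NUM_PINS 16

    /* Sketch: mask of pins currently configured as push-pull outputs. */
    static uint16_t pins_in_push_pull_output(uint32_t moder, uint32_t otyper)
    {
        uint16_t mask = 0;
        for (unsigned pin = 0; pin < GPIO_NUM_PINS; pin++) {
            unsigned mode = (moder >> (2 * pin)) & 0x3;   /* 0b01 = output */
            unsigned open_drain = (otyper >> pin) & 0x1;
            if (mode == 0x1 && !open_drain) {
                mask |= 1u << pin;
            }
        }
        return mask;
    }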
diff --git a/include/hw/hotplug.h b/include/hw/hotplug.h
index 6321e292fd..a9840ed485 100644
--- a/include/hw/hotplug.h
+++ b/include/hw/hotplug.h
@@ -16,10 +16,9 @@
#define TYPE_HOTPLUG_HANDLER "hotplug-handler"
-#define HOTPLUG_HANDLER_CLASS(klass) \
- OBJECT_CLASS_CHECK(HotplugHandlerClass, (klass), TYPE_HOTPLUG_HANDLER)
-#define HOTPLUG_HANDLER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(HotplugHandlerClass, (obj), TYPE_HOTPLUG_HANDLER)
+typedef struct HotplugHandlerClass HotplugHandlerClass;
+DECLARE_CLASS_CHECKERS(HotplugHandlerClass, HOTPLUG_HANDLER,
+ TYPE_HOTPLUG_HANDLER)
#define HOTPLUG_HANDLER(obj) \
INTERFACE_CHECK(HotplugHandler, (obj), TYPE_HOTPLUG_HANDLER)
@@ -49,8 +48,9 @@ typedef void (*hotplug_fn)(HotplugHandler *plug_handler,
* @unplug: unplug callback.
* Used for device removal with devices that implement
* asynchronous and synchronous (surprise) removal.
+ * @is_hotpluggable_bus: called to check if bus/its parent allow hotplug on bus
*/
-typedef struct HotplugHandlerClass {
+struct HotplugHandlerClass {
/* <private> */
InterfaceClass parent;
@@ -59,7 +59,8 @@ typedef struct HotplugHandlerClass {
hotplug_fn plug;
hotplug_fn unplug_request;
hotplug_fn unplug;
-} HotplugHandlerClass;
+ bool (*is_hotpluggable_bus)(HotplugHandler *plug_handler, BusState *bus);
+};
/**
* hotplug_handler_plug:
diff --git a/include/hw/hw.h b/include/hw/hw.h
index fc5301f293..045c1c8b09 100644
--- a/include/hw/hw.h
+++ b/include/hw/hw.h
@@ -5,6 +5,6 @@
#error Cannot include hw/hw.h from user emulation
#endif
-void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+G_NORETURN void hw_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#endif
diff --git a/include/hw/hyperv/dynmem-proto.h b/include/hw/hyperv/dynmem-proto.h
new file mode 100644
index 0000000000..68b8b606f2
--- /dev/null
+++ b/include/hw/hyperv/dynmem-proto.h
@@ -0,0 +1,430 @@
+#ifndef HW_HYPERV_DYNMEM_PROTO_H
+#define HW_HYPERV_DYNMEM_PROTO_H
+
+/*
+ * Hyper-V Dynamic Memory Protocol definitions
+ *
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * Based on drivers/hv/hv_balloon.c from Linux kernel:
+ * Copyright (c) 2012, Microsoft Corporation.
+ *
+ * Author: K. Y. Srinivasan <kys@microsoft.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the major
+ * version.
+ *
+ * History:
+ * Initial version 1.0
+ * Changed to 0.1 on 2009/03/25
+ * Changes to 0.2 on 2009/05/14
+ * Changes to 0.3 on 2009/12/03
+ * Changed to 1.0 on 2011/04/05
+ * Changed to 2.0 on 2019/12/10
+ */
+
+#define DYNMEM_MAKE_VERSION(Major, Minor) ((uint32_t)(((Major) << 16) | (Minor)))
+#define DYNMEM_MAJOR_VERSION(Version) ((uint32_t)(Version) >> 16)
+#define DYNMEM_MINOR_VERSION(Version) ((uint32_t)(Version) & 0xff)
+
+enum {
+ DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
+ DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+ DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
+
+ DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
+ DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+ DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
+
+ DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
+};
+
+
+
+/*
+ * Message Types
+ */
+
+enum dm_message_type {
+ /*
+ * Version 0.3
+ */
+ DM_ERROR = 0,
+ DM_VERSION_REQUEST = 1,
+ DM_VERSION_RESPONSE = 2,
+ DM_CAPABILITIES_REPORT = 3,
+ DM_CAPABILITIES_RESPONSE = 4,
+ DM_STATUS_REPORT = 5,
+ DM_BALLOON_REQUEST = 6,
+ DM_BALLOON_RESPONSE = 7,
+ DM_UNBALLOON_REQUEST = 8,
+ DM_UNBALLOON_RESPONSE = 9,
+ DM_MEM_HOT_ADD_REQUEST = 10,
+ DM_MEM_HOT_ADD_RESPONSE = 11,
+ DM_VERSION_03_MAX = 11,
+ /*
+ * Version 1.0.
+ */
+ DM_INFO_MESSAGE = 12,
+ DM_VERSION_1_MAX = 12,
+
+ /*
+ * Version 2.0
+ */
+ DM_MEM_HOT_REMOVE_REQUEST = 13,
+ DM_MEM_HOT_REMOVE_RESPONSE = 14
+};
+
+
+/*
+ * Structures defining the dynamic memory management
+ * protocol.
+ */
+
+union dm_version {
+ struct {
+ uint16_t minor_version;
+ uint16_t major_version;
+ };
+ uint32_t version;
+} QEMU_PACKED;
+
+
+union dm_caps {
+ struct {
+ uint64_t balloon:1;
+ uint64_t hot_add:1;
+ /*
+ * To support guests that may have alignment
+ * limitations on hot-add, the guest can specify
+ * its alignment requirements; a value of n
+ * represents an alignment of 2^n in megabytes.
+ */
+ uint64_t hot_add_alignment:4;
+ uint64_t hot_remove:1;
+ uint64_t reservedz:57;
+ } cap_bits;
+ uint64_t caps;
+} QEMU_PACKED;
+
+union dm_mem_page_range {
+ struct {
+ /*
+ * The PFN number of the first page in the range.
+ * 40 bits is the architectural limit of a PFN
+ * number for AMD64.
+ */
+ uint64_t start_page:40;
+ /*
+ * The number of pages in the range.
+ */
+ uint64_t page_cnt:24;
+ } finfo;
+ uint64_t page_range;
+} QEMU_PACKED;
+
+
+
+/*
+ * The header for all dynamic memory messages:
+ *
+ * type: Type of the message.
+ * size: Size of the message in bytes, including the header.
+ * trans_id: The guest is responsible for manufacturing this ID.
+ */
+
+struct dm_header {
+ uint16_t type;
+ uint16_t size;
+ uint32_t trans_id;
+} QEMU_PACKED;
+
+/*
+ * A generic message format for dynamic memory.
+ * Specific message formats are defined later in the file.
+ */
+
+struct dm_message {
+ struct dm_header hdr;
+ uint8_t data[]; /* enclosed message */
+} QEMU_PACKED;
+
+
+/*
+ * Specific message types supporting the dynamic memory protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * dm_version: The protocol version requested.
+ * is_last_attempt: If TRUE, this is the last version guest will request.
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_request {
+ struct dm_header hdr;
+ union dm_version version;
+ uint32_t is_last_attempt:1;
+ uint32_t reservedz:31;
+} QEMU_PACKED;
+
+/*
+ * Version response message; sent from the host to the guest to indicate
+ * whether the host has accepted the version sent by the guest.
+ *
+ * is_accepted: If TRUE, host has accepted the version and the guest
+ * should proceed to the next stage of the protocol. FALSE indicates that
+ * guest should re-try with a different version.
+ *
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_response {
+ struct dm_header hdr;
+ uint64_t is_accepted:1;
+ uint64_t reservedz:63;
+} QEMU_PACKED;
+
+/*
+ * Message reporting capabilities. This is sent from the guest to the
+ * host.
+ */
+
+struct dm_capabilities {
+ struct dm_header hdr;
+ union dm_caps caps;
+ uint64_t min_page_cnt;
+ uint64_t max_page_number;
+} QEMU_PACKED;
+
+/*
+ * Response to the capabilities message. This is sent from the host to the
+ * guest. This message notifies whether the host has accepted the guest's
+ * capabilities. If the host has not accepted, the guest must shut down
+ * the service.
+ *
+ * is_accepted: Indicates if the host has accepted guest's capabilities.
+ * reservedz: Must be 0.
+ */
+
+struct dm_capabilities_resp_msg {
+ struct dm_header hdr;
+ uint64_t is_accepted:1;
+ uint64_t hot_remove:1;
+ uint64_t suppress_pressure_reports:1;
+ uint64_t reservedz:61;
+} QEMU_PACKED;
+
+/*
+ * This message is used to report memory pressure from the guest.
+ * This message is not part of any transaction and there is no
+ * response to this message.
+ *
+ * num_avail: Available memory in pages.
+ * num_committed: Committed memory in pages.
+ * page_file_size: The accumulated size of all page files
+ * in the system in pages.
+ * zero_free: The number of zero and free pages.
+ * page_file_writes: The writes to the page file in pages.
+ * io_diff: An indicator of file cache efficiency or page file activity,
+ * calculated as File Cache Page Fault Count - Page Read Count.
+ * This value is in pages.
+ *
+ * Some of these metrics are Windows specific and fortunately
+ * the algorithm on the host side that computes the guest memory
+ * pressure only uses num_committed value.
+ */
+
+struct dm_status {
+ struct dm_header hdr;
+ uint64_t num_avail;
+ uint64_t num_committed;
+ uint64_t page_file_size;
+ uint64_t zero_free;
+ uint32_t page_file_writes;
+ uint32_t io_diff;
+} QEMU_PACKED;
+
+
+/*
+ * Message to ask the guest to allocate memory - balloon up message.
+ * This message is sent from the host to the guest. The guest may not be
+ * able to allocate as much memory as requested.
+ *
+ * num_pages: number of pages to allocate.
+ */
+
+struct dm_balloon {
+ struct dm_header hdr;
+ uint32_t num_pages;
+ uint32_t reservedz;
+} QEMU_PACKED;
+
+
+/*
+ * Balloon response message; this message is sent from the guest
+ * to the host in response to the balloon message.
+ *
+ * reservedz: Reserved; must be set to zero.
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * if TRUE there will be at least one more message from the guest.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_balloon_response {
+ struct dm_header hdr;
+ uint32_t reservedz;
+ uint32_t more_pages:1;
+ uint32_t range_count:31;
+ union dm_mem_page_range range_array[];
+} QEMU_PACKED;
+
+/*
+ * Un-balloon message; this message is sent from the host
+ * to the guest to give guest more memory.
+ *
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * if TRUE there will be at least one more message from the guest.
+ *
+ * reservedz: Reserved; must be set to zero.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_unballoon_request {
+ struct dm_header hdr;
+ uint32_t more_pages:1;
+ uint32_t reservedz:31;
+ uint32_t range_count;
+ union dm_mem_page_range range_array[];
+} QEMU_PACKED;
+
+/*
+ * Un-balloon response message; this message is sent from the guest
+ * to the host in response to an unballoon request.
+ *
+ */
+
+struct dm_unballoon_response {
+ struct dm_header hdr;
+} QEMU_PACKED;
+
+
+/*
+ * Hot add request message. Message sent from the host to the guest.
+ *
+ * range: Memory range to hot add.
+ * region: Explicit hot add memory region for guest to use. Optional.
+ *
+ */
+
+struct dm_hot_add {
+ struct dm_header hdr;
+ union dm_mem_page_range range;
+} QEMU_PACKED;
+
+struct dm_hot_add_with_region {
+ struct dm_header hdr;
+ union dm_mem_page_range range;
+ union dm_mem_page_range region;
+} QEMU_PACKED;
+
+/*
+ * Hot add response message.
+ * This message is sent by the guest to report the status of a hot add request.
+ * If page_count is less than the requested page count, then the host should
+ * assume all further hot add requests will fail, since this indicates that
+ * the guest has hit an upper physical memory barrier.
+ *
+ * Hot adds may also fail due to low resources; in this case, the guest must
+ * not complete this message until the hot add can succeed, and the host must
+ * not send a new hot add request until the response is sent.
+ * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
+ * times, it fails the request.
+ *
+ *
+ * page_count: number of pages that were successfully hot added.
+ *
+ * result: result of the operation 1: success, 0: failure.
+ *
+ */
+
+struct dm_hot_add_response {
+ struct dm_header hdr;
+ uint32_t page_count;
+ uint32_t result;
+} QEMU_PACKED;
+
+struct dm_hot_remove {
+ struct dm_header hdr;
+ uint32_t virtual_node;
+ uint32_t page_count;
+ uint32_t qos_flags;
+ uint32_t reservedZ;
+} QEMU_PACKED;
+
+struct dm_hot_remove_response {
+ struct dm_header hdr;
+ uint32_t result;
+ uint32_t range_count;
+ uint64_t more_pages:1;
+ uint64_t reservedz:63;
+ union dm_mem_page_range range_array[];
+} QEMU_PACKED;
+
+#define DM_REMOVE_QOS_LARGE (1 << 0)
+#define DM_REMOVE_QOS_LOCAL (1 << 1)
+#define DM_REMOVE_QOS_MASK (0x3)
+
+/*
+ * Types of information sent from host to the guest.
+ */
+
+enum dm_info_type {
+ INFO_TYPE_MAX_PAGE_CNT = 0,
+ MAX_INFO_TYPE
+};
+
+
+/*
+ * Header for the information message.
+ */
+
+struct dm_info_header {
+ enum dm_info_type type;
+ uint32_t data_size;
+ uint8_t data[];
+} QEMU_PACKED;
+
+/*
+ * This message is sent from the host to the guest to pass
+ * some relevant information (win8 addition).
+ *
+ * reserved: not used.
+ * info_size: size of the information blob.
+ * info: information blob.
+ */
+
+struct dm_info_msg {
+ struct dm_header hdr;
+ uint32_t reserved;
+ uint32_t info_size;
+ uint8_t info[];
+};
+
+#endif
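For illustration of the version packing described above (major in the high 16 bits, minor in the low 16 bits), a quick standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define DYNMEM_MAKE_VERSION(Major, Minor) \
        ((uint32_t)(((Major) << 16) | (Minor)))
    #define DYNMEM_MAJOR_VERSION(Version) ((uint32_t)(Version) >> 16)

    int main(void)
    {
        uint32_t v = DYNMEM_MAKE_VERSION(2, 0);   /* DYNMEM_PROTOCOL_VERSION_3 */
        assert(v == 0x00020000);
        assert(DYNMEM_MAJOR_VERSION(v) == 2);
        return 0;
    }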
diff --git a/include/hw/hyperv/hv-balloon.h b/include/hw/hyperv/hv-balloon.h
new file mode 100644
index 0000000000..c1efe70fc2
--- /dev/null
+++ b/include/hw/hyperv/hv-balloon.h
@@ -0,0 +1,18 @@
+/*
+ * QEMU Hyper-V Dynamic Memory Protocol driver
+ *
+ * Copyright (C) 2020-2023 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_HV_BALLOON_H
+#define HW_HV_BALLOON_H
+
+#include "qom/object.h"
+
+#define TYPE_HV_BALLOON "hv-balloon"
+OBJECT_DECLARE_SIMPLE_TYPE(HvBalloon, HV_BALLOON)
+
+#endif
diff --git a/include/hw/hyperv/hyperv-proto.h b/include/hw/hyperv/hyperv-proto.h
index 21dc28aee9..4a2297307b 100644
--- a/include/hw/hyperv/hyperv-proto.h
+++ b/include/hw/hyperv/hyperv-proto.h
@@ -24,12 +24,17 @@
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_NOT_ACKNOWLEDGED 20
+#define HV_STATUS_NO_DATA 27
/*
* Hypercall numbers
*/
#define HV_POST_MESSAGE 0x005c
#define HV_SIGNAL_EVENT 0x005d
+#define HV_POST_DEBUG_DATA 0x0069
+#define HV_RETRIEVE_DEBUG_DATA 0x006a
+#define HV_RESET_DEBUG_SESSION 0x006b
#define HV_HYPERCALL_FAST (1u << 16)
/*
@@ -127,4 +132,51 @@ struct hyperv_event_flags_page {
struct hyperv_event_flags slot[HV_SINT_COUNT];
};
+/*
+ * Kernel debugger structures
+ */
+
+/* Options flags for hyperv_reset_debug_session */
+#define HV_DEBUG_PURGE_INCOMING_DATA 0x00000001
+#define HV_DEBUG_PURGE_OUTGOING_DATA 0x00000002
+struct hyperv_reset_debug_session_input {
+ uint32_t options;
+} __attribute__ ((__packed__));
+
+struct hyperv_reset_debug_session_output {
+ uint32_t host_ip;
+ uint32_t target_ip;
+ uint16_t host_port;
+ uint16_t target_port;
+ uint8_t host_mac[6];
+ uint8_t target_mac[6];
+} __attribute__ ((__packed__));
+
+/* Options for hyperv_post_debug_data */
+#define HV_DEBUG_POST_LOOP 0x00000001
+
+struct hyperv_post_debug_data_input {
+ uint32_t count;
+ uint32_t options;
+ /*uint8_t data[HV_HYP_PAGE_SIZE - 2 * sizeof(uint32_t)];*/
+} __attribute__ ((__packed__));
+
+struct hyperv_post_debug_data_output {
+ uint32_t pending_count;
+} __attribute__ ((__packed__));
+
+/* Options for hyperv_retrieve_debug_data */
+#define HV_DEBUG_RETRIEVE_LOOP 0x00000001
+#define HV_DEBUG_RETRIEVE_TEST_ACTIVITY 0x00000002
+
+struct hyperv_retrieve_debug_data_input {
+ uint32_t count;
+ uint32_t options;
+ uint64_t timeout;
+} __attribute__ ((__packed__));
+
+struct hyperv_retrieve_debug_data_output {
+ uint32_t retrieved_count;
+ uint32_t remaining_count;
+} __attribute__ ((__packed__));
#endif
diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h
index a63ee0003c..d717b4e13d 100644
--- a/include/hw/hyperv/hyperv.h
+++ b/include/hw/hyperv/hyperv.h
@@ -81,4 +81,66 @@ void hyperv_synic_update(CPUState *cs, bool enable,
hwaddr msg_page_addr, hwaddr event_page_addr);
bool hyperv_is_synic_enabled(void);
+/*
+ * Process HVCALL_RESET_DEBUG_SESSION hypercall.
+ */
+uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa);
+/*
+ * Process HVCALL_RETREIVE_DEBUG_DATA hypercall.
+ */
+uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
+ bool fast);
+/*
+ * Process HVCALL_POST_DEBUG_DATA hypercall.
+ */
+uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast);
+
+uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count);
+uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count);
+void hyperv_syndbg_set_pending_page(uint64_t ingpa);
+uint64_t hyperv_syndbg_query_options(void);
+
+typedef enum HvSynthDbgMsgType {
+ HV_SYNDBG_MSG_CONNECTION_INFO,
+ HV_SYNDBG_MSG_SEND,
+ HV_SYNDBG_MSG_RECV,
+ HV_SYNDBG_MSG_SET_PENDING_PAGE,
+ HV_SYNDBG_MSG_QUERY_OPTIONS
+} HvDbgSynthMsgType;
+
+typedef struct HvSynDbgMsg {
+ HvDbgSynthMsgType type;
+ union {
+ struct {
+ uint32_t host_ip;
+ uint16_t host_port;
+ } connection_info;
+ struct {
+ uint64_t buf_gpa;
+ uint32_t count;
+ uint32_t pending_count;
+ bool is_raw;
+ } send;
+ struct {
+ uint64_t buf_gpa;
+ uint32_t count;
+ uint32_t options;
+ uint64_t timeout;
+ uint32_t retrieved_count;
+ bool is_raw;
+ } recv;
+ struct {
+ uint64_t buf_gpa;
+ } pending_page;
+ struct {
+ uint64_t options;
+ } query_options;
+ } u;
+} HvSynDbgMsg;
+typedef uint16_t (*HvSynDbgHandler)(void *context, HvSynDbgMsg *msg);
+void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context);
+
+bool hyperv_are_vmbus_recommended_features_enabled(void);
+void hyperv_set_vmbus_recommended_features_enabled(void);
+
#endif
diff --git a/include/hw/hyperv/vmbus-bridge.h b/include/hw/hyperv/vmbus-bridge.h
index 33f93de64d..1e5419574e 100644
--- a/include/hw/hyperv/vmbus-bridge.h
+++ b/include/hw/hyperv/vmbus-bridge.h
@@ -11,20 +11,20 @@
#define HW_HYPERV_VMBUS_BRIDGE_H
#include "hw/sysbus.h"
+#include "hw/hyperv/vmbus.h"
+#include "qom/object.h"
#define TYPE_VMBUS_BRIDGE "vmbus-bridge"
-typedef struct VMBus VMBus;
-
-typedef struct VMBusBridge {
+struct VMBusBridge {
SysBusDevice parent_obj;
uint8_t irq;
VMBus *bus;
-} VMBusBridge;
+};
-#define VMBUS_BRIDGE(obj) OBJECT_CHECK(VMBusBridge, (obj), TYPE_VMBUS_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(VMBusBridge, VMBUS_BRIDGE)
static inline VMBusBridge *vmbus_bridge_find(void)
{
diff --git a/include/hw/hyperv/vmbus.h b/include/hw/hyperv/vmbus.h
index 40e8417eec..5c505852f2 100644
--- a/include/hw/hyperv/vmbus.h
+++ b/include/hw/hyperv/vmbus.h
@@ -16,15 +16,15 @@
#include "migration/vmstate.h"
#include "hw/hyperv/vmbus-proto.h"
#include "qemu/uuid.h"
+#include "qom/object.h"
#define TYPE_VMBUS_DEVICE "vmbus-dev"
-#define VMBUS_DEVICE(obj) \
- OBJECT_CHECK(VMBusDevice, (obj), TYPE_VMBUS_DEVICE)
-#define VMBUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VMBusDeviceClass, (klass), TYPE_VMBUS_DEVICE)
-#define VMBUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VMBusDeviceClass, (obj), TYPE_VMBUS_DEVICE)
+OBJECT_DECLARE_TYPE(VMBusDevice, VMBusDeviceClass,
+ VMBUS_DEVICE)
+
+#define TYPE_VMBUS "vmbus"
+OBJECT_DECLARE_SIMPLE_TYPE(VMBus, VMBUS)
/*
* Object wrapping a GPADL -- GPA Descriptor List -- an array of guest physical
@@ -40,11 +40,10 @@ typedef struct VMBusChannel VMBusChannel;
* Base class for VMBus devices. Includes one or more channels. Identified by
* class GUID and instance GUID.
*/
-typedef struct VMBusDevice VMBusDevice;
typedef void(*VMBusChannelNotifyCb)(struct VMBusChannel *chan);
-typedef struct VMBusDeviceClass {
+struct VMBusDeviceClass {
DeviceClass parent;
QemuUUID classid;
@@ -52,7 +51,7 @@ typedef struct VMBusDeviceClass {
uint16_t channel_flags;
uint16_t mmio_size_mb;
- /* Extentions to standard device callbacks */
+ /* Extensions to standard device callbacks */
void (*vmdev_realize)(VMBusDevice *vdev, Error **errp);
void (*vmdev_unrealize)(VMBusDevice *vdev);
void (*vmdev_reset)(VMBusDevice *vdev);
@@ -76,7 +75,7 @@ typedef struct VMBusDeviceClass {
* side, when there's work to do with the data in the channel ring buffers.
*/
VMBusChannelNotifyCb chan_notify_cb;
-} VMBusDeviceClass;
+};
struct VMBusDevice {
DeviceState parent;
@@ -224,7 +223,4 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov,
unsigned iov_cnt, size_t accessed);
-void vmbus_save_req(QEMUFile *f, VMBusChanReq *req);
-void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size);
-
#endif
diff --git a/include/hw/i2c/allwinner-i2c.h b/include/hw/i2c/allwinner-i2c.h
new file mode 100644
index 0000000000..0e325d265e
--- /dev/null
+++ b/include/hw/i2c/allwinner-i2c.h
@@ -0,0 +1,61 @@
+/*
+ * Allwinner I2C Bus Serial Interface registers definition
+ *
+ * Copyright (C) 2022 Strahinja Jankovic. <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from IMX I2C controller,
+ * by Jean-Christophe DUBOIS .
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef ALLWINNER_I2C_H
+#define ALLWINNER_I2C_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_AW_I2C "allwinner.i2c"
+
+/** Allwinner I2C sun6i family and newer (A31, H2+, H3, etc) */
+#define TYPE_AW_I2C_SUN6I TYPE_AW_I2C "-sun6i"
+
+OBJECT_DECLARE_SIMPLE_TYPE(AWI2CState, AW_I2C)
+
+#define AW_I2C_MEM_SIZE 0x24
+
+struct AWI2CState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ I2CBus *bus;
+ qemu_irq irq;
+
+ uint8_t addr;
+ uint8_t xaddr;
+ uint8_t data;
+ uint8_t cntr;
+ uint8_t stat;
+ uint8_t ccr;
+ uint8_t srst;
+ uint8_t efr;
+ uint8_t lcr;
+
+ bool irq_clear_inverted;
+};
+
+#endif /* ALLWINNER_I2C_H */
diff --git a/include/hw/i2c/arm_sbcon_i2c.h b/include/hw/i2c/arm_sbcon_i2c.h
index 5d96507ab6..da9b5e8f83 100644
--- a/include/hw/i2c/arm_sbcon_i2c.h
+++ b/include/hw/i2c/arm_sbcon_i2c.h
@@ -9,19 +9,20 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef HW_I2C_ARM_SBCON_H
-#define HW_I2C_ARM_SBCON_H
+
+#ifndef HW_I2C_ARM_SBCON_I2C_H
+#define HW_I2C_ARM_SBCON_I2C_H
#include "hw/sysbus.h"
#include "hw/i2c/bitbang_i2c.h"
+#include "qom/object.h"
-#define TYPE_VERSATILE_I2C "versatile_i2c"
-#define TYPE_ARM_SBCON_I2C TYPE_VERSATILE_I2C
+#define TYPE_ARM_SBCON_I2C "versatile_i2c"
-#define ARM_SBCON_I2C(obj) \
- OBJECT_CHECK(ArmSbconI2CState, (obj), TYPE_ARM_SBCON_I2C)
+typedef struct ArmSbconI2CState ArmSbconI2CState;
+DECLARE_INSTANCE_CHECKER(ArmSbconI2CState, ARM_SBCON_I2C, TYPE_ARM_SBCON_I2C)
-typedef struct ArmSbconI2CState {
+struct ArmSbconI2CState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -30,6 +31,6 @@ typedef struct ArmSbconI2CState {
bitbang_i2c_interface bitbang;
int out;
int in;
-} ArmSbconI2CState;
+};
-#endif /* HW_I2C_ARM_SBCON_H */
+#endif /* HW_I2C_ARM_SBCON_I2C_H */
diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h
index 243789ae5d..a064479e59 100644
--- a/include/hw/i2c/aspeed_i2c.h
+++ b/include/hw/i2c/aspeed_i2c.h
@@ -23,40 +23,231 @@
#include "hw/i2c/i2c.h"
#include "hw/sysbus.h"
+#include "hw/registerfields.h"
+#include "qom/object.h"
#define TYPE_ASPEED_I2C "aspeed.i2c"
#define TYPE_ASPEED_2400_I2C TYPE_ASPEED_I2C "-ast2400"
#define TYPE_ASPEED_2500_I2C TYPE_ASPEED_I2C "-ast2500"
#define TYPE_ASPEED_2600_I2C TYPE_ASPEED_I2C "-ast2600"
-#define ASPEED_I2C(obj) \
- OBJECT_CHECK(AspeedI2CState, (obj), TYPE_ASPEED_I2C)
+#define TYPE_ASPEED_1030_I2C TYPE_ASPEED_I2C "-ast1030"
+OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C)
#define ASPEED_I2C_NR_BUSSES 16
#define ASPEED_I2C_MAX_POOL_SIZE 0x800
+#define ASPEED_I2C_OLD_NUM_REG 11
+#define ASPEED_I2C_NEW_NUM_REG 22
+
+#define A_I2CD_M_STOP_CMD BIT(5)
+#define A_I2CD_M_RX_CMD BIT(3)
+#define A_I2CD_M_TX_CMD BIT(1)
+#define A_I2CD_M_START_CMD BIT(0)
+
+#define A_I2CD_MASTER_EN BIT(0)
+
+/* Tx State Machine */
+#define I2CD_TX_STATE_MASK 0xf
+#define I2CD_IDLE 0x0
+#define I2CD_MACTIVE 0x8
+#define I2CD_MSTART 0x9
+#define I2CD_MSTARTR 0xa
+#define I2CD_MSTOP 0xb
+#define I2CD_MTXD 0xc
+#define I2CD_MRXACK 0xd
+#define I2CD_MRXD 0xe
+#define I2CD_MTXACK 0xf
+#define I2CD_SWAIT 0x1
+#define I2CD_SRXD 0x4
+#define I2CD_STXACK 0x5
+#define I2CD_STXD 0x6
+#define I2CD_SRXACK 0x7
+#define I2CD_RECOVER 0x3
+
+/* I2C Global Register */
+REG32(I2C_CTRL_STATUS, 0x0) /* Device Interrupt Status */
+REG32(I2C_CTRL_ASSIGN, 0x8) /* Device Interrupt Target Assignment */
+REG32(I2C_CTRL_GLOBAL, 0xC) /* Global Control Register */
+ FIELD(I2C_CTRL_GLOBAL, REG_MODE, 2, 1)
+ FIELD(I2C_CTRL_GLOBAL, SRAM_EN, 0, 1)
+REG32(I2C_CTRL_NEW_CLK_DIVIDER, 0x10) /* New mode clock divider */
+
+/* I2C Old Mode Device (Bus) Register */
+REG32(I2CD_FUN_CTRL, 0x0) /* I2CD Function Control */
+ FIELD(I2CD_FUN_CTRL, POOL_PAGE_SEL, 20, 3) /* AST2400 */
+ SHARED_FIELD(M_SDA_LOCK_EN, 16, 1)
+ SHARED_FIELD(MULTI_MASTER_DIS, 15, 1)
+ SHARED_FIELD(M_SCL_DRIVE_EN, 14, 1)
+ SHARED_FIELD(MSB_STS, 9, 1)
+ SHARED_FIELD(SDA_DRIVE_IT_EN, 8, 1)
+ SHARED_FIELD(M_SDA_DRIVE_IT_EN, 7, 1)
+ SHARED_FIELD(M_HIGH_SPEED_EN, 6, 1)
+ SHARED_FIELD(DEF_ADDR_EN, 5, 1)
+ SHARED_FIELD(DEF_ALERT_EN, 4, 1)
+ SHARED_FIELD(DEF_ARP_EN, 3, 1)
+ SHARED_FIELD(DEF_GCALL_EN, 2, 1)
+ SHARED_FIELD(SLAVE_EN, 1, 1)
+ SHARED_FIELD(MASTER_EN, 0, 1)
+REG32(I2CD_AC_TIMING1, 0x04) /* Clock and AC Timing Control #1 */
+REG32(I2CD_AC_TIMING2, 0x08) /* Clock and AC Timing Control #2 */
+REG32(I2CD_INTR_CTRL, 0x0C) /* I2CD Interrupt Control */
+REG32(I2CD_INTR_STS, 0x10) /* I2CD Interrupt Status */
+ SHARED_FIELD(SLAVE_ADDR_MATCH, 31, 1) /* 0: addr1 1: addr2 */
+ SHARED_FIELD(SLAVE_ADDR_RX_PENDING, 29, 1)
+ SHARED_FIELD(SLAVE_INACTIVE_TIMEOUT, 15, 1)
+ SHARED_FIELD(SDA_DL_TIMEOUT, 14, 1)
+ SHARED_FIELD(BUS_RECOVER_DONE, 13, 1)
+ SHARED_FIELD(SMBUS_ALERT, 12, 1) /* Bus [0-3] only */
+ FIELD(I2CD_INTR_STS, SMBUS_ARP_ADDR, 11, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, SMBUS_DEV_ALERT_ADDR, 10, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, SMBUS_DEF_ADDR, 9, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, GCALL_ADDR, 8, 1) /* Removed */
+ FIELD(I2CD_INTR_STS, SLAVE_ADDR_RX_MATCH, 7, 1) /* use RX_DONE */
+ SHARED_FIELD(SCL_TIMEOUT, 6, 1)
+ SHARED_FIELD(ABNORMAL, 5, 1)
+ SHARED_FIELD(NORMAL_STOP, 4, 1)
+ SHARED_FIELD(ARBIT_LOSS, 3, 1)
+ SHARED_FIELD(RX_DONE, 2, 1)
+ SHARED_FIELD(TX_NAK, 1, 1)
+ SHARED_FIELD(TX_ACK, 0, 1)
+REG32(I2CD_CMD, 0x14) /* I2CD Command/Status */
+ SHARED_FIELD(SDA_OE, 28, 1)
+ SHARED_FIELD(SDA_O, 27, 1)
+ SHARED_FIELD(SCL_OE, 26, 1)
+ SHARED_FIELD(SCL_O, 25, 1)
+ SHARED_FIELD(TX_TIMING, 23, 2)
+ SHARED_FIELD(TX_STATE, 19, 4)
+ SHARED_FIELD(SCL_LINE_STS, 18, 1)
+ SHARED_FIELD(SDA_LINE_STS, 17, 1)
+ SHARED_FIELD(BUS_BUSY_STS, 16, 1)
+ SHARED_FIELD(SDA_OE_OUT_DIR, 15, 1)
+ SHARED_FIELD(SDA_O_OUT_DIR, 14, 1)
+ SHARED_FIELD(SCL_OE_OUT_DIR, 13, 1)
+ SHARED_FIELD(SCL_O_OUT_DIR, 12, 1)
+ SHARED_FIELD(BUS_RECOVER_CMD_EN, 11, 1)
+ SHARED_FIELD(S_ALT_EN, 10, 1)
+ /* Command Bits */
+ SHARED_FIELD(RX_DMA_EN, 9, 1)
+ SHARED_FIELD(TX_DMA_EN, 8, 1)
+ SHARED_FIELD(RX_BUFF_EN, 7, 1)
+ SHARED_FIELD(TX_BUFF_EN, 6, 1)
+ SHARED_FIELD(M_STOP_CMD, 5, 1)
+ SHARED_FIELD(M_S_RX_CMD_LAST, 4, 1)
+ SHARED_FIELD(M_RX_CMD, 3, 1)
+ SHARED_FIELD(S_TX_CMD, 2, 1)
+ SHARED_FIELD(M_TX_CMD, 1, 1)
+ SHARED_FIELD(M_START_CMD, 0, 1)
+REG32(I2CD_DEV_ADDR, 0x18) /* Slave Device Address */
+ SHARED_FIELD(SLAVE_DEV_ADDR1, 0, 7)
+REG32(I2CD_POOL_CTRL, 0x1C) /* Pool Buffer Control */
+ SHARED_FIELD(RX_COUNT, 24, 6)
+ SHARED_FIELD(RX_SIZE, 16, 5)
+ SHARED_FIELD(TX_COUNT, 8, 5)
+ FIELD(I2CD_POOL_CTRL, OFFSET, 2, 6) /* AST2400 */
+ SHARED_FIELD(BUF_ORGANIZATION, 0, 1) /* AST2600 */
+REG32(I2CD_BYTE_BUF, 0x20) /* Transmit/Receive Byte Buffer */
+ SHARED_FIELD(RX_BUF, 8, 8)
+ SHARED_FIELD(TX_BUF, 0, 8)
+REG32(I2CD_DMA_ADDR, 0x24) /* DMA Buffer Address */
+REG32(I2CD_DMA_LEN, 0x28) /* DMA Transfer Length < 4KB */
+
+/* I2C New Mode Device (Bus) Register */
+REG32(I2CC_FUN_CTRL, 0x0)
+ FIELD(I2CC_FUN_CTRL, RB_EARLY_DONE_EN, 22, 1)
+ FIELD(I2CC_FUN_CTRL, DMA_DIS_AUTO_RECOVER, 21, 1)
+ FIELD(I2CC_FUN_CTRL, S_SAVE_ADDR, 20, 1)
+ FIELD(I2CC_FUN_CTRL, M_PKT_RETRY_CNT, 18, 2)
+ /* 17:0 shared with I2CD_FUN_CTRL[17:0] */
+REG32(I2CC_AC_TIMING, 0x04)
+REG32(I2CC_MS_TXRX_BYTE_BUF, 0x08)
+ /* 31:16 shared with I2CD_CMD[31:16] */
+ /* 15:0 shared with I2CD_BYTE_BUF[15:0] */
+REG32(I2CC_POOL_CTRL, 0x0c)
+ /* 31:0 shared with I2CD_POOL_CTRL[31:0] */
+REG32(I2CM_INTR_CTRL, 0x10)
+REG32(I2CM_INTR_STS, 0x14)
+ FIELD(I2CM_INTR_STS, PKT_STATE, 28, 4)
+ FIELD(I2CM_INTR_STS, PKT_CMD_TIMEOUT, 18, 1)
+ FIELD(I2CM_INTR_STS, PKT_CMD_FAIL, 17, 1)
+ FIELD(I2CM_INTR_STS, PKT_CMD_DONE, 16, 1)
+ FIELD(I2CM_INTR_STS, BUS_RECOVER_FAIL, 15, 1)
+ /* 14:0 shared with I2CD_INTR_STS[14:0] */
+REG32(I2CM_CMD, 0x18)
+ FIELD(I2CM_CMD, W1_CTRL, 31, 1)
+ FIELD(I2CM_CMD, PKT_DEV_ADDR, 24, 7)
+ FIELD(I2CM_CMD, HS_MASTER_MODE_LSB, 17, 3)
+ FIELD(I2CM_CMD, PKT_OP_EN, 16, 1)
+ /* 15:0 shared with I2CD_CMD[15:0] */
+REG32(I2CM_DMA_LEN, 0x1c)
+ FIELD(I2CM_DMA_LEN, RX_BUF_LEN_W1T, 31, 1)
+ FIELD(I2CM_DMA_LEN, RX_BUF_LEN, 16, 11)
+ FIELD(I2CM_DMA_LEN, TX_BUF_LEN_W1T, 15, 1)
+ FIELD(I2CM_DMA_LEN, TX_BUF_LEN, 0, 11)
+REG32(I2CS_INTR_CTRL, 0x20)
+ FIELD(I2CS_INTR_CTRL, PKT_CMD_FAIL, 17, 1)
+ FIELD(I2CS_INTR_CTRL, PKT_CMD_DONE, 16, 1)
+REG32(I2CS_INTR_STS, 0x24)
+ /* 31:29 shared with I2CD_INTR_STS[31:29] */
+ FIELD(I2CS_INTR_STS, SLAVE_PARKING_STS, 24, 2)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR3_NAK, 22, 1)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR2_NAK, 21, 1)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR1_NAK, 20, 1)
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR_INDICATOR, 18, 2)
+ FIELD(I2CS_INTR_STS, PKT_CMD_FAIL, 17, 1)
+ FIELD(I2CS_INTR_STS, PKT_CMD_DONE, 16, 1)
+ /* 14:0 shared with I2CD_INTR_STS[14:0] */
+ FIELD(I2CS_INTR_STS, SLAVE_ADDR_RX_MATCH, 7, 1)
+REG32(I2CS_CMD, 0x28)
+ FIELD(I2CS_CMD, W1_CTRL, 31, 1)
+ FIELD(I2CS_CMD, PKT_MODE_ACTIVE_ADDR, 17, 2)
+ FIELD(I2CS_CMD, PKT_MODE_EN, 16, 1)
+ FIELD(I2CS_CMD, AUTO_NAK_INACTIVE_ADDR, 15, 1)
+ FIELD(I2CS_CMD, AUTO_NAK_ACTIVE_ADDR, 14, 1)
+ /* 13:0 shared with I2CD_CMD[13:0] */
+REG32(I2CS_DMA_LEN, 0x2c)
+ FIELD(I2CS_DMA_LEN, RX_BUF_LEN_W1T, 31, 1)
+ FIELD(I2CS_DMA_LEN, RX_BUF_LEN, 16, 11)
+ FIELD(I2CS_DMA_LEN, TX_BUF_LEN_W1T, 15, 1)
+ FIELD(I2CS_DMA_LEN, TX_BUF_LEN, 0, 11)
+REG32(I2CM_DMA_TX_ADDR, 0x30)
+ FIELD(I2CM_DMA_TX_ADDR, ADDR, 0, 31)
+REG32(I2CM_DMA_RX_ADDR, 0x34)
+ FIELD(I2CM_DMA_RX_ADDR, ADDR, 0, 31)
+REG32(I2CS_DMA_TX_ADDR, 0x38)
+ FIELD(I2CS_DMA_TX_ADDR, ADDR, 0, 31)
+REG32(I2CS_DMA_RX_ADDR, 0x3c)
+ FIELD(I2CS_DMA_RX_ADDR, ADDR, 0, 31)
+REG32(I2CS_DEV_ADDR, 0x40)
+REG32(I2CM_DMA_LEN_STS, 0x48)
+ FIELD(I2CM_DMA_LEN_STS, RX_LEN, 16, 13)
+ FIELD(I2CM_DMA_LEN_STS, TX_LEN, 0, 13)
+REG32(I2CS_DMA_LEN_STS, 0x4c)
+ FIELD(I2CS_DMA_LEN_STS, RX_LEN, 16, 13)
+ FIELD(I2CS_DMA_LEN_STS, TX_LEN, 0, 13)
+REG32(I2CC_DMA_ADDR, 0x50)
+REG32(I2CC_DMA_LEN, 0x54)
struct AspeedI2CState;
-typedef struct AspeedI2CBus {
+#define TYPE_ASPEED_I2C_BUS "aspeed.i2c.bus"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBus, ASPEED_I2C_BUS)
+struct AspeedI2CBus {
+ SysBusDevice parent_obj;
+
struct AspeedI2CState *controller;
+ /* slave mode */
+ I2CSlave *slave;
+
MemoryRegion mr;
I2CBus *bus;
uint8_t id;
qemu_irq irq;
- uint32_t ctrl;
- uint32_t timing[2];
- uint32_t intr_ctrl;
- uint32_t intr_status;
- uint32_t cmd;
- uint32_t buf;
- uint32_t pool_ctrl;
- uint32_t dma_addr;
- uint32_t dma_len;
-} AspeedI2CBus;
-
-typedef struct AspeedI2CState {
+ uint32_t regs[ASPEED_I2C_NEW_NUM_REG];
+};
+
+struct AspeedI2CState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -64,20 +255,22 @@ typedef struct AspeedI2CState {
uint32_t intr_status;
uint32_t ctrl_global;
+ uint32_t new_clk_divider;
MemoryRegion pool_iomem;
uint8_t pool[ASPEED_I2C_MAX_POOL_SIZE];
AspeedI2CBus busses[ASPEED_I2C_NR_BUSSES];
MemoryRegion *dram_mr;
AddressSpace dram_as;
-} AspeedI2CState;
+};
-#define ASPEED_I2C_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedI2CClass, (klass), TYPE_ASPEED_I2C)
-#define ASPEED_I2C_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedI2CClass, (obj), TYPE_ASPEED_I2C)
+#define TYPE_ASPEED_I2C_BUS_SLAVE "aspeed.i2c.slave"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBusSlave, ASPEED_I2C_BUS_SLAVE)
+struct AspeedI2CBusSlave {
+ I2CSlave i2c;
+};
-typedef struct AspeedI2CClass {
+struct AspeedI2CClass {
SysBusDeviceClass parent_class;
uint8_t num_busses;
@@ -91,7 +284,105 @@ typedef struct AspeedI2CClass {
bool check_sram;
bool has_dma;
-} AspeedI2CClass;
+};
+
+static inline bool aspeed_i2c_is_new_mode(AspeedI2CState *s)
+{
+ return FIELD_EX32(s->ctrl_global, I2C_CTRL_GLOBAL, REG_MODE);
+}
+
+static inline bool aspeed_i2c_bus_pkt_mode_en(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return ARRAY_FIELD_EX32(bus->regs, I2CM_CMD, PKT_OP_EN);
+ }
+ return false;
+}
+
+static inline uint32_t aspeed_i2c_bus_ctrl_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_FUN_CTRL;
+ }
+ return R_I2CD_FUN_CTRL;
+}
+
+static inline uint32_t aspeed_i2c_bus_cmd_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CM_CMD;
+ }
+ return R_I2CD_CMD;
+}
+
+static inline uint32_t aspeed_i2c_bus_dev_addr_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CS_DEV_ADDR;
+ }
+ return R_I2CD_DEV_ADDR;
+}
+
+static inline uint32_t aspeed_i2c_bus_intr_ctrl_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CM_INTR_CTRL;
+ }
+ return R_I2CD_INTR_CTRL;
+}
+
+static inline uint32_t aspeed_i2c_bus_intr_sts_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CM_INTR_STS;
+ }
+ return R_I2CD_INTR_STS;
+}
+
+static inline uint32_t aspeed_i2c_bus_pool_ctrl_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_POOL_CTRL;
+ }
+ return R_I2CD_POOL_CTRL;
+}
+
+static inline uint32_t aspeed_i2c_bus_byte_buf_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_MS_TXRX_BYTE_BUF;
+ }
+ return R_I2CD_BYTE_BUF;
+}
+
+static inline uint32_t aspeed_i2c_bus_dma_len_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_DMA_LEN;
+ }
+ return R_I2CD_DMA_LEN;
+}
+
+static inline uint32_t aspeed_i2c_bus_dma_addr_offset(AspeedI2CBus *bus)
+{
+ if (aspeed_i2c_is_new_mode(bus->controller)) {
+ return R_I2CC_DMA_ADDR;
+ }
+ return R_I2CD_DMA_ADDR;
+}
+
+static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus)
+{
+ return SHARED_ARRAY_FIELD_EX32(bus->regs, aspeed_i2c_bus_ctrl_offset(bus),
+ MASTER_EN);
+}
+
+static inline bool aspeed_i2c_bus_is_enabled(AspeedI2CBus *bus)
+{
+ uint32_t ctrl_reg = aspeed_i2c_bus_ctrl_offset(bus);
+ return SHARED_ARRAY_FIELD_EX32(bus->regs, ctrl_reg, MASTER_EN) ||
+ SHARED_ARRAY_FIELD_EX32(bus->regs, ctrl_reg, SLAVE_EN);
+}
I2CBus *aspeed_i2c_get_bus(AspeedI2CState *s, int busnr);
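
As a rough illustration of how the mode-dependent offset helpers above are meant to be used (a sketch, not part of the patch), a register accessor can index bus->regs through them and work in both the legacy I2CD layout and the new I2CC/I2CM/I2CS layout; the function name below is hypothetical:

    /* Hypothetical accessor; works in both register layouts. */
    static uint32_t aspeed_i2c_bus_read_cmd(AspeedI2CBus *bus)
    {
        /* R_I2CM_CMD when CTRL_GLOBAL selects the new mode, R_I2CD_CMD otherwise */
        uint32_t cmd_reg = aspeed_i2c_bus_cmd_offset(bus);

        return bus->regs[cmd_reg];
    }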
diff --git a/include/hw/i2c/bcm2835_i2c.h b/include/hw/i2c/bcm2835_i2c.h
new file mode 100644
index 0000000000..0a56df4720
--- /dev/null
+++ b/include/hw/i2c/bcm2835_i2c.h
@@ -0,0 +1,80 @@
+/*
+ * Broadcom Serial Controller (BSC)
+ *
+ * Copyright (c) 2024 Rayhan Faizel <rayhan.faizel@gmail.com>
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/i2c/i2c.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_I2C "bcm2835-i2c"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835I2CState, BCM2835_I2C)
+
+#define BCM2835_I2C_C 0x0 /* Control */
+#define BCM2835_I2C_S 0x4 /* Status */
+#define BCM2835_I2C_DLEN 0x8 /* Data Length */
+#define BCM2835_I2C_A 0xc /* Slave Address */
+#define BCM2835_I2C_FIFO 0x10 /* FIFO */
+#define BCM2835_I2C_DIV 0x14 /* Clock Divider */
+#define BCM2835_I2C_DEL 0x18 /* Data Delay */
+#define BCM2835_I2C_CLKT 0x20 /* Clock Stretch Timeout */
+
+#define BCM2835_I2C_C_I2CEN BIT(15) /* I2C enable */
+#define BCM2835_I2C_C_INTR BIT(10) /* Interrupt on RXR */
+#define BCM2835_I2C_C_INTT BIT(9) /* Interrupt on TXW */
+#define BCM2835_I2C_C_INTD BIT(8) /* Interrupt on DONE */
+#define BCM2835_I2C_C_ST BIT(7) /* Start transfer */
+#define BCM2835_I2C_C_CLEAR (BIT(5) | BIT(4)) /* Clear FIFO */
+#define BCM2835_I2C_C_READ BIT(0) /* I2C read mode */
+
+#define BCM2835_I2C_S_CLKT BIT(9) /* Clock stretch timeout */
+#define BCM2835_I2C_S_ERR BIT(8) /* Slave error */
+#define BCM2835_I2C_S_RXF BIT(7) /* RX FIFO full */
+#define BCM2835_I2C_S_TXE BIT(6) /* TX FIFO empty */
+#define BCM2835_I2C_S_RXD BIT(5) /* RX bytes available */
+#define BCM2835_I2C_S_TXD BIT(4) /* TX space available */
+#define BCM2835_I2C_S_RXR BIT(3) /* RX FIFO needs reading */
+#define BCM2835_I2C_S_TXW BIT(2) /* TX FIFO needs writing */
+#define BCM2835_I2C_S_DONE BIT(1) /* I2C Transfer complete */
+#define BCM2835_I2C_S_TA BIT(0) /* I2C Transfer active */
+
+struct BCM2835I2CState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+ I2CBus *bus;
+ qemu_irq irq;
+
+ uint32_t c;
+ uint32_t s;
+ uint32_t dlen;
+ uint32_t a;
+ uint32_t div;
+ uint32_t del;
+ uint32_t clkt;
+
+ uint32_t last_dlen;
+};
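
To show how the BSC register map above is typically driven, here is a sketch of a guest-side read sequence; writel()/readl() stand in for whatever MMIO accessors the guest would use and are assumptions, not part of this header:

    /* Sketch only: writel()/readl() are placeholder MMIO accessors. */
    static void bcm2835_i2c_read_example(uintptr_t base, uint8_t addr, uint16_t len)
    {
        writel(base + BCM2835_I2C_A, addr);           /* target slave address */
        writel(base + BCM2835_I2C_DLEN, len);         /* number of bytes to read */
        writel(base + BCM2835_I2C_C, BCM2835_I2C_C_I2CEN |
                                     BCM2835_I2C_C_CLEAR |  /* flush FIFO */
                                     BCM2835_I2C_C_READ |
                                     BCM2835_I2C_C_ST);     /* start transfer */

        while (!(readl(base + BCM2835_I2C_S) & BCM2835_I2C_S_DONE)) {
            /* drain BCM2835_I2C_FIFO whenever S.RXD is set */
        }
    }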
diff --git a/include/hw/i2c/bitbang_i2c.h b/include/hw/i2c/bitbang_i2c.h
index 92334e9016..a079e6d70f 100644
--- a/include/hw/i2c/bitbang_i2c.h
+++ b/include/hw/i2c/bitbang_i2c.h
@@ -3,6 +3,8 @@
#include "hw/i2c/i2c.h"
+#define TYPE_GPIO_I2C "gpio_i2c"
+
typedef struct bitbang_i2c_interface bitbang_i2c_interface;
#define BITBANG_I2C_SDA 0
diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h
index a9c030a512..2a3abacd1b 100644
--- a/include/hw/i2c/i2c.h
+++ b/include/hw/i2c/i2c.h
@@ -2,6 +2,7 @@
#define QEMU_I2C_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
/* The QEMU I2C implementation only supports simple transfers that complete
immediately. It does not support slave devices that need to be able to
@@ -11,26 +12,26 @@
enum i2c_event {
I2C_START_RECV,
I2C_START_SEND,
+ I2C_START_SEND_ASYNC,
I2C_FINISH,
    I2C_NACK /* Master NACKed a receive byte. */
};
-typedef struct I2CSlave I2CSlave;
+typedef struct I2CNodeList I2CNodeList;
#define TYPE_I2C_SLAVE "i2c-slave"
-#define I2C_SLAVE(obj) \
- OBJECT_CHECK(I2CSlave, (obj), TYPE_I2C_SLAVE)
-#define I2C_SLAVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(I2CSlaveClass, (klass), TYPE_I2C_SLAVE)
-#define I2C_SLAVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(I2CSlaveClass, (obj), TYPE_I2C_SLAVE)
-
-typedef struct I2CSlaveClass {
+OBJECT_DECLARE_TYPE(I2CSlave, I2CSlaveClass,
+ I2C_SLAVE)
+
+struct I2CSlaveClass {
DeviceClass parent_class;
/* Master to slave. Returns non-zero for a NAK, 0 for success. */
int (*send)(I2CSlave *s, uint8_t data);
+ /* Master to slave (asynchronous). Receiving slave must call i2c_ack(). */
+ void (*send_async)(I2CSlave *s, uint8_t data);
+
/*
* Slave to master. This cannot fail, the device should always
* return something here.
@@ -43,7 +44,17 @@ typedef struct I2CSlaveClass {
* return code is not used and should be zero.
*/
int (*event)(I2CSlave *s, enum i2c_event event);
-} I2CSlaveClass;
+
+ /*
+ * Check whether this device matches the address provided. Returns true
+ * if it matches (or broadcast is set) and adds the device to
+ * @current_devs; returns false otherwise.
+ *
+ * If @broadcast is true, match_and_add should add the device and
+ * return true.
+ */
+ bool (*match_and_add)(I2CSlave *candidate, uint8_t address, bool broadcast,
+ I2CNodeList *current_devs);
+};
struct I2CSlave {
DeviceState qdev;
@@ -53,7 +64,7 @@ struct I2CSlave {
};
#define TYPE_I2C_BUS "i2c-bus"
-#define I2C_BUS(obj) OBJECT_CHECK(I2CBus, (obj), TYPE_I2C_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(I2CBus, I2C_BUS)
typedef struct I2CNode I2CNode;
@@ -62,22 +73,86 @@ struct I2CNode {
QLIST_ENTRY(I2CNode) next;
};
+typedef struct I2CPendingMaster I2CPendingMaster;
+
+struct I2CPendingMaster {
+ QEMUBH *bh;
+ QSIMPLEQ_ENTRY(I2CPendingMaster) entry;
+};
+
+typedef QLIST_HEAD(I2CNodeList, I2CNode) I2CNodeList;
+typedef QSIMPLEQ_HEAD(I2CPendingMasters, I2CPendingMaster) I2CPendingMasters;
+
struct I2CBus {
BusState qbus;
- QLIST_HEAD(, I2CNode) current_devs;
+ I2CNodeList current_devs;
+ I2CPendingMasters pending_masters;
uint8_t saved_address;
bool broadcast;
+
+ /* Set from the slave currently mastering the bus. */
+ QEMUBH *bh;
};
I2CBus *i2c_init_bus(DeviceState *parent, const char *name);
-void i2c_set_slave_address(I2CSlave *dev, uint8_t address);
int i2c_bus_busy(I2CBus *bus);
-int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv);
+
+/**
+ * i2c_start_transfer: start a transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ * @is_recv: indicates the transfer direction
+ *
+ * When @is_recv is a known boolean constant, use the
+ * i2c_start_recv() or i2c_start_send() helper instead.
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_transfer(I2CBus *bus, uint8_t address, bool is_recv);
+
+/**
+ * i2c_start_recv: start a 'receive' transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_recv(I2CBus *bus, uint8_t address);
+
+/**
+ * i2c_start_send: start a 'send' transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_send(I2CBus *bus, uint8_t address);
+
+/**
+ * i2c_start_send_async: start an asynchronous 'send' transfer on an I2C bus.
+ *
+ * @bus: #I2CBus to be used
+ * @address: address of the slave
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int i2c_start_send_async(I2CBus *bus, uint8_t address);
+
+void i2c_schedule_pending_master(I2CBus *bus);
+
void i2c_end_transfer(I2CBus *bus);
void i2c_nack(I2CBus *bus);
-int i2c_send_recv(I2CBus *bus, uint8_t *data, bool send);
+void i2c_ack(I2CBus *bus);
+void i2c_bus_master(I2CBus *bus, QEMUBH *bh);
+void i2c_bus_release(I2CBus *bus);
int i2c_send(I2CBus *bus, uint8_t data);
+int i2c_send_async(I2CBus *bus, uint8_t data);
uint8_t i2c_recv(I2CBus *bus);
+bool i2c_scan_bus(I2CBus *bus, uint8_t address, bool broadcast,
+ I2CNodeList *current_devs);
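
For orientation, a minimal sketch of how a controller model typically drives the master-side helpers declared above (error handling trimmed, function name hypothetical):

    /* Write one byte to a slave, then read one back. */
    static int i2c_write_then_read_byte(I2CBus *bus, uint8_t addr,
                                        uint8_t out, uint8_t *in)
    {
        if (i2c_start_send(bus, addr)) {
            return -1;                   /* address NAKed */
        }
        i2c_send(bus, out);
        i2c_end_transfer(bus);

        if (i2c_start_recv(bus, addr)) {
            return -1;
        }
        *in = i2c_recv(bus);
        i2c_end_transfer(bus);
        return 0;
    }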
/**
* Create an I2C slave device on the heap.
@@ -102,7 +177,7 @@ I2CSlave *i2c_slave_new(const char *name, uint8_t addr);
I2CSlave *i2c_slave_create_simple(I2CBus *bus, const char *name, uint8_t addr);
/**
- * Realize and and drop a reference an I2C slave device
+ * Realize and drop a reference to an I2C slave device
* @dev: I2C slave device to realize
* @bus: I2C bus to put it on
* @addr: I2C address of the slave on the bus
@@ -131,8 +206,12 @@ I2CSlave *i2c_slave_create_simple(I2CBus *bus, const char *name, uint8_t addr);
*/
bool i2c_slave_realize_and_unref(I2CSlave *dev, I2CBus *bus, Error **errp);
-/* lm832x.c */
-void lm832x_key_event(DeviceState *dev, int key, int state);
+/**
+ * Set the I2C bus address of a slave device
+ * @dev: I2C slave device
+ * @address: I2C address of the slave when put on a bus
+ */
+void i2c_slave_set_address(I2CSlave *dev, uint8_t address);
extern const VMStateDescription vmstate_i2c_slave;
diff --git a/include/hw/i2c/i2c_mux_pca954x.h b/include/hw/i2c/i2c_mux_pca954x.h
new file mode 100644
index 0000000000..3dd25ec983
--- /dev/null
+++ b/include/hw/i2c/i2c_mux_pca954x.h
@@ -0,0 +1,19 @@
+#ifndef QEMU_I2C_MUX_PCA954X_H
+#define QEMU_I2C_MUX_PCA954X_H
+
+#include "hw/i2c/i2c.h"
+
+#define TYPE_PCA9546 "pca9546"
+#define TYPE_PCA9548 "pca9548"
+
+/**
+ * Retrieves the i2c bus associated with the specified channel on this i2c
+ * mux.
+ * @mux: an i2c mux device.
+ * @channel: the i2c channel requested
+ *
+ * Returns: a pointer to the associated i2c bus.
+ */
+I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel);
+
+#endif
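
A sketch of how a board model might wire this up; the downstream device type ("tmp105"), the addresses, and the function name are illustrative choices, not mandated by this header:

    /* Instantiate a pca9548 and hang a device off one of its channels. */
    static void wire_mux_example(I2CBus *parent_bus)
    {
        I2CSlave *mux = i2c_slave_create_simple(parent_bus, TYPE_PCA9548, 0x70);
        I2CBus *channel2 = pca954x_i2c_get_bus(mux, 2);

        i2c_slave_create_simple(channel2, "tmp105", 0x48);
    }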
diff --git a/include/hw/i2c/imx_i2c.h b/include/hw/i2c/imx_i2c.h
index 7c73a1fa28..e4f91339f5 100644
--- a/include/hw/i2c/imx_i2c.h
+++ b/include/hw/i2c/imx_i2c.h
@@ -22,9 +22,10 @@
#define IMX_I2C_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_I2C "imx.i2c"
-#define IMX_I2C(obj) OBJECT_CHECK(IMXI2CState, (obj), TYPE_IMX_I2C)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXI2CState, IMX_I2C)
#define IMX_I2C_MEM_SIZE 0x14
@@ -65,7 +66,7 @@
#define ADDR_RESET 0xFF00
-typedef struct IMXI2CState {
+struct IMXI2CState {
/*< private >*/
SysBusDevice parent_obj;
@@ -82,6 +83,6 @@ typedef struct IMXI2CState {
uint16_t i2sr;
uint16_t i2dr_read;
uint16_t i2dr_write;
-} IMXI2CState;
+};
#endif /* IMX_I2C_H */
diff --git a/include/hw/i2c/microbit_i2c.h b/include/hw/i2c/microbit_i2c.h
index 2bff36680c..3c29e09bf3 100644
--- a/include/hw/i2c/microbit_i2c.h
+++ b/include/hw/i2c/microbit_i2c.h
@@ -13,6 +13,7 @@
#include "hw/sysbus.h"
#include "hw/arm/nrf51.h"
+#include "qom/object.h"
#define NRF51_TWI_TASK_STARTRX 0x000
#define NRF51_TWI_TASK_STARTTX 0x008
@@ -26,17 +27,16 @@
#define NRF51_TWI_REG_ADDRESS 0x588
#define TYPE_MICROBIT_I2C "microbit.i2c"
-#define MICROBIT_I2C(obj) \
- OBJECT_CHECK(MicrobitI2CState, (obj), TYPE_MICROBIT_I2C)
+OBJECT_DECLARE_SIMPLE_TYPE(MicrobitI2CState, MICROBIT_I2C)
#define MICROBIT_I2C_NREGS (NRF51_PERIPHERAL_SIZE / sizeof(uint32_t))
-typedef struct {
+struct MicrobitI2CState {
SysBusDevice parent_obj;
MemoryRegion iomem;
uint32_t regs[MICROBIT_I2C_NREGS];
uint32_t read_idx;
-} MicrobitI2CState;
+};
#endif /* MICROBIT_I2C_H */
diff --git a/include/hw/i2c/npcm7xx_smbus.h b/include/hw/i2c/npcm7xx_smbus.h
new file mode 100644
index 0000000000..dc45963c0e
--- /dev/null
+++ b/include/hw/i2c/npcm7xx_smbus.h
@@ -0,0 +1,112 @@
+/*
+ * Nuvoton NPCM7xx SMBus Module.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_SMBUS_H
+#define NPCM7XX_SMBUS_H
+
+#include "exec/memory.h"
+#include "hw/i2c/i2c.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of addresses this module contains. Do not change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_SMBUS_NR_ADDRS 10
+
+/* Size of the FIFO buffer. */
+#define NPCM7XX_SMBUS_FIFO_SIZE 16
+
+typedef enum NPCM7xxSMBusStatus {
+ NPCM7XX_SMBUS_STATUS_IDLE,
+ NPCM7XX_SMBUS_STATUS_SENDING,
+ NPCM7XX_SMBUS_STATUS_RECEIVING,
+ NPCM7XX_SMBUS_STATUS_NEGACK,
+ NPCM7XX_SMBUS_STATUS_STOPPING_LAST_RECEIVE,
+ NPCM7XX_SMBUS_STATUS_STOPPING_NEGACK,
+} NPCM7xxSMBusStatus;
+
+/*
+ * struct NPCM7xxSMBusState - System Management Bus device state.
+ * @bus: The underlying I2C Bus.
+ * @irq: GIC interrupt line to fire on events (if enabled).
+ * @sda: The serial data register.
+ * @st: The status register.
+ * @cst: The control status register.
+ * @cst2: The control status register 2.
+ * @cst3: The control status register 3.
+ * @ctl1: The control register 1.
+ * @ctl2: The control register 2.
+ * @ctl3: The control register 3.
+ * @ctl4: The control register 4.
+ * @ctl5: The control register 5.
+ * @addr: The SMBus module's own addresses on the I2C bus.
+ * @scllt: The SCL low time register.
+ * @sclht: The SCL high time register.
+ * @fif_ctl: The FIFO control register.
+ * @fif_cts: The FIFO control status register.
+ * @fair_per: The fair period register.
+ * @txf_ctl: The transmit FIFO control register.
+ * @t_out: The SMBus timeout register.
+ * @txf_sts: The transmit FIFO status register.
+ * @rxf_sts: The receive FIFO status register.
+ * @rxf_ctl: The receive FIFO control register.
+ * @rx_fifo: The FIFO buffer for receiving in FIFO mode.
+ * @rx_cur: The current position of rx_fifo.
+ * @status: The current status of the SMBus.
+ */
+struct NPCM7xxSMBusState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ I2CBus *bus;
+ qemu_irq irq;
+
+ uint8_t sda;
+ uint8_t st;
+ uint8_t cst;
+ uint8_t cst2;
+ uint8_t cst3;
+ uint8_t ctl1;
+ uint8_t ctl2;
+ uint8_t ctl3;
+ uint8_t ctl4;
+ uint8_t ctl5;
+ uint8_t addr[NPCM7XX_SMBUS_NR_ADDRS];
+
+ uint8_t scllt;
+ uint8_t sclht;
+
+ uint8_t fif_ctl;
+ uint8_t fif_cts;
+ uint8_t fair_per;
+ uint8_t txf_ctl;
+ uint8_t t_out;
+ uint8_t txf_sts;
+ uint8_t rxf_sts;
+ uint8_t rxf_ctl;
+
+ uint8_t rx_fifo[NPCM7XX_SMBUS_FIFO_SIZE];
+ uint8_t rx_cur;
+
+ NPCM7xxSMBusStatus status;
+};
+
+#define TYPE_NPCM7XX_SMBUS "npcm7xx-smbus"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxSMBusState, NPCM7XX_SMBUS)
+
+#endif /* NPCM7XX_SMBUS_H */
diff --git a/include/hw/i2c/pmbus_device.h b/include/hw/i2c/pmbus_device.h
new file mode 100644
index 0000000000..f195c11384
--- /dev/null
+++ b/include/hw/i2c/pmbus_device.h
@@ -0,0 +1,564 @@
+/*
+ * QEMU PMBus device emulation
+ *
+ * Copyright 2021 Google LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PMBUS_DEVICE_H
+#define HW_PMBUS_DEVICE_H
+
+#include "qemu/bitops.h"
+#include "hw/i2c/smbus_slave.h"
+
+enum pmbus_registers {
+ PMBUS_PAGE = 0x00, /* R/W byte */
+ PMBUS_OPERATION = 0x01, /* R/W byte */
+ PMBUS_ON_OFF_CONFIG = 0x02, /* R/W byte */
+ PMBUS_CLEAR_FAULTS = 0x03, /* Send Byte */
+ PMBUS_PHASE = 0x04, /* R/W byte */
+ PMBUS_PAGE_PLUS_WRITE = 0x05, /* Block Write-only */
+ PMBUS_PAGE_PLUS_READ = 0x06, /* Block Read-only */
+ PMBUS_WRITE_PROTECT = 0x10, /* R/W byte */
+ PMBUS_STORE_DEFAULT_ALL = 0x11, /* Send Byte */
+ PMBUS_RESTORE_DEFAULT_ALL = 0x12, /* Send Byte */
+ PMBUS_STORE_DEFAULT_CODE = 0x13, /* Write-only Byte */
+ PMBUS_RESTORE_DEFAULT_CODE = 0x14, /* Write-only Byte */
+ PMBUS_STORE_USER_ALL = 0x15, /* Send Byte */
+ PMBUS_RESTORE_USER_ALL = 0x16, /* Send Byte */
+ PMBUS_STORE_USER_CODE = 0x17, /* Write-only Byte */
+ PMBUS_RESTORE_USER_CODE = 0x18, /* Write-only Byte */
+ PMBUS_CAPABILITY = 0x19, /* Read-Only byte */
+ PMBUS_QUERY = 0x1A, /* Write-Only */
+ PMBUS_SMBALERT_MASK = 0x1B, /* Block read, Word write */
+ PMBUS_VOUT_MODE = 0x20, /* R/W byte */
+ PMBUS_VOUT_COMMAND = 0x21, /* R/W word */
+ PMBUS_VOUT_TRIM = 0x22, /* R/W word */
+ PMBUS_VOUT_CAL_OFFSET = 0x23, /* R/W word */
+ PMBUS_VOUT_MAX = 0x24, /* R/W word */
+ PMBUS_VOUT_MARGIN_HIGH = 0x25, /* R/W word */
+ PMBUS_VOUT_MARGIN_LOW = 0x26, /* R/W word */
+ PMBUS_VOUT_TRANSITION_RATE = 0x27, /* R/W word */
+ PMBUS_VOUT_DROOP = 0x28, /* R/W word */
+ PMBUS_VOUT_SCALE_LOOP = 0x29, /* R/W word */
+ PMBUS_VOUT_SCALE_MONITOR = 0x2A, /* R/W word */
+ PMBUS_VOUT_MIN = 0x2B, /* R/W word */
+ PMBUS_COEFFICIENTS = 0x30, /* Read-only block 5 bytes */
+ PMBUS_POUT_MAX = 0x31, /* R/W word */
+ PMBUS_MAX_DUTY = 0x32, /* R/W word */
+ PMBUS_FREQUENCY_SWITCH = 0x33, /* R/W word */
+ PMBUS_VIN_ON = 0x35, /* R/W word */
+ PMBUS_VIN_OFF = 0x36, /* R/W word */
+ PMBUS_INTERLEAVE = 0x37, /* R/W word */
+ PMBUS_IOUT_CAL_GAIN = 0x38, /* R/W word */
+ PMBUS_IOUT_CAL_OFFSET = 0x39, /* R/W word */
+ PMBUS_FAN_CONFIG_1_2 = 0x3A, /* R/W byte */
+ PMBUS_FAN_COMMAND_1 = 0x3B, /* R/W word */
+ PMBUS_FAN_COMMAND_2 = 0x3C, /* R/W word */
+ PMBUS_FAN_CONFIG_3_4 = 0x3D, /* R/W byte */
+ PMBUS_FAN_COMMAND_3 = 0x3E, /* R/W word */
+ PMBUS_FAN_COMMAND_4 = 0x3F, /* R/W word */
+ PMBUS_VOUT_OV_FAULT_LIMIT = 0x40, /* R/W word */
+ PMBUS_VOUT_OV_FAULT_RESPONSE = 0x41, /* R/W byte */
+ PMBUS_VOUT_OV_WARN_LIMIT = 0x42, /* R/W word */
+ PMBUS_VOUT_UV_WARN_LIMIT = 0x43, /* R/W word */
+ PMBUS_VOUT_UV_FAULT_LIMIT = 0x44, /* R/W word */
+ PMBUS_VOUT_UV_FAULT_RESPONSE = 0x45, /* R/W byte */
+ PMBUS_IOUT_OC_FAULT_LIMIT = 0x46, /* R/W word */
+ PMBUS_IOUT_OC_FAULT_RESPONSE = 0x47, /* R/W byte */
+ PMBUS_IOUT_OC_LV_FAULT_LIMIT = 0x48, /* R/W word */
+ PMBUS_IOUT_OC_LV_FAULT_RESPONSE = 0x49, /* R/W byte */
+ PMBUS_IOUT_OC_WARN_LIMIT = 0x4A, /* R/W word */
+ PMBUS_IOUT_UC_FAULT_LIMIT = 0x4B, /* R/W word */
+ PMBUS_IOUT_UC_FAULT_RESPONSE = 0x4C, /* R/W byte */
+ PMBUS_OT_FAULT_LIMIT = 0x4F, /* R/W word */
+ PMBUS_OT_FAULT_RESPONSE = 0x50, /* R/W byte */
+ PMBUS_OT_WARN_LIMIT = 0x51, /* R/W word */
+ PMBUS_UT_WARN_LIMIT = 0x52, /* R/W word */
+ PMBUS_UT_FAULT_LIMIT = 0x53, /* R/W word */
+ PMBUS_UT_FAULT_RESPONSE = 0x54, /* R/W byte */
+ PMBUS_VIN_OV_FAULT_LIMIT = 0x55, /* R/W word */
+ PMBUS_VIN_OV_FAULT_RESPONSE = 0x56, /* R/W byte */
+ PMBUS_VIN_OV_WARN_LIMIT = 0x57, /* R/W word */
+ PMBUS_VIN_UV_WARN_LIMIT = 0x58, /* R/W word */
+ PMBUS_VIN_UV_FAULT_LIMIT = 0x59, /* R/W word */
+ PMBUS_VIN_UV_FAULT_RESPONSE = 0x5A, /* R/W byte */
+ PMBUS_IIN_OC_FAULT_LIMIT = 0x5B, /* R/W word */
+ PMBUS_IIN_OC_FAULT_RESPONSE = 0x5C, /* R/W byte */
+ PMBUS_IIN_OC_WARN_LIMIT = 0x5D, /* R/W word */
+ PMBUS_POWER_GOOD_ON = 0x5E, /* R/W word */
+ PMBUS_POWER_GOOD_OFF = 0x5F, /* R/W word */
+ PMBUS_TON_DELAY = 0x60, /* R/W word */
+ PMBUS_TON_RISE = 0x61, /* R/W word */
+ PMBUS_TON_MAX_FAULT_LIMIT = 0x62, /* R/W word */
+ PMBUS_TON_MAX_FAULT_RESPONSE = 0x63, /* R/W byte */
+ PMBUS_TOFF_DELAY = 0x64, /* R/W word */
+ PMBUS_TOFF_FALL = 0x65, /* R/W word */
+ PMBUS_TOFF_MAX_WARN_LIMIT = 0x66, /* R/W word */
+ PMBUS_POUT_OP_FAULT_LIMIT = 0x68, /* R/W word */
+ PMBUS_POUT_OP_FAULT_RESPONSE = 0x69, /* R/W byte */
+ PMBUS_POUT_OP_WARN_LIMIT = 0x6A, /* R/W word */
+ PMBUS_PIN_OP_WARN_LIMIT = 0x6B, /* R/W word */
+ PMBUS_STATUS_BYTE = 0x78, /* R/W byte */
+ PMBUS_STATUS_WORD = 0x79, /* R/W word */
+ PMBUS_STATUS_VOUT = 0x7A, /* R/W byte */
+ PMBUS_STATUS_IOUT = 0x7B, /* R/W byte */
+ PMBUS_STATUS_INPUT = 0x7C, /* R/W byte */
+ PMBUS_STATUS_TEMPERATURE = 0x7D, /* R/W byte */
+ PMBUS_STATUS_CML = 0x7E, /* R/W byte */
+ PMBUS_STATUS_OTHER = 0x7F, /* R/W byte */
+ PMBUS_STATUS_MFR_SPECIFIC = 0x80, /* R/W byte */
+ PMBUS_STATUS_FANS_1_2 = 0x81, /* R/W byte */
+ PMBUS_STATUS_FANS_3_4 = 0x82, /* R/W byte */
+ PMBUS_READ_EIN = 0x86, /* Read-Only block 5 bytes */
+ PMBUS_READ_EOUT = 0x87, /* Read-Only block 5 bytes */
+ PMBUS_READ_VIN = 0x88, /* Read-Only word */
+ PMBUS_READ_IIN = 0x89, /* Read-Only word */
+ PMBUS_READ_VCAP = 0x8A, /* Read-Only word */
+ PMBUS_READ_VOUT = 0x8B, /* Read-Only word */
+ PMBUS_READ_IOUT = 0x8C, /* Read-Only word */
+ PMBUS_READ_TEMPERATURE_1 = 0x8D, /* Read-Only word */
+ PMBUS_READ_TEMPERATURE_2 = 0x8E, /* Read-Only word */
+ PMBUS_READ_TEMPERATURE_3 = 0x8F, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_1 = 0x90, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_2 = 0x91, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_3 = 0x92, /* Read-Only word */
+ PMBUS_READ_FAN_SPEED_4 = 0x93, /* Read-Only word */
+ PMBUS_READ_DUTY_CYCLE = 0x94, /* Read-Only word */
+ PMBUS_READ_FREQUENCY = 0x95, /* Read-Only word */
+ PMBUS_READ_POUT = 0x96, /* Read-Only word */
+ PMBUS_READ_PIN = 0x97, /* Read-Only word */
+ PMBUS_REVISION = 0x98, /* Read-Only byte */
+ PMBUS_MFR_ID = 0x99, /* R/W block */
+ PMBUS_MFR_MODEL = 0x9A, /* R/W block */
+ PMBUS_MFR_REVISION = 0x9B, /* R/W block */
+ PMBUS_MFR_LOCATION = 0x9C, /* R/W block */
+ PMBUS_MFR_DATE = 0x9D, /* R/W block */
+ PMBUS_MFR_SERIAL = 0x9E, /* R/W block */
+ PMBUS_APP_PROFILE_SUPPORT = 0x9F, /* Read-Only block-read */
+ PMBUS_MFR_VIN_MIN = 0xA0, /* Read-Only word */
+ PMBUS_MFR_VIN_MAX = 0xA1, /* Read-Only word */
+ PMBUS_MFR_IIN_MAX = 0xA2, /* Read-Only word */
+ PMBUS_MFR_PIN_MAX = 0xA3, /* Read-Only word */
+ PMBUS_MFR_VOUT_MIN = 0xA4, /* Read-Only word */
+ PMBUS_MFR_VOUT_MAX = 0xA5, /* Read-Only word */
+ PMBUS_MFR_IOUT_MAX = 0xA6, /* Read-Only word */
+ PMBUS_MFR_POUT_MAX = 0xA7, /* Read-Only word */
+ PMBUS_MFR_TAMBIENT_MAX = 0xA8, /* Read-Only word */
+ PMBUS_MFR_TAMBIENT_MIN = 0xA9, /* Read-Only word */
+ PMBUS_MFR_EFFICIENCY_LL = 0xAA, /* Read-Only block 14 bytes */
+ PMBUS_MFR_EFFICIENCY_HL = 0xAB, /* Read-Only block 14 bytes */
+ PMBUS_MFR_PIN_ACCURACY = 0xAC, /* Read-Only byte */
+ PMBUS_IC_DEVICE_ID = 0xAD, /* Read-Only block-read */
+ PMBUS_IC_DEVICE_REV = 0xAE, /* Read-Only block-read */
+ PMBUS_MFR_MAX_TEMP_1 = 0xC0, /* R/W word */
+ PMBUS_MFR_MAX_TEMP_2 = 0xC1, /* R/W word */
+ PMBUS_MFR_MAX_TEMP_3 = 0xC2, /* R/W word */
+ PMBUS_IDLE_STATE = 0xFF,
+};
+
+/* STATUS_WORD */
+#define PB_STATUS_VOUT BIT(15)
+#define PB_STATUS_IOUT_POUT BIT(14)
+#define PB_STATUS_INPUT BIT(13)
+#define PB_STATUS_WORD_MFR BIT(12)
+#define PB_STATUS_POWER_GOOD_N BIT(11)
+#define PB_STATUS_FAN BIT(10)
+#define PB_STATUS_OTHER BIT(9)
+#define PB_STATUS_UNKNOWN BIT(8)
+/* STATUS_BYTE */
+#define PB_STATUS_BUSY BIT(7)
+#define PB_STATUS_OFF BIT(6)
+#define PB_STATUS_VOUT_OV BIT(5)
+#define PB_STATUS_IOUT_OC BIT(4)
+#define PB_STATUS_VIN_UV BIT(3)
+#define PB_STATUS_TEMPERATURE BIT(2)
+#define PB_STATUS_CML BIT(1)
+#define PB_STATUS_NONE_ABOVE BIT(0)
+
+/* STATUS_VOUT */
+#define PB_STATUS_VOUT_OV_FAULT BIT(7) /* Output Overvoltage Fault */
+#define PB_STATUS_VOUT_OV_WARN BIT(6) /* Output Overvoltage Warning */
+#define PB_STATUS_VOUT_UV_WARN BIT(5) /* Output Undervoltage Warning */
+#define PB_STATUS_VOUT_UV_FAULT BIT(4) /* Output Undervoltage Fault */
+#define PB_STATUS_VOUT_MAX BIT(3)
+#define PB_STATUS_VOUT_TON_MAX_FAULT BIT(2)
+#define PB_STATUS_VOUT_TOFF_MAX_WARN BIT(1)
+
+/* STATUS_IOUT */
+#define PB_STATUS_IOUT_OC_FAULT BIT(7) /* Output Overcurrent Fault */
+#define PB_STATUS_IOUT_OC_LV_FAULT BIT(6) /* Output OC And Low Voltage Fault */
+#define PB_STATUS_IOUT_OC_WARN BIT(5) /* Output Overcurrent Warning */
+#define PB_STATUS_IOUT_UC_FAULT BIT(4) /* Output Undercurrent Fault */
+#define PB_STATUS_CURR_SHARE BIT(3) /* Current Share Fault */
+#define PB_STATUS_PWR_LIM_MODE BIT(2) /* In Power Limiting Mode */
+#define PB_STATUS_POUT_OP_FAULT BIT(1) /* Output Overpower Fault */
+#define PB_STATUS_POUT_OP_WARN BIT(0) /* Output Overpower Warning */
+
+/* STATUS_INPUT */
+#define PB_STATUS_INPUT_VIN_OV_FAULT BIT(7) /* Input Overvoltage Fault */
+#define PB_STATUS_INPUT_VIN_OV_WARN BIT(6) /* Input Overvoltage Warning */
+#define PB_STATUS_INPUT_VIN_UV_WARN BIT(5) /* Input Undervoltage Warning */
+#define PB_STATUS_INPUT_VIN_UV_FAULT BIT(4) /* Input Undervoltage Fault */
+#define PB_STATUS_INPUT_IIN_OC_FAULT BIT(2) /* Input Overcurrent Fault */
+#define PB_STATUS_INPUT_IIN_OC_WARN BIT(1) /* Input Overcurrent Warning */
+#define PB_STATUS_INPUT_PIN_OP_WARN BIT(0) /* Input Overpower Warning */
+
+/* STATUS_TEMPERATURE */
+#define PB_STATUS_OT_FAULT BIT(7) /* Overtemperature Fault */
+#define PB_STATUS_OT_WARN BIT(6) /* Overtemperature Warning */
+#define PB_STATUS_UT_WARN BIT(5) /* Undertemperature Warning */
+#define PB_STATUS_UT_FAULT BIT(4) /* Undertemperature Fault */
+
+/* STATUS_CML */
+#define PB_CML_FAULT_INVALID_CMD BIT(7) /* Invalid/Unsupported Command */
+#define PB_CML_FAULT_INVALID_DATA BIT(6) /* Invalid/Unsupported Data */
+#define PB_CML_FAULT_PEC BIT(5) /* Packet Error Check Failed */
+#define PB_CML_FAULT_MEMORY BIT(4) /* Memory Fault Detected */
+#define PB_CML_FAULT_PROCESSOR BIT(3) /* Processor Fault Detected */
+#define PB_CML_FAULT_OTHER_COMM BIT(1) /* Other communication fault */
+#define PB_CML_FAULT_OTHER_MEM_LOGIC BIT(0) /* Other Memory Or Logic Fault */
+
+/* OPERATION */
+#define PB_OP_ON BIT(7) /* PSU is switched on */
+#define PB_OP_MARGIN_HIGH BIT(5) /* PSU vout is set to margin high */
+#define PB_OP_MARGIN_LOW BIT(4) /* PSU vout is set to margin low */
+
+/* PAGES */
+#define PB_MAX_PAGES 0x1F
+#define PB_ALL_PAGES 0xFF
+
+#define PMBUS_ERR_BYTE 0xFF
+
+#define TYPE_PMBUS_DEVICE "pmbus-device"
+OBJECT_DECLARE_TYPE(PMBusDevice, PMBusDeviceClass,
+ PMBUS_DEVICE)
+
+/* flags */
+#define PB_HAS_COEFFICIENTS BIT_ULL(9)
+#define PB_HAS_VIN BIT_ULL(10)
+#define PB_HAS_VOUT BIT_ULL(11)
+#define PB_HAS_VOUT_MARGIN BIT_ULL(12)
+#define PB_HAS_VIN_RATING BIT_ULL(13)
+#define PB_HAS_VOUT_RATING BIT_ULL(14)
+#define PB_HAS_VOUT_MODE BIT_ULL(15)
+#define PB_HAS_VCAP BIT_ULL(16)
+#define PB_HAS_IOUT BIT_ULL(21)
+#define PB_HAS_IIN BIT_ULL(22)
+#define PB_HAS_IOUT_RATING BIT_ULL(23)
+#define PB_HAS_IIN_RATING BIT_ULL(24)
+#define PB_HAS_IOUT_GAIN BIT_ULL(25)
+#define PB_HAS_POUT BIT_ULL(30)
+#define PB_HAS_PIN BIT_ULL(31)
+#define PB_HAS_EIN BIT_ULL(32)
+#define PB_HAS_EOUT BIT_ULL(33)
+#define PB_HAS_POUT_RATING BIT_ULL(34)
+#define PB_HAS_PIN_RATING BIT_ULL(35)
+#define PB_HAS_TEMPERATURE BIT_ULL(40)
+#define PB_HAS_TEMP2 BIT_ULL(41)
+#define PB_HAS_TEMP3 BIT_ULL(42)
+#define PB_HAS_TEMP_RATING BIT_ULL(43)
+#define PB_HAS_FAN BIT_ULL(44)
+#define PB_HAS_MFR_INFO BIT_ULL(50)
+#define PB_HAS_STATUS_MFR_SPECIFIC BIT_ULL(51)
+
+struct PMBusDeviceClass {
+ SMBusDeviceClass parent_class;
+ uint8_t device_num_pages;
+
+ /**
+ * Implement quick_cmd, receive_byte, and write_data to support
+ * non-standard PMBus functionality.
+ */
+ void (*quick_cmd)(PMBusDevice *dev, uint8_t read);
+ int (*write_data)(PMBusDevice *dev, const uint8_t *buf, uint8_t len);
+ uint8_t (*receive_byte)(PMBusDevice *dev);
+};
+
+/*
+ * According to the spec, each page may offer the full range of PMBus commands
+ * available for each output or non-PMBus device.
+ * Therefore, we can't assume that any registers will always be the same across
+ * all pages.
+ * Page 0xFF is intended for writes to all pages.
+ */
+typedef struct PMBusPage {
+ uint64_t page_flags;
+
+ uint8_t page; /* R/W byte */
+ uint8_t operation; /* R/W byte */
+ uint8_t on_off_config; /* R/W byte */
+ uint8_t write_protect; /* R/W byte */
+ uint8_t phase; /* R/W byte */
+ uint8_t vout_mode; /* R/W byte */
+ uint16_t vout_command; /* R/W word */
+ uint16_t vout_trim; /* R/W word */
+ uint16_t vout_cal_offset; /* R/W word */
+ uint16_t vout_max; /* R/W word */
+ uint16_t vout_margin_high; /* R/W word */
+ uint16_t vout_margin_low; /* R/W word */
+ uint16_t vout_transition_rate; /* R/W word */
+ uint16_t vout_droop; /* R/W word */
+ uint16_t vout_scale_loop; /* R/W word */
+ uint16_t vout_scale_monitor; /* R/W word */
+ uint16_t vout_min; /* R/W word */
+ uint8_t coefficients[5]; /* Read-only block 5 bytes */
+ uint16_t pout_max; /* R/W word */
+ uint16_t max_duty; /* R/W word */
+ uint16_t frequency_switch; /* R/W word */
+ uint16_t vin_on; /* R/W word */
+ uint16_t vin_off; /* R/W word */
+ uint16_t iout_cal_gain; /* R/W word */
+ uint16_t iout_cal_offset; /* R/W word */
+ uint8_t fan_config_1_2; /* R/W byte */
+ uint16_t fan_command_1; /* R/W word */
+ uint16_t fan_command_2; /* R/W word */
+ uint8_t fan_config_3_4; /* R/W byte */
+ uint16_t fan_command_3; /* R/W word */
+ uint16_t fan_command_4; /* R/W word */
+ uint16_t vout_ov_fault_limit; /* R/W word */
+ uint8_t vout_ov_fault_response; /* R/W byte */
+ uint16_t vout_ov_warn_limit; /* R/W word */
+ uint16_t vout_uv_warn_limit; /* R/W word */
+ uint16_t vout_uv_fault_limit; /* R/W word */
+ uint8_t vout_uv_fault_response; /* R/W byte */
+ uint16_t iout_oc_fault_limit; /* R/W word */
+ uint8_t iout_oc_fault_response; /* R/W byte */
+ uint16_t iout_oc_lv_fault_limit; /* R/W word */
+ uint8_t iout_oc_lv_fault_response; /* R/W byte */
+ uint16_t iout_oc_warn_limit; /* R/W word */
+ uint16_t iout_uc_fault_limit; /* R/W word */
+ uint8_t iout_uc_fault_response; /* R/W byte */
+ uint16_t ot_fault_limit; /* R/W word */
+ uint8_t ot_fault_response; /* R/W byte */
+ uint16_t ot_warn_limit; /* R/W word */
+ uint16_t ut_warn_limit; /* R/W word */
+ uint16_t ut_fault_limit; /* R/W word */
+ uint8_t ut_fault_response; /* R/W byte */
+ uint16_t vin_ov_fault_limit; /* R/W word */
+ uint8_t vin_ov_fault_response; /* R/W byte */
+ uint16_t vin_ov_warn_limit; /* R/W word */
+ uint16_t vin_uv_warn_limit; /* R/W word */
+ uint16_t vin_uv_fault_limit; /* R/W word */
+ uint8_t vin_uv_fault_response; /* R/W byte */
+ uint16_t iin_oc_fault_limit; /* R/W word */
+ uint8_t iin_oc_fault_response; /* R/W byte */
+ uint16_t iin_oc_warn_limit; /* R/W word */
+ uint16_t power_good_on; /* R/W word */
+ uint16_t power_good_off; /* R/W word */
+ uint16_t ton_delay; /* R/W word */
+ uint16_t ton_rise; /* R/W word */
+ uint16_t ton_max_fault_limit; /* R/W word */
+ uint8_t ton_max_fault_response; /* R/W byte */
+ uint16_t toff_delay; /* R/W word */
+ uint16_t toff_fall; /* R/W word */
+ uint16_t toff_max_warn_limit; /* R/W word */
+ uint16_t pout_op_fault_limit; /* R/W word */
+ uint8_t pout_op_fault_response; /* R/W byte */
+ uint16_t pout_op_warn_limit; /* R/W word */
+ uint16_t pin_op_warn_limit; /* R/W word */
+ uint16_t status_word; /* R/W word */
+ uint8_t status_vout; /* R/W byte */
+ uint8_t status_iout; /* R/W byte */
+ uint8_t status_input; /* R/W byte */
+ uint8_t status_temperature; /* R/W byte */
+ uint8_t status_cml; /* R/W byte */
+ uint8_t status_other; /* R/W byte */
+ uint8_t status_mfr_specific; /* R/W byte */
+ uint8_t status_fans_1_2; /* R/W byte */
+ uint8_t status_fans_3_4; /* R/W byte */
+ uint8_t read_ein[5]; /* Read-Only block 5 bytes */
+ uint8_t read_eout[5]; /* Read-Only block 5 bytes */
+ uint16_t read_vin; /* Read-Only word */
+ uint16_t read_iin; /* Read-Only word */
+ uint16_t read_vcap; /* Read-Only word */
+ uint16_t read_vout; /* Read-Only word */
+ uint16_t read_iout; /* Read-Only word */
+ uint16_t read_temperature_1; /* Read-Only word */
+ uint16_t read_temperature_2; /* Read-Only word */
+ uint16_t read_temperature_3; /* Read-Only word */
+ uint16_t read_fan_speed_1; /* Read-Only word */
+ uint16_t read_fan_speed_2; /* Read-Only word */
+ uint16_t read_fan_speed_3; /* Read-Only word */
+ uint16_t read_fan_speed_4; /* Read-Only word */
+ uint16_t read_duty_cycle; /* Read-Only word */
+ uint16_t read_frequency; /* Read-Only word */
+ uint16_t read_pout; /* Read-Only word */
+ uint16_t read_pin; /* Read-Only word */
+ uint8_t revision; /* Read-Only byte */
+ const char *mfr_id; /* R/W block */
+ const char *mfr_model; /* R/W block */
+ const char *mfr_revision; /* R/W block */
+ const char *mfr_location; /* R/W block */
+ const char *mfr_date; /* R/W block */
+ const char *mfr_serial; /* R/W block */
+ const char *app_profile_support; /* Read-Only block-read */
+ uint16_t mfr_vin_min; /* Read-Only word */
+ uint16_t mfr_vin_max; /* Read-Only word */
+ uint16_t mfr_iin_max; /* Read-Only word */
+ uint16_t mfr_pin_max; /* Read-Only word */
+ uint16_t mfr_vout_min; /* Read-Only word */
+ uint16_t mfr_vout_max; /* Read-Only word */
+ uint16_t mfr_iout_max; /* Read-Only word */
+ uint16_t mfr_pout_max; /* Read-Only word */
+ uint16_t mfr_tambient_max; /* Read-Only word */
+ uint16_t mfr_tambient_min; /* Read-Only word */
+ uint8_t mfr_efficiency_ll[14]; /* Read-Only block 14 bytes */
+ uint8_t mfr_efficiency_hl[14]; /* Read-Only block 14 bytes */
+ uint8_t mfr_pin_accuracy; /* Read-Only byte */
+ uint16_t mfr_max_temp_1; /* R/W word */
+ uint16_t mfr_max_temp_2; /* R/W word */
+ uint16_t mfr_max_temp_3; /* R/W word */
+} PMBusPage;
+
+/* State */
+struct PMBusDevice {
+ SMBusDevice smb;
+
+ uint8_t num_pages;
+ uint8_t code;
+ uint8_t page;
+
+ /*
+ * PMBus registers are stored in a PMBusPage structure allocated by
+ * calling pmbus_pages_alloc()
+ */
+ PMBusPage *pages;
+ uint8_t capability;
+
+
+ int32_t in_buf_len;
+ uint8_t *in_buf;
+ int32_t out_buf_len;
+ uint8_t out_buf[SMBUS_DATA_MAX_LEN];
+};
+
+/**
+ * Direct mode coefficients
+ * @var m - mantissa
+ * @var b - offset
+ * @var R - exponent
+ */
+typedef struct PMBusCoefficients {
+ int32_t m; /* mantissa */
+ int64_t b; /* offset */
+ int32_t R; /* exponent */
+} PMBusCoefficients;
+
+/**
+ * VOUT_Mode bit fields
+ */
+typedef struct PMBusVoutMode {
+ uint8_t mode:3;
+ int8_t exp:5;
+} PMBusVoutMode;
+
+/**
+ * Convert sensor values to direct mode format
+ *
+ * Y = (m * x + b) * 10^R
+ *
+ * @return uint16_t
+ */
+uint16_t pmbus_data2direct_mode(PMBusCoefficients c, uint32_t value);
+
+/**
+ * Convert direct mode formatted data into sensor reading
+ *
+ * X = (Y * 10^-R - b) / m
+ *
+ * @return uint32_t
+ */
+uint32_t pmbus_direct_mode2data(PMBusCoefficients c, uint16_t value);
+
+/**
+ * Convert sensor values to linear mode format
+ *
+ * L = D * 2^(-e)
+ *
+ * @return uint16_t
+ */
+uint16_t pmbus_data2linear_mode(uint16_t value, int exp);
+
+/**
+ * Convert linear mode formatted data into sensor reading
+ *
+ * D = L * 2^e
+ *
+ * @return uint16
+ */
+uint16_t pmbus_linear_mode2data(uint16_t value, int exp);
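
As a quick sanity check of the direct-mode helpers, here is an illustrative round trip; the coefficients are made up (real devices publish theirs through PMBUS_COEFFICIENTS), and the function is a sketch rather than anything in the patch:

    /* Illustrative only: m, b, R chosen for easy arithmetic. */
    static void pmbus_direct_mode_example(void)
    {
        PMBusCoefficients c = { .m = 100, .b = 0, .R = -1 };

        uint16_t y = pmbus_data2direct_mode(c, 125); /* (100 * 125 + 0) * 10^-1 = 1250 */
        uint32_t x = pmbus_direct_mode2data(c, y);   /* (1250 * 10^1 - 0) / 100 = 125  */

        assert(x == 125);                            /* round trip preserves the reading */
    }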
+
+/**
+ * @brief Send a block of data over PMBus
+ * Assumes that the bytes in the block are already ordered correctly and
+ * that the length has been prepended to the block if necessary:
+ * | low_byte | ... | high_byte |
+ * @param state - maintains state of the PMBus device
+ * @param data - byte array to be sent by the device
+ * @param len - number of bytes to send
+ */
+void pmbus_send(PMBusDevice *state, const uint8_t *data, uint16_t len);
+void pmbus_send8(PMBusDevice *state, uint8_t data);
+void pmbus_send16(PMBusDevice *state, uint16_t data);
+void pmbus_send32(PMBusDevice *state, uint32_t data);
+void pmbus_send64(PMBusDevice *state, uint64_t data);
+
+/**
+ * @brief Send a string over PMBus with length prepended.
+ * Length is calculated using str_len()
+ */
+void pmbus_send_string(PMBusDevice *state, const char *data);
+
+/**
+ * @brief Receive data sent with Block Write.
+ * @param dest - memory with enough capacity to receive the write
+ * @param len - the capacity of dest
+ */
+uint8_t pmbus_receive_block(PMBusDevice *pmdev, uint8_t *dest, size_t len);
+
+/**
+ * @brief Receive data over PMBus
+ * These methods help track how much data is being received over PMBus
+ * and log to GUEST_ERROR if too much or too little is sent.
+ */
+uint8_t pmbus_receive8(PMBusDevice *pmdev);
+uint16_t pmbus_receive16(PMBusDevice *pmdev);
+uint32_t pmbus_receive32(PMBusDevice *pmdev);
+uint64_t pmbus_receive64(PMBusDevice *pmdev);
+
+/**
+ * PMBus page config must be called before any page is first used.
+ * It will allocate memory for all the pages if needed.
+ * Passed-in flags overwrite existing flags, if any.
+ * @param page_index the page to which the flags are applied; setting
+ * page_index to 0xFF applies the passed-in flags to all pages.
+ * @param flags the PB_HAS_* feature flags to apply to the page(s)
+ */
+int pmbus_page_config(PMBusDevice *pmdev, uint8_t page_index, uint64_t flags);
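
Sketch of the expected call pattern at device-init time: configure which feature groups a page exposes, then fill in readings. The device model, values, and function name here are hypothetical:

    /* One page exposing output voltage and a temperature reading. */
    static void sensor_init_pages(PMBusDevice *pmdev)
    {
        pmbus_page_config(pmdev, 0, PB_HAS_VOUT | PB_HAS_VOUT_MODE |
                                    PB_HAS_TEMPERATURE);

        pmdev->pages[0].read_vout = 1200;          /* raw register value */
        pmdev->pages[0].read_temperature_1 = 30;
    }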
+
+/**
+ * Update the status registers when sensor values change.
+ * Useful if modifying sensors through qmp, this way status registers get
+ * updated
+ */
+void pmbus_check_limits(PMBusDevice *pmdev);
+
+/**
+ * Enter an idle state where only the PMBUS_ERR_BYTE will be returned
+ * indefinitely until a new command is issued.
+ */
+void pmbus_idle(PMBusDevice *pmdev);
+
+extern const VMStateDescription vmstate_pmbus_device;
+
+#define VMSTATE_PMBUS_DEVICE(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(PMBusDevice), \
+ .vmsd = &vmstate_pmbus_device, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PMBusDevice), \
+}
+
+#endif
diff --git a/include/hw/i2c/pnv_i2c_regs.h b/include/hw/i2c/pnv_i2c_regs.h
new file mode 100644
index 0000000000..85e96ff480
--- /dev/null
+++ b/include/hw/i2c/pnv_i2c_regs.h
@@ -0,0 +1,143 @@
+/*
+ * PowerNV I2C Controller Register Definitions
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PNV_I2C_REGS_H
+#define PNV_I2C_REGS_H
+
+/* I2C FIFO register */
+#define I2C_FIFO_REG 0x4
+#define I2C_FIFO PPC_BITMASK(0, 7)
+
+/* I2C command register */
+#define I2C_CMD_REG 0x5
+#define I2C_CMD_WITH_START PPC_BIT(0)
+#define I2C_CMD_WITH_ADDR PPC_BIT(1)
+#define I2C_CMD_READ_CONT PPC_BIT(2)
+#define I2C_CMD_WITH_STOP PPC_BIT(3)
+#define I2C_CMD_INTR_STEERING PPC_BITMASK(6, 7) /* P9 */
+#define I2C_CMD_INTR_STEER_HOST 1
+#define I2C_CMD_INTR_STEER_OCC 2
+#define I2C_CMD_DEV_ADDR PPC_BITMASK(8, 14)
+#define I2C_CMD_READ_NOT_WRITE PPC_BIT(15)
+#define I2C_CMD_LEN_BYTES PPC_BITMASK(16, 31)
+#define I2C_MAX_TFR_LEN 0xfff0ull
+
+/* I2C mode register */
+#define I2C_MODE_REG 0x6
+#define I2C_MODE_BIT_RATE_DIV PPC_BITMASK(0, 15)
+#define I2C_MODE_PORT_NUM PPC_BITMASK(16, 21)
+#define I2C_MODE_ENHANCED PPC_BIT(28)
+#define I2C_MODE_DIAGNOSTIC PPC_BIT(29)
+#define I2C_MODE_PACING_ALLOW PPC_BIT(30)
+#define I2C_MODE_WRAP PPC_BIT(31)
+
+/* I2C watermark register */
+#define I2C_WATERMARK_REG 0x7
+#define I2C_WATERMARK_HIGH PPC_BITMASK(16, 19)
+#define I2C_WATERMARK_LOW PPC_BITMASK(24, 27)
+
+/*
+ * I2C interrupt mask and condition registers
+ *
+ * NB: The function of 0x9 and 0xa changes depending on whether you're reading
+ * or writing to them. When read they return the interrupt condition bits
+ * and on writes they update the interrupt mask register.
+ *
+ * The bit definitions are the same for all the interrupt registers.
+ */
+#define I2C_INTR_MASK_REG 0x8
+
+#define I2C_INTR_RAW_COND_REG 0x9 /* read */
+#define I2C_INTR_MASK_OR_REG 0x9 /* write */
+
+#define I2C_INTR_COND_REG 0xa /* read */
+#define I2C_INTR_MASK_AND_REG 0xa /* write */
+
+#define I2C_INTR_ALL PPC_BITMASK(16, 31)
+#define I2C_INTR_INVALID_CMD PPC_BIT(16)
+#define I2C_INTR_LBUS_PARITY_ERR PPC_BIT(17)
+#define I2C_INTR_BKEND_OVERRUN_ERR PPC_BIT(18)
+#define I2C_INTR_BKEND_ACCESS_ERR PPC_BIT(19)
+#define I2C_INTR_ARBT_LOST_ERR PPC_BIT(20)
+#define I2C_INTR_NACK_RCVD_ERR PPC_BIT(21)
+#define I2C_INTR_DATA_REQ PPC_BIT(22)
+#define I2C_INTR_CMD_COMP PPC_BIT(23)
+#define I2C_INTR_STOP_ERR PPC_BIT(24)
+#define I2C_INTR_I2C_BUSY PPC_BIT(25)
+#define I2C_INTR_NOT_I2C_BUSY PPC_BIT(26)
+#define I2C_INTR_SCL_EQ_1 PPC_BIT(28)
+#define I2C_INTR_SCL_EQ_0 PPC_BIT(29)
+#define I2C_INTR_SDA_EQ_1 PPC_BIT(30)
+#define I2C_INTR_SDA_EQ_0 PPC_BIT(31)
+
+/* I2C status register */
+#define I2C_RESET_I2C_REG 0xb /* write */
+#define I2C_RESET_ERRORS 0xc
+#define I2C_STAT_REG 0xb /* read */
+#define I2C_STAT_INVALID_CMD PPC_BIT(0)
+#define I2C_STAT_LBUS_PARITY_ERR PPC_BIT(1)
+#define I2C_STAT_BKEND_OVERRUN_ERR PPC_BIT(2)
+#define I2C_STAT_BKEND_ACCESS_ERR PPC_BIT(3)
+#define I2C_STAT_ARBT_LOST_ERR PPC_BIT(4)
+#define I2C_STAT_NACK_RCVD_ERR PPC_BIT(5)
+#define I2C_STAT_DATA_REQ PPC_BIT(6)
+#define I2C_STAT_CMD_COMP PPC_BIT(7)
+#define I2C_STAT_STOP_ERR PPC_BIT(8)
+#define I2C_STAT_UPPER_THRS PPC_BITMASK(9, 15)
+#define I2C_STAT_ANY_I2C_INTR PPC_BIT(16)
+#define I2C_STAT_PORT_HISTORY_BUSY PPC_BIT(19)
+#define I2C_STAT_SCL_INPUT_LEVEL PPC_BIT(20)
+#define I2C_STAT_SDA_INPUT_LEVEL PPC_BIT(21)
+#define I2C_STAT_PORT_BUSY PPC_BIT(22)
+#define I2C_STAT_INTERFACE_BUSY PPC_BIT(23)
+#define I2C_STAT_FIFO_ENTRY_COUNT PPC_BITMASK(24, 31)
+
+#define I2C_STAT_ANY_ERR (I2C_STAT_INVALID_CMD | I2C_STAT_LBUS_PARITY_ERR | \
+ I2C_STAT_BKEND_OVERRUN_ERR | \
+ I2C_STAT_BKEND_ACCESS_ERR | I2C_STAT_ARBT_LOST_ERR | \
+ I2C_STAT_NACK_RCVD_ERR | I2C_STAT_STOP_ERR)
+
+
+#define I2C_INTR_ACTIVE \
+ ((I2C_STAT_ANY_ERR >> 16) | I2C_INTR_CMD_COMP | I2C_INTR_DATA_REQ)
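
A minimal status-decode sketch using the masks above; "status" would be the value read back from I2C_STAT_REG, and the helper is purely illustrative:

    /* Returns true only when the command completed without any error bit set. */
    static bool pnv_i2c_cmd_done_ok(uint64_t status)
    {
        if (status & I2C_STAT_ANY_ERR) {
            return false;                /* NACK, arbitration loss, ... */
        }
        return !!(status & I2C_STAT_CMD_COMP);
    }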
+
+/* Pseudo-status used for timeouts */
+#define I2C_STAT_PSEUDO_TIMEOUT PPC_BIT(63)
+
+/* I2C extended status register */
+#define I2C_EXTD_STAT_REG 0xc
+#define I2C_EXTD_STAT_FIFO_SIZE PPC_BITMASK(0, 7)
+#define I2C_EXTD_STAT_MSM_CURSTATE PPC_BITMASK(11, 15)
+#define I2C_EXTD_STAT_SCL_IN_SYNC PPC_BIT(16)
+#define I2C_EXTD_STAT_SDA_IN_SYNC PPC_BIT(17)
+#define I2C_EXTD_STAT_S_SCL PPC_BIT(18)
+#define I2C_EXTD_STAT_S_SDA PPC_BIT(19)
+#define I2C_EXTD_STAT_M_SCL PPC_BIT(20)
+#define I2C_EXTD_STAT_M_SDA PPC_BIT(21)
+#define I2C_EXTD_STAT_HIGH_WATER PPC_BIT(22)
+#define I2C_EXTD_STAT_LOW_WATER PPC_BIT(23)
+#define I2C_EXTD_STAT_I2C_BUSY PPC_BIT(24)
+#define I2C_EXTD_STAT_SELF_BUSY PPC_BIT(25)
+#define I2C_EXTD_STAT_I2C_VERSION PPC_BITMASK(27, 31)
+
+/* I2C residual front end/back end length */
+#define I2C_RESIDUAL_LEN_REG 0xd
+#define I2C_RESIDUAL_FRONT_END PPC_BITMASK(0, 15)
+#define I2C_RESIDUAL_BACK_END PPC_BITMASK(16, 31)
+
+/* Port busy register */
+#define I2C_PORT_BUSY_REG 0xe
+#define I2C_SET_S_SCL_REG 0xd
+#define I2C_RESET_S_SCL_REG 0xf
+#define I2C_SET_S_SDA_REG 0x10
+#define I2C_RESET_S_SDA_REG 0x11
+
+#define PNV_I2C_FIFO_SIZE 8
+#define PNV_I2C_MAX_BUSSES 64
+
+#endif /* PNV_I2C_REGS_H */
diff --git a/include/hw/i2c/ppc4xx_i2c.h b/include/hw/i2c/ppc4xx_i2c.h
index f6f837fbec..4e882fa3c8 100644
--- a/include/hw/i2c/ppc4xx_i2c.h
+++ b/include/hw/i2c/ppc4xx_i2c.h
@@ -29,11 +29,12 @@
#include "hw/sysbus.h"
#include "hw/i2c/bitbang_i2c.h"
+#include "qom/object.h"
#define TYPE_PPC4xx_I2C "ppc4xx-i2c"
-#define PPC4xx_I2C(obj) OBJECT_CHECK(PPC4xxI2CState, (obj), TYPE_PPC4xx_I2C)
+OBJECT_DECLARE_SIMPLE_TYPE(PPC4xxI2CState, PPC4xx_I2C)
-typedef struct PPC4xxI2CState {
+struct PPC4xxI2CState {
/*< private >*/
SysBusDevice parent_obj;
@@ -57,6 +58,6 @@ typedef struct PPC4xxI2CState {
uint8_t xfrcnt;
uint8_t xtcntlss;
uint8_t directcntl;
-} PPC4xxI2CState;
+};
#endif /* PPC4XX_I2C_H */
diff --git a/include/hw/i2c/smbus_slave.h b/include/hw/i2c/smbus_slave.h
index ebe068304e..86bfe0a79e 100644
--- a/include/hw/i2c/smbus_slave.h
+++ b/include/hw/i2c/smbus_slave.h
@@ -26,19 +26,14 @@
#define HW_SMBUS_SLAVE_H
#include "hw/i2c/i2c.h"
+#include "qom/object.h"
#define TYPE_SMBUS_DEVICE "smbus-device"
-#define SMBUS_DEVICE(obj) \
- OBJECT_CHECK(SMBusDevice, (obj), TYPE_SMBUS_DEVICE)
-#define SMBUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SMBusDeviceClass, (klass), TYPE_SMBUS_DEVICE)
-#define SMBUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SMBusDeviceClass, (obj), TYPE_SMBUS_DEVICE)
+OBJECT_DECLARE_TYPE(SMBusDevice, SMBusDeviceClass,
+ SMBUS_DEVICE)
-typedef struct SMBusDevice SMBusDevice;
-typedef struct SMBusDeviceClass
-{
+struct SMBusDeviceClass {
I2CSlaveClass parent_class;
/*
@@ -67,7 +62,7 @@ typedef struct SMBusDeviceClass
* return 0xff in that case.
*/
uint8_t (*receive_byte)(SMBusDevice *dev);
-} SMBusDeviceClass;
+};
#define SMBUS_DATA_MAX_LEN 34 /* command + len + 32 bytes of data. */
diff --git a/include/hw/i386/apic.h b/include/hw/i386/apic.h
index da1d2fe155..eb606d6076 100644
--- a/include/hw/i386/apic.h
+++ b/include/hw/i386/apic.h
@@ -3,16 +3,14 @@
/* apic.c */
-void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
- uint8_t vector_num, uint8_t trigger_mode);
+void apic_set_max_apic_id(uint32_t max_apic_id);
int apic_accept_pic_intr(DeviceState *s);
void apic_deliver_pic_intr(DeviceState *s, int level);
void apic_deliver_nmi(DeviceState *d);
int apic_get_interrupt(DeviceState *s);
-void apic_reset_irq_delivered(void);
-int apic_get_irq_delivered(void);
-void cpu_set_apic_base(DeviceState *s, uint64_t val);
+int cpu_set_apic_base(DeviceState *s, uint64_t val);
uint64_t cpu_get_apic_base(DeviceState *s);
+bool cpu_is_apic_enabled(DeviceState *s);
void cpu_set_apic_tpr(DeviceState *s, uint8_t val);
uint8_t cpu_get_apic_tpr(DeviceState *s);
void apic_init_reset(DeviceState *s);
@@ -20,6 +18,9 @@ void apic_sipi(DeviceState *s);
void apic_poll_irq(DeviceState *d);
void apic_designate_bsp(DeviceState *d, bool bsp);
int apic_get_highest_priority_irr(DeviceState *dev);
+int apic_msr_read(int index, uint64_t *val);
+int apic_msr_write(int index, uint64_t val);
+bool is_x2apic_mode(DeviceState *d);
/* pc.c */
DeviceState *cpu_get_current_apic(void);
diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h
index 2597000e03..d6e85833da 100644
--- a/include/hw/i386/apic_internal.h
+++ b/include/hw/i386/apic_internal.h
@@ -7,7 +7,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,6 +25,7 @@
#include "exec/memory.h"
#include "qemu/timer.h"
#include "target/i386/cpu-qom.h"
+#include "qom/object.h"
/* APIC Local Vector Table */
#define APIC_LVT_TIMER 0
@@ -45,8 +46,10 @@
#define APIC_DM_EXTINT 7
/* APIC destination mode */
-#define APIC_DESTMODE_FLAT 0xf
-#define APIC_DESTMODE_CLUSTER 1
+#define APIC_DESTMODE_PHYSICAL 0
+#define APIC_DESTMODE_LOGICAL 1
+#define APIC_DESTMODE_LOGICAL_FLAT 0xf
+#define APIC_DESTMODE_LOGICAL_CLUSTER 0
#define APIC_TRIGGER_EDGE 0
#define APIC_TRIGGER_LEVEL 1
@@ -125,20 +128,16 @@
typedef struct APICCommonState APICCommonState;
#define TYPE_APIC_COMMON "apic-common"
-#define APIC_COMMON(obj) \
- OBJECT_CHECK(APICCommonState, (obj), TYPE_APIC_COMMON)
-#define APIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(APICCommonClass, (klass), TYPE_APIC_COMMON)
-#define APIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(APICCommonClass, (obj), TYPE_APIC_COMMON)
-
-typedef struct APICCommonClass
-{
+typedef struct APICCommonClass APICCommonClass;
+DECLARE_OBJ_CHECKERS(APICCommonState, APICCommonClass,
+ APIC_COMMON, TYPE_APIC_COMMON)
+
+struct APICCommonClass {
DeviceClass parent_class;
DeviceRealize realize;
DeviceUnrealize unrealize;
- void (*set_base)(APICCommonState *s, uint64_t val);
+ int (*set_base)(APICCommonState *s, uint64_t val);
void (*set_tpr)(APICCommonState *s, uint8_t val);
uint8_t (*get_tpr)(APICCommonState *s);
void (*enable_tpr_reporting)(APICCommonState *s, bool enable);
@@ -151,7 +150,7 @@ typedef struct APICCommonClass
* device, but it's convenient to have it here for now.
*/
void (*send_msi)(MSIMessage *msi);
-} APICCommonClass;
+};
struct APICCommonState {
/*< private >*/
@@ -190,6 +189,7 @@ struct APICCommonState {
DeviceState *vapic;
hwaddr vapic_paddr; /* note: persistence via kvmvapic */
bool legacy_instance_id;
+ uint32_t extended_log_dest;
};
typedef struct VAPICState {
@@ -202,7 +202,6 @@ typedef struct VAPICState {
extern bool apic_report_tpr_access;
-void apic_report_irq_delivered(int delivered);
bool apic_next_timer(APICCommonState *s, int64_t current_time);
void apic_enable_tpr_access_reporting(DeviceState *d, bool enable);
void apic_enable_vapic(DeviceState *d, hwaddr paddr);
@@ -229,6 +228,6 @@ static inline int apic_get_bit(uint32_t *tab, int index)
return !!(tab[i] & mask);
}
-APICCommonClass *apic_get_class(void);
+APICCommonClass *apic_get_class(Error **errp);
#endif /* QEMU_APIC_INTERNAL_H */
diff --git a/include/hw/i386/hostmem-epc.h b/include/hw/i386/hostmem-epc.h
new file mode 100644
index 0000000000..846c726085
--- /dev/null
+++ b/include/hw/i386/hostmem-epc.h
@@ -0,0 +1,28 @@
+/*
+ * SGX EPC backend
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Authors:
+ * Sean Christopherson <sean.j.christopherson@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_HOSTMEM_EPC_H
+#define QEMU_HOSTMEM_EPC_H
+
+#include "sysemu/hostmem.h"
+
+#define TYPE_MEMORY_BACKEND_EPC "memory-backend-epc"
+
+#define MEMORY_BACKEND_EPC(obj) \
+ OBJECT_CHECK(HostMemoryBackendEpc, (obj), TYPE_MEMORY_BACKEND_EPC)
+
+typedef struct HostMemoryBackendEpc HostMemoryBackendEpc;
+
+struct HostMemoryBackendEpc {
+ HostMemoryBackend parent_obj;
+};
+
+#endif
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index 3870052f5f..7fa0a695c8 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -24,10 +24,10 @@
#include "hw/i386/x86-iommu.h"
#include "qemu/iova-tree.h"
+#include "qom/object.h"
#define TYPE_INTEL_IOMMU_DEVICE "intel-iommu"
-#define INTEL_IOMMU_DEVICE(obj) \
- OBJECT_CHECK(IntelIOMMUState, (obj), TYPE_INTEL_IOMMU_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(IntelIOMMUState, INTEL_IOMMU_DEVICE)
#define TYPE_INTEL_IOMMU_MEMORY_REGION "intel-iommu-iommu-memory-region"
@@ -56,10 +56,8 @@
typedef struct VTDContextEntry VTDContextEntry;
typedef struct VTDContextCacheEntry VTDContextCacheEntry;
-typedef struct IntelIOMMUState IntelIOMMUState;
typedef struct VTDAddressSpace VTDAddressSpace;
typedef struct VTDIOTLBEntry VTDIOTLBEntry;
-typedef struct VTDBus VTDBus;
typedef union VTD_IR_TableEntry VTD_IR_TableEntry;
typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress;
typedef struct VTDPASIDDirEntry VTDPASIDDirEntry;
@@ -99,28 +97,61 @@ struct VTDPASIDEntry {
struct VTDAddressSpace {
PCIBus *bus;
uint8_t devfn;
+ uint32_t pasid;
AddressSpace as;
IOMMUMemoryRegion iommu;
MemoryRegion root; /* The root container of the device */
MemoryRegion nodmar; /* The alias of shared nodmar MR */
MemoryRegion iommu_ir; /* Interrupt region: 0xfeeXXXXX */
+ MemoryRegion iommu_ir_fault; /* Interrupt region for catching fault */
IntelIOMMUState *iommu_state;
VTDContextCacheEntry context_cache_entry;
QLIST_ENTRY(VTDAddressSpace) next;
/* Superset of notifier flags that this address space has */
IOMMUNotifierFlag notifier_flags;
- IOVATree *iova_tree; /* Traces mapped IOVA ranges */
-};
-
-struct VTDBus {
- PCIBus* bus; /* A reference to the bus to provide translation for */
- /* A table of VTDAddressSpace objects indexed by devfn */
- VTDAddressSpace *dev_as[];
+ /*
+ * @iova_tree traces mapped IOVA ranges.
+ *
+ * The tree is not needed if no MAP notifier is registered with current
+ * VTD address space, because all guest invalidate commands can be
+ * directly passed to the IOMMU UNMAP notifiers without any further
+ * reshuffling.
+ *
+ * The tree OTOH is required for MAP typed iommu notifiers for a few
+ * reasons.
+ *
+ * Firstly, there's no way to identify whether a PSI (Page Selective
+ * Invalidation) or DSI (Domain Selective Invalidation) event is a
+ * MAP or UNMAP event within the message itself. Without prior
+ * knowledge of the existing state, the vIOMMU doesn't know whether it
+ * should notify MAP or UNMAP for a PSI message it receives when caching
+ * mode is enabled (for MAP notifiers).
+ *
+ * Secondly, PSI messages received from the guest driver can be enlarged
+ * in range, covering but not limited to what the guest driver wanted to
+ * invalidate. When the range to invalidate gets bigger than the
+ * limit of a PSI message, it can even become a DSI which will
+ * invalidate the whole domain. If the vIOMMU directly notifies the
+ * registered device with the unmodified range, it may confuse the
+ * registered drivers (e.g. vfio-pci) on either:
+ *
+ * (1) Trying to map the same region more than once (for
+ * VFIO_IOMMU_MAP_DMA, -EEXIST will trigger), or,
+ *
+ * (2) Trying to UNMAP a range that is still partially mapped.
+ *
+ * That accuracy is not required for UNMAP-only notifiers, but it is a
+ * must-have for notifiers registered with MAP events, because the
+ * vIOMMU needs to make sure the shadow page table is always in sync
+ * with the guest IOMMU pgtables for a device.
+ */
+ IOVATree *iova_tree;
};
struct VTDIOTLBEntry {
uint64_t gfn;
uint16_t domain_id;
+ uint32_t pasid;
uint64_t slpte;
uint64_t mask;
uint8_t access_flags;
@@ -146,38 +177,40 @@ enum {
/* Interrupt Remapping Table Entry Definition */
union VTD_IR_TableEntry {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
- uint32_t __reserved_1:8; /* Reserved 1 */
- uint32_t vector:8; /* Interrupt Vector */
- uint32_t irte_mode:1; /* IRTE Mode */
- uint32_t __reserved_0:3; /* Reserved 0 */
- uint32_t __avail:4; /* Available spaces for software */
- uint32_t delivery_mode:3; /* Delivery Mode */
- uint32_t trigger_mode:1; /* Trigger Mode */
- uint32_t redir_hint:1; /* Redirection Hint */
- uint32_t dest_mode:1; /* Destination Mode */
- uint32_t fault_disable:1; /* Fault Processing Disable */
- uint32_t present:1; /* Whether entry present/available */
+#if HOST_BIG_ENDIAN
+ uint64_t dest_id:32; /* Destination ID */
+ uint64_t __reserved_1:8; /* Reserved 1 */
+ uint64_t vector:8; /* Interrupt Vector */
+ uint64_t irte_mode:1; /* IRTE Mode */
+ uint64_t __reserved_0:3; /* Reserved 0 */
+ uint64_t __avail:4; /* Available spaces for software */
+ uint64_t delivery_mode:3; /* Delivery Mode */
+ uint64_t trigger_mode:1; /* Trigger Mode */
+ uint64_t redir_hint:1; /* Redirection Hint */
+ uint64_t dest_mode:1; /* Destination Mode */
+ uint64_t fault_disable:1; /* Fault Processing Disable */
+ uint64_t present:1; /* Whether entry present/available */
#else
- uint32_t present:1; /* Whether entry present/available */
- uint32_t fault_disable:1; /* Fault Processing Disable */
- uint32_t dest_mode:1; /* Destination Mode */
- uint32_t redir_hint:1; /* Redirection Hint */
- uint32_t trigger_mode:1; /* Trigger Mode */
- uint32_t delivery_mode:3; /* Delivery Mode */
- uint32_t __avail:4; /* Available spaces for software */
- uint32_t __reserved_0:3; /* Reserved 0 */
- uint32_t irte_mode:1; /* IRTE Mode */
- uint32_t vector:8; /* Interrupt Vector */
- uint32_t __reserved_1:8; /* Reserved 1 */
+ uint64_t present:1; /* Whether entry present/available */
+ uint64_t fault_disable:1; /* Fault Processing Disable */
+ uint64_t dest_mode:1; /* Destination Mode */
+ uint64_t redir_hint:1; /* Redirection Hint */
+ uint64_t trigger_mode:1; /* Trigger Mode */
+ uint64_t delivery_mode:3; /* Delivery Mode */
+ uint64_t __avail:4; /* Available spaces for software */
+ uint64_t __reserved_0:3; /* Reserved 0 */
+ uint64_t irte_mode:1; /* IRTE Mode */
+ uint64_t vector:8; /* Interrupt Vector */
+ uint64_t __reserved_1:8; /* Reserved 1 */
+ uint64_t dest_id:32; /* Destination ID */
#endif
- uint32_t dest_id; /* Destination ID */
- uint16_t source_id; /* Source-ID */
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint64_t __reserved_2:44; /* Reserved 2 */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t sid_q:2; /* Source-ID Qualifier */
+ uint64_t source_id:16; /* Source-ID */
#else
+ uint64_t source_id:16; /* Source-ID */
uint64_t sid_q:2; /* Source-ID Qualifier */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t __reserved_2:44; /* Reserved 2 */
@@ -192,7 +225,7 @@ union VTD_IR_TableEntry {
/* Programming format for MSI/MSI-X addresses */
union VTD_IR_MSIAddress {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint32_t __head:12; /* Should always be: 0x0fee */
uint32_t index_l:15; /* Interrupt index bit 14-0 */
uint32_t int_mode:1; /* Interrupt format */
@@ -229,6 +262,7 @@ struct IntelIOMMUState {
bool caching_mode; /* RO - is cap CM enabled? */
bool scalable_mode; /* RO - is Scalable Mode supported? */
+ bool snoop_control; /* RO - is SNP field supported? */
dma_addr_t root; /* Current root table pointer */
bool root_scalable; /* Type of root table (scalable or not) */
@@ -253,8 +287,8 @@ struct IntelIOMMUState {
uint32_t context_cache_gen; /* Should be in [1,MAX] */
GHashTable *iotlb; /* IOTLB */
- GHashTable *vtd_as_by_busptr; /* VTDBus objects indexed by PCIBus* reference */
- VTDBus *vtd_as_by_bus_num[VTD_PCI_BUS_MAX]; /* VTDBus objects indexed by bus number */
+ GHashTable *vtd_address_spaces; /* VTD address spaces */
+ VTDAddressSpace *vtd_as_cache[VTD_PCI_BUS_MAX]; /* VTD address space cache */
/* list of registered notifiers */
QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers;
@@ -267,6 +301,8 @@ struct IntelIOMMUState {
bool buggy_eim; /* Force buggy EIM unless eim=off */
uint8_t aw_bits; /* Host/IOVA address width (in bits) */
bool dma_drain; /* Whether DMA r/w draining enabled */
+ bool dma_translation; /* Whether DMA translation supported */
+ bool pasid; /* Whether to support PASID */
/*
* Protects IOMMU states in general. Currently it protects the
@@ -278,6 +314,7 @@ struct IntelIOMMUState {
/* Find the VTD Address space associated with the given bus pointer,
* create a new one if none exists
*/
-VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn);
+VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
+ int devfn, unsigned int pasid);
#endif
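
A minimal usage sketch of the widened vtd_find_add_as() above, which is now
keyed by (bus, devfn, pasid). NO_PASID and the ->as member of VTDAddressSpace
are placeholders assumed for the sketch, not taken from this diff:

/* Illustrative sketch -- NO_PASID and ->as are assumptions. */
#define NO_PASID ((unsigned int)-1)

static AddressSpace *example_get_dma_as(IntelIOMMUState *s,
                                        PCIBus *bus, int devfn)
{
    /* Look up (or create on first use) the per-(bus, devfn, pasid) address
     * space; its cached IOVATree is what keeps MAP/UNMAP notifications to
     * listeners such as vfio-pci free of overlapping ranges. */
    VTDAddressSpace *vtd_as = vtd_find_add_as(s, bus, devfn, NO_PASID);

    return &vtd_as->as;
}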
diff --git a/include/hw/i386/ioapic_internal.h b/include/hw/i386/ioapic_internal.h
deleted file mode 100644
index fe06938bda..0000000000
--- a/include/hw/i386/ioapic_internal.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * IOAPIC emulation logic - internal interfaces
- *
- * Copyright (c) 2004-2005 Fabrice Bellard
- * Copyright (c) 2009 Xiantao Zhang, Intel
- * Copyright (c) 2011 Jan Kiszka, Siemens AG
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef QEMU_IOAPIC_INTERNAL_H
-#define QEMU_IOAPIC_INTERNAL_H
-
-#include "exec/memory.h"
-#include "hw/sysbus.h"
-#include "qemu/notify.h"
-
-#define MAX_IOAPICS 1
-
-#define IOAPIC_LVT_DEST_SHIFT 56
-#define IOAPIC_LVT_DEST_IDX_SHIFT 48
-#define IOAPIC_LVT_MASKED_SHIFT 16
-#define IOAPIC_LVT_TRIGGER_MODE_SHIFT 15
-#define IOAPIC_LVT_REMOTE_IRR_SHIFT 14
-#define IOAPIC_LVT_POLARITY_SHIFT 13
-#define IOAPIC_LVT_DELIV_STATUS_SHIFT 12
-#define IOAPIC_LVT_DEST_MODE_SHIFT 11
-#define IOAPIC_LVT_DELIV_MODE_SHIFT 8
-
-#define IOAPIC_LVT_MASKED (1 << IOAPIC_LVT_MASKED_SHIFT)
-#define IOAPIC_LVT_TRIGGER_MODE (1 << IOAPIC_LVT_TRIGGER_MODE_SHIFT)
-#define IOAPIC_LVT_REMOTE_IRR (1 << IOAPIC_LVT_REMOTE_IRR_SHIFT)
-#define IOAPIC_LVT_POLARITY (1 << IOAPIC_LVT_POLARITY_SHIFT)
-#define IOAPIC_LVT_DELIV_STATUS (1 << IOAPIC_LVT_DELIV_STATUS_SHIFT)
-#define IOAPIC_LVT_DEST_MODE (1 << IOAPIC_LVT_DEST_MODE_SHIFT)
-#define IOAPIC_LVT_DELIV_MODE (7 << IOAPIC_LVT_DELIV_MODE_SHIFT)
-
-/* Bits that are read-only for IOAPIC entry */
-#define IOAPIC_RO_BITS (IOAPIC_LVT_REMOTE_IRR | \
- IOAPIC_LVT_DELIV_STATUS)
-#define IOAPIC_RW_BITS (~(uint64_t)IOAPIC_RO_BITS)
-
-#define IOAPIC_TRIGGER_EDGE 0
-#define IOAPIC_TRIGGER_LEVEL 1
-
-/*io{apic,sapic} delivery mode*/
-#define IOAPIC_DM_FIXED 0x0
-#define IOAPIC_DM_LOWEST_PRIORITY 0x1
-#define IOAPIC_DM_PMI 0x2
-#define IOAPIC_DM_NMI 0x4
-#define IOAPIC_DM_INIT 0x5
-#define IOAPIC_DM_SIPI 0x6
-#define IOAPIC_DM_EXTINT 0x7
-#define IOAPIC_DM_MASK 0x7
-
-#define IOAPIC_VECTOR_MASK 0xff
-
-#define IOAPIC_IOREGSEL 0x00
-#define IOAPIC_IOWIN 0x10
-#define IOAPIC_EOI 0x40
-
-#define IOAPIC_REG_ID 0x00
-#define IOAPIC_REG_VER 0x01
-#define IOAPIC_REG_ARB 0x02
-#define IOAPIC_REG_REDTBL_BASE 0x10
-#define IOAPIC_ID 0x00
-
-#define IOAPIC_ID_SHIFT 24
-#define IOAPIC_ID_MASK 0xf
-
-#define IOAPIC_VER_ENTRIES_SHIFT 16
-
-typedef struct IOAPICCommonState IOAPICCommonState;
-
-#define TYPE_IOAPIC_COMMON "ioapic-common"
-#define IOAPIC_COMMON(obj) \
- OBJECT_CHECK(IOAPICCommonState, (obj), TYPE_IOAPIC_COMMON)
-#define IOAPIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(IOAPICCommonClass, (klass), TYPE_IOAPIC_COMMON)
-#define IOAPIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IOAPICCommonClass, (obj), TYPE_IOAPIC_COMMON)
-
-typedef struct IOAPICCommonClass {
- SysBusDeviceClass parent_class;
-
- DeviceRealize realize;
- DeviceUnrealize unrealize;
- void (*pre_save)(IOAPICCommonState *s);
- void (*post_load)(IOAPICCommonState *s);
-} IOAPICCommonClass;
-
-struct IOAPICCommonState {
- SysBusDevice busdev;
- MemoryRegion io_memory;
- uint8_t id;
- uint8_t ioregsel;
- uint32_t irr;
- uint64_t ioredtbl[IOAPIC_NUM_PINS];
- Notifier machine_done;
- uint8_t version;
- uint64_t irq_count[IOAPIC_NUM_PINS];
- int irq_level[IOAPIC_NUM_PINS];
- int irq_eoi[IOAPIC_NUM_PINS];
- QEMUTimer *delayed_ioapic_service_timer;
-};
-
-void ioapic_reset_common(DeviceState *dev);
-
-void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s);
-void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level);
-
-#endif /* QEMU_IOAPIC_INTERNAL_H */
diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h
index fd34b78e0d..fad97a891d 100644
--- a/include/hw/i386/microvm.h
+++ b/include/hw/i386/microvm.h
@@ -18,54 +18,94 @@
#ifndef HW_I386_MICROVM_H
#define HW_I386_MICROVM_H
-#include "qemu-common.h"
#include "exec/hwaddr.h"
#include "qemu/notify.h"
#include "hw/boards.h"
#include "hw/i386/x86.h"
+#include "hw/acpi/acpi_dev_interface.h"
+#include "hw/pci-host/gpex.h"
+#include "qom/object.h"
+
+/*
+ * IRQ | pc | microvm (acpi=on)
+ * --------+------------+------------------
+ * 0 | pit |
+ * 1 | kbd |
+ * 2 | cascade |
+ * 3 | serial 1 |
+ * 4 | serial 0 | serial
+ * 5 | - |
+ * 6 | floppy |
+ * 7 | parallel |
+ * 8 | rtc | rtc (rtc=on)
+ * 9 | acpi | acpi (ged)
+ * 10 | pci lnk | xhci (usb=on)
+ * 11 | pci lnk |
+ * 12 | ps2 | pcie
+ * 13 | fpu | pcie
+ * 14 | ide 0 | pcie
+ * 15 | ide 1 | pcie
+ * 16-23 | pci gsi | virtio
+ */
/* Platform virtio definitions */
#define VIRTIO_MMIO_BASE 0xfeb00000
-#define VIRTIO_IRQ_BASE 5
-#define VIRTIO_NUM_TRANSPORTS 8
#define VIRTIO_CMDLINE_MAXLEN 64
+#define GED_MMIO_BASE 0xfea00000
+#define GED_MMIO_BASE_MEMHP (GED_MMIO_BASE + 0x100)
+#define GED_MMIO_BASE_REGS (GED_MMIO_BASE + 0x200)
+#define GED_MMIO_IRQ 9
+
+#define MICROVM_XHCI_BASE 0xfe900000
+#define MICROVM_XHCI_IRQ 10
+
+#define PCIE_MMIO_BASE 0xc0000000
+#define PCIE_MMIO_SIZE 0x20000000
+#define PCIE_ECAM_BASE 0xe0000000
+#define PCIE_ECAM_SIZE 0x10000000
+
/* Machine type options */
-#define MICROVM_MACHINE_PIT "pit"
-#define MICROVM_MACHINE_PIC "pic"
#define MICROVM_MACHINE_RTC "rtc"
+#define MICROVM_MACHINE_PCIE "pcie"
+#define MICROVM_MACHINE_IOAPIC2 "ioapic2"
#define MICROVM_MACHINE_ISA_SERIAL "isa-serial"
#define MICROVM_MACHINE_OPTION_ROMS "x-option-roms"
#define MICROVM_MACHINE_AUTO_KERNEL_CMDLINE "auto-kernel-cmdline"
-typedef struct {
+struct MicrovmMachineClass {
X86MachineClass parent;
HotplugHandler *(*orig_hotplug_handler)(MachineState *machine,
DeviceState *dev);
-} MicrovmMachineClass;
+};
-typedef struct {
+struct MicrovmMachineState {
X86MachineState parent;
/* Machine type options */
- OnOffAuto pic;
- OnOffAuto pit;
OnOffAuto rtc;
+ OnOffAuto pcie;
+ OnOffAuto ioapic2;
bool isa_serial;
bool option_roms;
bool auto_kernel_cmdline;
/* Machine state */
+ uint32_t pcie_irq_base;
+ uint32_t virtio_irq_base;
+ uint32_t virtio_num_transports;
bool kernel_cmdline_fixed;
-} MicrovmMachineState;
+ Notifier machine_done;
+ Notifier powerdown_req;
+ struct GPEXConfig gpex;
+
+ /* device tree */
+ void *fdt;
+ uint32_t ioapic_phandle[2];
+};
#define TYPE_MICROVM_MACHINE MACHINE_TYPE_NAME("microvm")
-#define MICROVM_MACHINE(obj) \
- OBJECT_CHECK(MicrovmMachineState, (obj), TYPE_MICROVM_MACHINE)
-#define MICROVM_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MicrovmMachineClass, obj, TYPE_MICROVM_MACHINE)
-#define MICROVM_MACHINE_CLASS(class) \
- OBJECT_CLASS_CHECK(MicrovmMachineClass, class, TYPE_MICROVM_MACHINE)
+OBJECT_DECLARE_TYPE(MicrovmMachineState, MicrovmMachineClass, MICROVM_MACHINE)
#endif
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 3d7ed3a55e..e52290916c 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -3,23 +3,25 @@
#include "qemu/notify.h"
#include "qapi/qapi-types-common.h"
+#include "qemu/uuid.h"
#include "hw/boards.h"
#include "hw/block/fdc.h"
#include "hw/block/flash.h"
#include "hw/i386/x86.h"
-#include "hw/acpi/acpi_dev_interface.h"
#include "hw/hotplug.h"
+#include "qom/object.h"
+#include "hw/i386/sgx-epc.h"
+#include "hw/cxl/cxl.h"
-#define HPET_INTCAP "hpet-intcap"
+#define MAX_IDE_BUS 2
/**
* PCMachineState:
* @acpi_dev: link to ACPI PM device that performs ACPI hotplug handling
* @boot_cpus: number of present VCPUs
- * @smp_dies: number of dies per one package
*/
-struct PCMachineState {
+typedef struct PCMachineState {
/*< private >*/
X86MachineState parent_obj;
@@ -29,36 +31,43 @@ struct PCMachineState {
Notifier machine_done;
/* Pointers to devices and objects: */
- HotplugHandler *acpi_dev;
- PCIBus *bus;
+ PCIBus *pcibus;
I2CBus *smbus;
PFlashCFI01 *flash[2];
ISADevice *pcspk;
+ DeviceState *iommu;
+ BusState *idebus[MAX_IDE_BUS];
/* Configuration options: */
uint64_t max_ram_below_4g;
OnOffAuto vmport;
+ SmbiosEntryPointType smbios_entry_point_type;
+ const char *south_bridge;
bool acpi_build_enabled;
bool smbus_enabled;
bool sata_enabled;
- bool pit_enabled;
-
- /* NUMA information: */
- uint64_t numa_nodes;
- uint64_t *node_mem;
+ bool hpet_enabled;
+ bool i8042_enabled;
+ bool default_bus_bypass_iommu;
+ bool fd_bootchk;
+ uint64_t max_fw_size;
/* ACPI Memory hotplug IO base address */
hwaddr memhp_io_base;
-};
+
+ SGXEPCState sgx_epc;
+ CXLState cxl_devices_state;
+} PCMachineState;
#define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device"
#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g"
-#define PC_MACHINE_DEVMEM_REGION_SIZE "device-memory-region-size"
#define PC_MACHINE_VMPORT "vmport"
#define PC_MACHINE_SMBUS "smbus"
#define PC_MACHINE_SATA "sata"
-#define PC_MACHINE_PIT "pit"
+#define PC_MACHINE_I8042 "i8042"
+#define PC_MACHINE_MAX_FW_SIZE "max-fw-size"
+#define PC_MACHINE_SMBIOS_EP "smbios-entry-point-type"
/**
* PCMachineClass:
@@ -76,7 +85,7 @@ struct PCMachineState {
* way we can use 1GByte pages in the host.
*
*/
-typedef struct PCMachineClass {
+struct PCMachineClass {
/*< private >*/
X86MachineClass parent_class;
@@ -84,8 +93,7 @@ typedef struct PCMachineClass {
/* Device configuration: */
bool pci_enabled;
- bool kvmclock_enabled;
- const char *default_nic_model;
+ const char *default_south_bridge;
/* Compat options: */
@@ -97,51 +105,55 @@ typedef struct PCMachineClass {
bool rsdp_in_ram;
int legacy_acpi_table_size;
unsigned acpi_data_size;
- bool do_not_add_smb_acpi;
+ int pci_root_uid;
/* SMBIOS compat: */
bool smbios_defaults;
bool smbios_legacy_mode;
bool smbios_uuid_encoded;
+ SmbiosEntryPointType default_smbios_ep_type;
/* RAM / address space compat: */
bool gigabyte_align;
bool has_reserved_memory;
bool enforce_aligned_dimm;
bool broken_reserved_end;
+ bool enforce_amd_1tb_hole;
/* generate legacy CPU hotplug AML */
bool legacy_cpu_hotplug;
- /* use DMA capable linuxboot option rom */
- bool linuxboot_dma_enabled;
-
/* use PVH to load kernels that support this feature */
bool pvh_enabled;
-} PCMachineClass;
+
+ /* create kvmclock device even when KVM PV features are not exposed */
+ bool kvmclock_create_always;
+
+ /* resizable acpi blob compat */
+ bool resizable_acpi_blob;
+
+ /*
+ * whether the machine type implements broken 32-bit address space bound
+ * check for memory.
+ */
+ bool broken_32bit_mem_addr_check;
+};
#define TYPE_PC_MACHINE "generic-pc-machine"
-#define PC_MACHINE(obj) \
- OBJECT_CHECK(PCMachineState, (obj), TYPE_PC_MACHINE)
-#define PC_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCMachineClass, (obj), TYPE_PC_MACHINE)
-#define PC_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCMachineClass, (klass), TYPE_PC_MACHINE)
+OBJECT_DECLARE_TYPE(PCMachineState, PCMachineClass, PC_MACHINE)
/* ioapic.c */
GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled);
/* pc.c */
-extern int fd_bootchk;
void pc_acpi_smi_interrupt(void *opaque, int irq, int level);
-void pc_hot_add_cpu(MachineState *ms, const int64_t id, Error **errp);
-void pc_smp_parse(MachineState *ms, QemuOpts *opts);
-
-void pc_guest_info_init(PCMachineState *pcms);
-
+#define PCI_HOST_PROP_RAM_MEM "ram-mem"
+#define PCI_HOST_PROP_PCI_MEM "pci-mem"
+#define PCI_HOST_PROP_SYSTEM_MEM "system-mem"
+#define PCI_HOST_PROP_IO_MEM "io-mem"
#define PCI_HOST_PROP_PCI_HOLE_START "pci-hole-start"
#define PCI_HOST_PROP_PCI_HOLE_END "pci-hole-end"
#define PCI_HOST_PROP_PCI_HOLE64_START "pci-hole64-start"
@@ -149,36 +161,28 @@ void pc_guest_info_init(PCMachineState *pcms);
#define PCI_HOST_PROP_PCI_HOLE64_SIZE "pci-hole64-size"
#define PCI_HOST_BELOW_4G_MEM_SIZE "below-4g-mem-size"
#define PCI_HOST_ABOVE_4G_MEM_SIZE "above-4g-mem-size"
+#define PCI_HOST_PROP_SMM_RANGES "smm-ranges"
-void pc_pci_as_mapping_init(Object *owner, MemoryRegion *system_memory,
+void pc_pci_as_mapping_init(MemoryRegion *system_memory,
MemoryRegion *pci_address_space);
void xen_load_linux(PCMachineState *pcms);
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
- MemoryRegion **ram_memory);
+ uint64_t pci_hole64_size);
uint64_t pc_pci_hole64_start(void);
DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus);
void pc_basic_device_init(struct PCMachineState *pcms,
ISABus *isa_bus, qemu_irq *gsi,
- ISADevice **rtc_state,
+ ISADevice *rtc_state,
bool create_fdctrl,
uint32_t hpet_irqs);
-void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd);
-void pc_cmos_init(PCMachineState *pcms,
- BusState *ide0, BusState *ide1,
- ISADevice *s);
void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus);
-void pc_pci_device_init(PCIBus *pci_bus);
-
-typedef void (*cpu_set_smm_t)(int smm, void *arg);
void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs);
-ISADevice *pc_find_fdc0(void);
-
/* port92.c */
#define PORT92_A20_LINE "a20"
@@ -188,10 +192,48 @@ ISADevice *pc_find_fdc0(void);
void pc_system_flash_create(PCMachineState *pcms);
void pc_system_flash_cleanup_unused(PCMachineState *pcms);
void pc_system_firmware_init(PCMachineState *pcms, MemoryRegion *rom_memory);
+bool pc_system_ovmf_table_find(const char *entry, uint8_t **data,
+ int *data_len);
+void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size);
+
+/* sgx.c */
+void pc_machine_init_sgx_epc(PCMachineState *pcms);
+
+extern GlobalProperty pc_compat_9_0[];
+extern const size_t pc_compat_9_0_len;
+
+extern GlobalProperty pc_compat_8_2[];
+extern const size_t pc_compat_8_2_len;
-/* acpi-build.c */
-void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
- const CPUArchIdList *apic_ids, GArray *entry);
+extern GlobalProperty pc_compat_8_1[];
+extern const size_t pc_compat_8_1_len;
+
+extern GlobalProperty pc_compat_8_0[];
+extern const size_t pc_compat_8_0_len;
+
+extern GlobalProperty pc_compat_7_2[];
+extern const size_t pc_compat_7_2_len;
+
+extern GlobalProperty pc_compat_7_1[];
+extern const size_t pc_compat_7_1_len;
+
+extern GlobalProperty pc_compat_7_0[];
+extern const size_t pc_compat_7_0_len;
+
+extern GlobalProperty pc_compat_6_2[];
+extern const size_t pc_compat_6_2_len;
+
+extern GlobalProperty pc_compat_6_1[];
+extern const size_t pc_compat_6_1_len;
+
+extern GlobalProperty pc_compat_6_0[];
+extern const size_t pc_compat_6_0_len;
+
+extern GlobalProperty pc_compat_5_2[];
+extern const size_t pc_compat_5_2_len;
+
+extern GlobalProperty pc_compat_5_1[];
+extern const size_t pc_compat_5_1_len;
extern GlobalProperty pc_compat_5_0[];
extern const size_t pc_compat_5_0_len;
@@ -250,26 +292,6 @@ extern const size_t pc_compat_2_1_len;
extern GlobalProperty pc_compat_2_0[];
extern const size_t pc_compat_2_0_len;
-extern GlobalProperty pc_compat_1_7[];
-extern const size_t pc_compat_1_7_len;
-
-extern GlobalProperty pc_compat_1_6[];
-extern const size_t pc_compat_1_6_len;
-
-extern GlobalProperty pc_compat_1_5[];
-extern const size_t pc_compat_1_5_len;
-
-extern GlobalProperty pc_compat_1_4[];
-extern const size_t pc_compat_1_4_len;
-
-/* Helper for setting model-id for CPU models that changed model-id
- * depending on QEMU versions up to QEMU 2.4.
- */
-#define PC_CPU_MODEL_IDS(v) \
- { "qemu32-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
- { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
- { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },
-
#define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \
static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \
{ \
@@ -288,5 +310,4 @@ extern const size_t pc_compat_1_4_len;
} \
type_init(pc_machine_init_##suffix)
-extern void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id);
#endif
diff --git a/include/hw/i386/sgx-epc.h b/include/hw/i386/sgx-epc.h
new file mode 100644
index 0000000000..3e00efd870
--- /dev/null
+++ b/include/hw/i386/sgx-epc.h
@@ -0,0 +1,71 @@
+/*
+ * SGX EPC device
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Authors:
+ * Sean Christopherson <sean.j.christopherson@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_SGX_EPC_H
+#define QEMU_SGX_EPC_H
+
+#include "hw/qdev-core.h"
+#include "hw/i386/hostmem-epc.h"
+
+#define TYPE_SGX_EPC "sgx-epc"
+#define SGX_EPC(obj) \
+ OBJECT_CHECK(SGXEPCDevice, (obj), TYPE_SGX_EPC)
+#define SGX_EPC_CLASS(oc) \
+ OBJECT_CLASS_CHECK(SGXEPCDeviceClass, (oc), TYPE_SGX_EPC)
+#define SGX_EPC_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SGXEPCDeviceClass, (obj), TYPE_SGX_EPC)
+
+#define SGX_EPC_ADDR_PROP "addr"
+#define SGX_EPC_SIZE_PROP "size"
+#define SGX_EPC_MEMDEV_PROP "memdev"
+#define SGX_EPC_NUMA_NODE_PROP "node"
+
+/**
+ * SGXEPCDevice:
+ * @addr: starting guest physical address where @SGXEPCDevice is mapped.
+ * Default value: 0, meaning the address is auto-allocated.
+ * @hostmem: host memory backend providing memory for @SGXEPCDevice
+ */
+typedef struct SGXEPCDevice {
+ /* private */
+ DeviceState parent_obj;
+
+ /* public */
+ uint64_t addr;
+ uint32_t node;
+ HostMemoryBackendEpc *hostmem;
+} SGXEPCDevice;
+
+/*
+ * @base: address in guest physical address space where EPC regions start
+ * @mr: address space container for memory devices
+ */
+typedef struct SGXEPCState {
+ uint64_t base;
+ uint64_t size;
+
+ MemoryRegion mr;
+
+ struct SGXEPCDevice **sections;
+ int nr_sections;
+} SGXEPCState;
+
+bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size);
+void sgx_epc_build_srat(GArray *table_data);
+
+static inline uint64_t sgx_epc_above_4g_end(SGXEPCState *sgx_epc)
+{
+ assert(sgx_epc != NULL && sgx_epc->base >= 0x100000000ULL);
+
+ return sgx_epc->base + sgx_epc->size;
+}
+
+#endif
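
A small sketch (illustrative, not from this patch) of how machine setup code
might use sgx_epc_above_4g_end() to place RAM above the EPC area; the 4 GiB
fallback constant is an assumption for the example:

static uint64_t example_above_4g_mem_start(PCMachineState *pcms)
{
    SGXEPCState *sgx_epc = &pcms->sgx_epc;

    if (sgx_epc->size != 0) {
        /* EPC lives above 4 GiB; start normal RAM right after it. */
        return sgx_epc_above_4g_end(sgx_epc);
    }
    return 0x100000000ULL;  /* plain 4 GiB boundary otherwise */
}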
diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h
index 07239f95f4..d4eeb7ab82 100644
--- a/include/hw/i386/topology.h
+++ b/include/hw/i386/topology.h
@@ -24,14 +24,15 @@
#ifndef HW_I386_TOPOLOGY_H
#define HW_I386_TOPOLOGY_H
-/* This file implements the APIC-ID-based CPU topology enumeration logic,
+/*
+ * This file implements the APIC-ID-based CPU topology enumeration logic,
* documented at the following document:
* Intel® 64 Architecture Processor Topology Enumeration
* http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/
*
* This code should be compatible with AMD's "Extended Method" described at:
* AMD CPUID Specification (Publication #25481)
- * Section 3: Multiple Core Calcuation
+ * Section 3: Multiple Core Calculation
* as long as:
* nr_threads is set to 1;
* OFFSET_IDX is assumed to be 0;
@@ -41,27 +42,25 @@
#include "qemu/bitops.h"
-/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support
+/*
+ * APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support
*/
typedef uint32_t apic_id_t;
typedef struct X86CPUTopoIDs {
unsigned pkg_id;
- unsigned node_id;
unsigned die_id;
unsigned core_id;
unsigned smt_id;
} X86CPUTopoIDs;
typedef struct X86CPUTopoInfo {
- unsigned nodes_per_pkg;
unsigned dies_per_pkg;
unsigned cores_per_die;
unsigned threads_per_core;
} X86CPUTopoInfo;
-/* Return the bit width needed for 'count' IDs
- */
+/* Return the bit width needed for 'count' IDs */
static unsigned apicid_bitwidth_for_count(unsigned count)
{
g_assert(count >= 1);
@@ -69,15 +68,13 @@ static unsigned apicid_bitwidth_for_count(unsigned count)
return count ? 32 - clz32(count) : 0;
}
-/* Bit width of the SMT_ID (thread ID) field on the APIC ID
- */
+/* Bit width of the SMT_ID (thread ID) field on the APIC ID */
static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info)
{
return apicid_bitwidth_for_count(topo_info->threads_per_core);
}
-/* Bit width of the Core_ID field
- */
+/* Bit width of the Core_ID field */
static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info)
{
return apicid_bitwidth_for_count(topo_info->cores_per_die);
@@ -89,13 +86,7 @@ static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info)
return apicid_bitwidth_for_count(topo_info->dies_per_pkg);
}
-/* Bit width of the node_id field per socket */
-static inline unsigned apicid_node_width_epyc(X86CPUTopoInfo *topo_info)
-{
- return apicid_bitwidth_for_count(MAX(topo_info->nodes_per_pkg, 1));
-}
-/* Bit offset of the Core_ID field
- */
+/* Bit offset of the Core_ID field */
static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info)
{
return apicid_smt_width(topo_info);
@@ -107,111 +98,17 @@ static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info)
return apicid_core_offset(topo_info) + apicid_core_width(topo_info);
}
-/* Bit offset of the Pkg_ID (socket ID) field
- */
+/* Bit offset of the Pkg_ID (socket ID) field */
static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info)
{
return apicid_die_offset(topo_info) + apicid_die_width(topo_info);
}
-#define NODE_ID_OFFSET 3 /* Minimum node_id offset if numa configured */
-
-/*
- * Bit offset of the node_id field
- *
- * Make sure nodes_per_pkg > 0 if numa configured else zero.
- */
-static inline unsigned apicid_node_offset_epyc(X86CPUTopoInfo *topo_info)
-{
- unsigned offset = apicid_die_offset(topo_info) +
- apicid_die_width(topo_info);
-
- if (topo_info->nodes_per_pkg) {
- return MAX(NODE_ID_OFFSET, offset);
- } else {
- return offset;
- }
-}
-
-/* Bit offset of the Pkg_ID (socket ID) field */
-static inline unsigned apicid_pkg_offset_epyc(X86CPUTopoInfo *topo_info)
-{
- return apicid_node_offset_epyc(topo_info) +
- apicid_node_width_epyc(topo_info);
-}
-
/*
* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
*
* The caller must make sure core_id < nr_cores and smt_id < nr_threads.
*/
-static inline apic_id_t
-x86_apicid_from_topo_ids_epyc(X86CPUTopoInfo *topo_info,
- const X86CPUTopoIDs *topo_ids)
-{
- return (topo_ids->pkg_id << apicid_pkg_offset_epyc(topo_info)) |
- (topo_ids->node_id << apicid_node_offset_epyc(topo_info)) |
- (topo_ids->die_id << apicid_die_offset(topo_info)) |
- (topo_ids->core_id << apicid_core_offset(topo_info)) |
- topo_ids->smt_id;
-}
-
-static inline void x86_topo_ids_from_idx_epyc(X86CPUTopoInfo *topo_info,
- unsigned cpu_index,
- X86CPUTopoIDs *topo_ids)
-{
- unsigned nr_nodes = MAX(topo_info->nodes_per_pkg, 1);
- unsigned nr_dies = topo_info->dies_per_pkg;
- unsigned nr_cores = topo_info->cores_per_die;
- unsigned nr_threads = topo_info->threads_per_core;
- unsigned cores_per_node = DIV_ROUND_UP((nr_dies * nr_cores * nr_threads),
- nr_nodes);
-
- topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads);
- topo_ids->node_id = (cpu_index / cores_per_node) % nr_nodes;
- topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies;
- topo_ids->core_id = cpu_index / nr_threads % nr_cores;
- topo_ids->smt_id = cpu_index % nr_threads;
-}
-
-/*
- * Calculate thread/core/package IDs for a specific topology,
- * based on APIC ID
- */
-static inline void x86_topo_ids_from_apicid_epyc(apic_id_t apicid,
- X86CPUTopoInfo *topo_info,
- X86CPUTopoIDs *topo_ids)
-{
- topo_ids->smt_id = apicid &
- ~(0xFFFFFFFFUL << apicid_smt_width(topo_info));
- topo_ids->core_id =
- (apicid >> apicid_core_offset(topo_info)) &
- ~(0xFFFFFFFFUL << apicid_core_width(topo_info));
- topo_ids->die_id =
- (apicid >> apicid_die_offset(topo_info)) &
- ~(0xFFFFFFFFUL << apicid_die_width(topo_info));
- topo_ids->node_id =
- (apicid >> apicid_node_offset_epyc(topo_info)) &
- ~(0xFFFFFFFFUL << apicid_node_width_epyc(topo_info));
- topo_ids->pkg_id = apicid >> apicid_pkg_offset_epyc(topo_info);
-}
-
-/*
- * Make APIC ID for the CPU 'cpu_index'
- *
- * 'cpu_index' is a sequential, contiguous ID for the CPU.
- */
-static inline apic_id_t x86_apicid_from_cpu_idx_epyc(X86CPUTopoInfo *topo_info,
- unsigned cpu_index)
-{
- X86CPUTopoIDs topo_ids;
- x86_topo_ids_from_idx_epyc(topo_info, cpu_index, &topo_ids);
- return x86_apicid_from_topo_ids_epyc(topo_info, &topo_ids);
-}
-/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
- *
- * The caller must make sure core_id < nr_cores and smt_id < nr_threads.
- */
static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info,
const X86CPUTopoIDs *topo_ids)
{
@@ -221,7 +118,8 @@ static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info,
topo_ids->smt_id;
}
-/* Calculate thread/core/package IDs for a specific topology,
+/*
+ * Calculate thread/core/package IDs for a specific topology,
* based on (contiguous) CPU index
*/
static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info,
@@ -238,7 +136,8 @@ static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info,
topo_ids->smt_id = cpu_index % nr_threads;
}
-/* Calculate thread/core/package IDs for a specific topology,
+/*
+ * Calculate thread/core/package IDs for a specific topology,
* based on APIC ID
*/
static inline void x86_topo_ids_from_apicid(apic_id_t apicid,
@@ -256,7 +155,8 @@ static inline void x86_topo_ids_from_apicid(apic_id_t apicid,
topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info);
}
-/* Make APIC ID for the CPU 'cpu_index'
+/*
+ * Make APIC ID for the CPU 'cpu_index'
*
* 'cpu_index' is a sequential, contiguous ID for the CPU.
*/
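
A worked example (illustrative, not from this patch) of the bit packing that
x86_apicid_from_topo_ids() performs with the remaining pkg/die/core/smt
layout. With 2 threads per core, 4 cores per die and 2 dies per package, the
field widths are smt=1, core=2, die=1, so the offsets are core=1, die=3,
pkg=4:

X86CPUTopoInfo topo_info = {
    .dies_per_pkg = 2, .cores_per_die = 4, .threads_per_core = 2,
};
X86CPUTopoIDs topo_ids = {
    .pkg_id = 1, .die_id = 1, .core_id = 2, .smt_id = 1,
};
/* (1 << 4) | (1 << 3) | (2 << 1) | 1 == 0x1d */
apic_id_t apicid = x86_apicid_from_topo_ids(&topo_info, &topo_ids);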
diff --git a/include/hw/i386/vmport.h b/include/hw/i386/vmport.h
index c380b9c1f0..8f5e27c6f5 100644
--- a/include/hw/i386/vmport.h
+++ b/include/hw/i386/vmport.h
@@ -4,7 +4,7 @@
#include "hw/isa/isa.h"
#define TYPE_VMPORT "vmport"
-typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
+typedef uint32_t VMPortReadFunc(void *opaque, uint32_t address);
typedef enum {
VMPORT_CMD_GETVERSION = 10,
diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h
index 8e10383b11..bfd21649d0 100644
--- a/include/hw/i386/x86-iommu.h
+++ b/include/hw/i386/x86-iommu.h
@@ -21,30 +21,17 @@
#define HW_I386_X86_IOMMU_H
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
+#include "qom/object.h"
#define TYPE_X86_IOMMU_DEVICE ("x86-iommu")
-#define X86_IOMMU_DEVICE(obj) \
- OBJECT_CHECK(X86IOMMUState, (obj), TYPE_X86_IOMMU_DEVICE)
-#define X86_IOMMU_CLASS(klass) \
- OBJECT_CLASS_CHECK(X86IOMMUClass, (klass), TYPE_X86_IOMMU_DEVICE)
-#define X86_IOMMU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(X86IOMMUClass, obj, TYPE_X86_IOMMU_DEVICE)
+OBJECT_DECLARE_TYPE(X86IOMMUState, X86IOMMUClass, X86_IOMMU_DEVICE)
#define X86_IOMMU_SID_INVALID (0xffff)
-typedef struct X86IOMMUState X86IOMMUState;
-typedef struct X86IOMMUClass X86IOMMUClass;
typedef struct X86IOMMUIrq X86IOMMUIrq;
typedef struct X86IOMMU_MSIMessage X86IOMMU_MSIMessage;
-typedef enum IommuType {
- TYPE_INTEL,
- TYPE_AMD,
- TYPE_NONE
-} IommuType;
-
struct X86IOMMUClass {
SysBusDeviceClass parent;
/* Intel/AMD specific realize() hook */
@@ -77,7 +64,6 @@ struct X86IOMMUState {
OnOffAuto intr_supported; /* Whether vIOMMU supports IR */
bool dt_supported; /* Whether vIOMMU supports DT */
bool pt_supported; /* Whether vIOMMU supports pass-through */
- IommuType type; /* IOMMU type - AMD/Intel */
QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */
};
@@ -100,41 +86,43 @@ struct X86IOMMUIrq {
struct X86IOMMU_MSIMessage {
union {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
- uint32_t __addr_head:12; /* 0xfee */
- uint32_t dest:8;
- uint32_t __reserved:8;
- uint32_t redir_hint:1;
- uint32_t dest_mode:1;
- uint32_t __not_used:2;
+#if HOST_BIG_ENDIAN
+ uint64_t __addr_hi:32;
+ uint64_t __addr_head:12; /* 0xfee */
+ uint64_t dest:8;
+ uint64_t __reserved:8;
+ uint64_t redir_hint:1;
+ uint64_t dest_mode:1;
+ uint64_t __not_used:2;
#else
- uint32_t __not_used:2;
- uint32_t dest_mode:1;
- uint32_t redir_hint:1;
- uint32_t __reserved:8;
- uint32_t dest:8;
- uint32_t __addr_head:12; /* 0xfee */
+ uint64_t __not_used:2;
+ uint64_t dest_mode:1;
+ uint64_t redir_hint:1;
+ uint64_t __reserved:8;
+ uint64_t dest:8;
+ uint64_t __addr_head:12; /* 0xfee */
+ uint64_t __addr_hi:32;
#endif
- uint32_t __addr_hi;
} QEMU_PACKED;
uint64_t msi_addr;
};
union {
struct {
-#ifdef HOST_WORDS_BIGENDIAN
- uint16_t trigger_mode:1;
- uint16_t level:1;
- uint16_t __resved:3;
- uint16_t delivery_mode:3;
- uint16_t vector:8;
+#if HOST_BIG_ENDIAN
+ uint32_t __resved1:16;
+ uint32_t trigger_mode:1;
+ uint32_t level:1;
+ uint32_t __resved:3;
+ uint32_t delivery_mode:3;
+ uint32_t vector:8;
#else
- uint16_t vector:8;
- uint16_t delivery_mode:3;
- uint16_t __resved:3;
- uint16_t level:1;
- uint16_t trigger_mode:1;
+ uint32_t vector:8;
+ uint32_t delivery_mode:3;
+ uint32_t __resved:3;
+ uint32_t level:1;
+ uint32_t trigger_mode:1;
+ uint32_t __resved1:16;
#endif
- uint16_t __resved1;
} QEMU_PACKED;
uint32_t msi_data;
};
@@ -146,11 +134,6 @@ struct X86IOMMU_MSIMessage {
*/
X86IOMMUState *x86_iommu_get_default(void);
-/*
- * x86_iommu_get_type - get IOMMU type
- */
-IommuType x86_iommu_get_type(void);
-
/**
* x86_iommu_iec_register_notifier - register IEC (Interrupt Entry
* Cache) notifiers
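
For orientation (not part of the patch): widening every field to a uint64_t
bitfield and folding __addr_hi into the same container keeps the overlay with
msi_addr consistent across host endianness, which appears to be the motivation
for the layout change. A short composition sketch, with the destination value
chosen arbitrarily:

struct X86IOMMU_MSIMessage msg = { 0 };

msg.__addr_head = 0xfee;    /* fixed MSI address prefix */
msg.dest        = 0x01;     /* destination APIC ID (example value) */
msg.dest_mode   = 0;        /* physical destination mode */
msg.redir_hint  = 0;

uint64_t addr = msg.msi_addr;   /* read the packed address via the union */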
diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h
index b79f24e285..4dc30dcb4d 100644
--- a/include/hw/i386/x86.h
+++ b/include/hw/i386/x86.h
@@ -17,17 +17,14 @@
#ifndef HW_I386_X86_H
#define HW_I386_X86_H
-#include "qemu-common.h"
#include "exec/hwaddr.h"
-#include "qemu/notify.h"
-#include "hw/i386/topology.h"
#include "hw/boards.h"
-#include "hw/nmi.h"
+#include "hw/intc/ioapic.h"
#include "hw/isa/isa.h"
-#include "hw/i386/ioapic.h"
+#include "qom/object.h"
-typedef struct {
+struct X86MachineClass {
/*< private >*/
MachineClass parent;
@@ -35,11 +32,13 @@ typedef struct {
/* TSC rate migration: */
bool save_tsc_khz;
- /* Enables contiguous-apic-ID mode */
- bool compat_apic_id_mode;
-} X86MachineClass;
+ /* use DMA capable linuxboot option rom */
+ bool fwcfg_dma_enabled;
+ /* CPU and apic information: */
+ bool apic_xrupt_override;
+};
-typedef struct {
+struct X86MachineState {
/*< private >*/
MachineState parent;
@@ -49,48 +48,53 @@ typedef struct {
ISADevice *rtc;
FWCfgState *fw_cfg;
qemu_irq *gsi;
+ DeviceState *ioapic2;
GMappedFile *initrd_mapped_file;
+ HotplugHandler *acpi_dev;
/* RAM information (sizes, addresses, configuration): */
ram_addr_t below_4g_mem_size, above_4g_mem_size;
+ /* Start address of the initial RAM above 4G */
+ uint64_t above_4g_mem_start;
+
/* CPU and apic information: */
- bool apic_xrupt_override;
+ unsigned pci_irq_mask;
unsigned apic_id_limit;
uint16_t boot_cpus;
- unsigned smp_dies;
+ SgxEPCList *sgx_epc_list;
OnOffAuto smm;
OnOffAuto acpi;
+ OnOffAuto pit;
+ OnOffAuto pic;
- /* Apic id specific handlers */
- uint32_t (*apicid_from_cpu_idx)(X86CPUTopoInfo *topo_info,
- unsigned cpu_index);
- void (*topo_ids_from_apicid)(apic_id_t apicid, X86CPUTopoInfo *topo_info,
- X86CPUTopoIDs *topo_ids);
- apic_id_t (*apicid_from_topo_ids)(X86CPUTopoInfo *topo_info,
- const X86CPUTopoIDs *topo_ids);
- uint32_t (*apicid_pkg_offset)(X86CPUTopoInfo *topo_info);
-
+ char *oem_id;
+ char *oem_table_id;
/*
* Address space used by IOAPIC device. All IOAPIC interrupts
* will be translated to MSI messages in the address space.
*/
AddressSpace *ioapic_as;
-} X86MachineState;
+
+ /*
+ * Ratelimit enforced on detected bus locks in guest.
+ * The default value of the bus_lock_ratelimit is 0 per second,
+ * which means no limitation on the guest's bus locks.
+ */
+ uint64_t bus_lock_ratelimit;
+};
#define X86_MACHINE_SMM "smm"
#define X86_MACHINE_ACPI "acpi"
+#define X86_MACHINE_PIT "pit"
+#define X86_MACHINE_PIC "pic"
+#define X86_MACHINE_OEM_ID "x-oem-id"
+#define X86_MACHINE_OEM_TABLE_ID "x-oem-table-id"
+#define X86_MACHINE_BUS_LOCK_RATELIMIT "bus-lock-ratelimit"
#define TYPE_X86_MACHINE MACHINE_TYPE_NAME("x86")
-#define X86_MACHINE(obj) \
- OBJECT_CHECK(X86MachineState, (obj), TYPE_X86_MACHINE)
-#define X86_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(X86MachineClass, obj, TYPE_X86_MACHINE)
-#define X86_MACHINE_CLASS(class) \
- OBJECT_CLASS_CHECK(X86MachineClass, class, TYPE_X86_MACHINE)
-
-void init_topo_info(X86CPUTopoInfo *topo_info, const X86MachineState *x86ms);
+OBJECT_DECLARE_TYPE(X86MachineState, X86MachineClass, X86_MACHINE)
uint32_t x86_cpu_apic_id_from_index(X86MachineState *pcms,
unsigned int cpu_index);
@@ -101,32 +105,44 @@ CpuInstanceProperties x86_cpu_index_to_props(MachineState *ms,
unsigned cpu_index);
int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx);
const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms);
-
-void x86_bios_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw);
+CPUArchId *x86_find_cpu_slot(MachineState *ms, uint32_t id, int *idx);
+void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count);
+void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void x86_cpu_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void x86_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void x86_cpu_unplug_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+
+void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
+ MemoryRegion *rom_memory, bool isapc_ram_fw);
void x86_load_linux(X86MachineState *x86ms,
FWCfgState *fw_cfg,
int acpi_data_size,
- bool pvh_enabled,
- bool linuxboot_dma_enabled);
+ bool pvh_enabled);
-bool x86_machine_is_smm_enabled(X86MachineState *x86ms);
-bool x86_machine_is_acpi_enabled(X86MachineState *x86ms);
+bool x86_machine_is_smm_enabled(const X86MachineState *x86ms);
+bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms);
/* Global System Interrupts */
-#define GSI_NUM_PINS IOAPIC_NUM_PINS
+#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
typedef struct GSIState {
qemu_irq i8259_irq[ISA_NUM_IRQS];
qemu_irq ioapic_irq[IOAPIC_NUM_PINS];
+ qemu_irq ioapic2_irq[IOAPIC_NUM_PINS];
} GSIState;
qemu_irq x86_allocate_cpu_irq(void);
void gsi_handler(void *opaque, int n, int level);
-void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name);
+void ioapic_init_gsi(GSIState *gsi_state, Object *parent);
+DeviceState *ioapic_init_secondary(GSIState *gsi_state);
-/* hpet.c */
-extern int no_hpet;
+/* pc_sysfw.c */
+void x86_firmware_configure(void *ptr, int size);
#endif
diff --git a/include/hw/i386/xen_arch_hvm.h b/include/hw/i386/xen_arch_hvm.h
new file mode 100644
index 0000000000..1000f8f543
--- /dev/null
+++ b/include/hw/i386/xen_arch_hvm.h
@@ -0,0 +1,11 @@
+#ifndef HW_XEN_ARCH_I386_HVM_H
+#define HW_XEN_ARCH_I386_HVM_H
+
+#include <xen/hvm/ioreq.h>
+#include "hw/xen/xen-hvm-common.h"
+
+void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
+void arch_xen_set_memory(XenIOState *state,
+ MemoryRegionSection *section,
+ bool add);
+#endif
diff --git a/include/hw/ide.h b/include/hw/ide.h
deleted file mode 100644
index c5ce5da4f4..0000000000
--- a/include/hw/ide.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef HW_IDE_H
-#define HW_IDE_H
-
-#include "hw/isa/isa.h"
-#include "exec/memory.h"
-
-/* ide-isa.c */
-ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq,
- DriveInfo *hd0, DriveInfo *hd1);
-
-/* ide-pci.c */
-int pci_piix3_xen_ide_unplug(DeviceState *dev, bool aux);
-
-/* ide-mmio.c */
-void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1);
-
-int ide_get_geometry(BusState *bus, int unit,
- int16_t *cyls, int8_t *heads, int8_t *secs);
-int ide_get_bios_chs_trans(BusState *bus, int unit);
-
-/* ide/core.c */
-void ide_drive_get(DriveInfo **hd, int max_bus);
-
-#endif /* HW_IDE_H */
diff --git a/include/hw/ide/ahci-pci.h b/include/hw/ide/ahci-pci.h
new file mode 100644
index 0000000000..c2ee616962
--- /dev/null
+++ b/include/hw/ide/ahci-pci.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU AHCI Emulation (PCI devices)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_IDE_AHCI_PCI_H
+#define HW_IDE_AHCI_PCI_H
+
+#include "qom/object.h"
+#include "hw/ide/ahci.h"
+#include "hw/pci/pci_device.h"
+
+#define TYPE_ICH9_AHCI "ich9-ahci"
+OBJECT_DECLARE_SIMPLE_TYPE(AHCIPCIState, ICH9_AHCI)
+
+struct AHCIPCIState {
+ PCIDevice parent_obj;
+
+ AHCIState ahci;
+};
+
+#endif
diff --git a/include/hw/ide/ahci-sysbus.h b/include/hw/ide/ahci-sysbus.h
new file mode 100644
index 0000000000..06eaac8cb6
--- /dev/null
+++ b/include/hw/ide/ahci-sysbus.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU AHCI Emulation (MMIO-mapped devices)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_IDE_AHCI_SYSBUS_H
+#define HW_IDE_AHCI_SYSBUS_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/ide/ahci.h"
+
+#define TYPE_SYSBUS_AHCI "sysbus-ahci"
+OBJECT_DECLARE_SIMPLE_TYPE(SysbusAHCIState, SYSBUS_AHCI)
+
+struct SysbusAHCIState {
+ SysBusDevice parent_obj;
+
+ AHCIState ahci;
+};
+
+#define TYPE_ALLWINNER_AHCI "allwinner-ahci"
+OBJECT_DECLARE_SIMPLE_TYPE(AllwinnerAHCIState, ALLWINNER_AHCI)
+
+#define ALLWINNER_AHCI_MMIO_OFF 0x80
+#define ALLWINNER_AHCI_MMIO_SIZE 0x80
+
+struct AllwinnerAHCIState {
+ SysbusAHCIState parent_obj;
+
+ MemoryRegion mmio;
+ uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE / 4];
+};
+
+#endif
diff --git a/include/hw/ide/ahci.h b/include/hw/ide/ahci.h
index b44e3000cf..ba31e75ff9 100644
--- a/include/hw/ide/ahci.h
+++ b/include/hw/ide/ahci.h
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,7 +24,7 @@
#ifndef HW_IDE_AHCI_H
#define HW_IDE_AHCI_H
-#include "hw/sysbus.h"
+#include "exec/memory.h"
typedef struct AHCIDevice AHCIDevice;
@@ -45,41 +45,12 @@ typedef struct AHCIState {
MemoryRegion idp; /* Index-Data Pair I/O port space */
unsigned idp_offset; /* Offset of index in I/O port space */
uint32_t idp_index; /* Current IDP index */
- int32_t ports;
+ uint32_t ports;
qemu_irq irq;
AddressSpace *as;
} AHCIState;
-typedef struct AHCIPCIState AHCIPCIState;
-#define TYPE_ICH9_AHCI "ich9-ahci"
-
-int32_t ahci_get_num_ports(PCIDevice *dev);
-void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd);
-
-#define TYPE_SYSBUS_AHCI "sysbus-ahci"
-
-typedef struct SysbusAHCIState {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- AHCIState ahci;
- uint32_t num_ports;
-} SysbusAHCIState;
-
-#define TYPE_ALLWINNER_AHCI "allwinner-ahci"
-
-#define ALLWINNER_AHCI_MMIO_OFF 0x80
-#define ALLWINNER_AHCI_MMIO_SIZE 0x80
-
-typedef struct AllwinnerAHCIState {
- /*< private >*/
- SysbusAHCIState parent_obj;
- /*< public >*/
-
- MemoryRegion mmio;
- uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE/4];
-} AllwinnerAHCIState;
+void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd);
#endif /* HW_IDE_AHCI_H */
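
A brief usage sketch (illustrative, not from this patch) of the retargeted
ahci_ide_create_devs(): callers now pass the embedded AHCIState instead of a
PCIDevice, so PCI and sysbus AHCI variants can share the call. 'dev' and the
six-port array are placeholders assumed for the example:

AHCIPCIState *ich9 = ICH9_AHCI(dev);    /* see hw/ide/ahci-pci.h above */
DriveInfo *hd[6];                       /* one slot per SATA port here */

ide_drive_get(hd, 6);                   /* collect -drive definitions */
ahci_ide_create_devs(&ich9->ahci, hd);  /* attach them to the HBA ports */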
diff --git a/include/hw/ide/ide-bus.h b/include/hw/ide/ide-bus.h
new file mode 100644
index 0000000000..4841a7dcd6
--- /dev/null
+++ b/include/hw/ide/ide-bus.h
@@ -0,0 +1,42 @@
+#ifndef HW_IDE_BUS_H
+#define HW_IDE_BUS_H
+
+#include "exec/ioport.h"
+#include "hw/ide/ide-dev.h"
+#include "hw/ide/ide-dma.h"
+
+struct IDEBus {
+ BusState qbus;
+ IDEDevice *master;
+ IDEDevice *slave;
+ IDEState ifs[2];
+ QEMUBH *bh;
+
+ int bus_id;
+ int max_units;
+ IDEDMA *dma;
+ uint8_t unit;
+ uint8_t cmd;
+ qemu_irq irq; /* bus output */
+
+ int error_status;
+ uint8_t retry_unit;
+ int64_t retry_sector_num;
+ uint32_t retry_nsector;
+ PortioList portio_list;
+ PortioList portio2_list;
+ VMChangeStateEntry *vmstate;
+};
+
+#define TYPE_IDE_BUS "IDE"
+OBJECT_DECLARE_SIMPLE_TYPE(IDEBus, IDE_BUS)
+
+void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
+ int bus_id, int max_units);
+IDEDevice *ide_bus_create_drive(IDEBus *bus, int unit, DriveInfo *drive);
+
+int ide_get_geometry(BusState *bus, int unit,
+ int16_t *cyls, int8_t *heads, int8_t *secs);
+int ide_get_bios_chs_trans(BusState *bus, int unit);
+
+#endif
diff --git a/include/hw/ide/ide-dev.h b/include/hw/ide/ide-dev.h
new file mode 100644
index 0000000000..9a0d71db4e
--- /dev/null
+++ b/include/hw/ide/ide-dev.h
@@ -0,0 +1,186 @@
+/*
+ * ide device definitions
+ *
+ * Copyright (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef IDE_DEV_H
+#define IDE_DEV_H
+
+#include "sysemu/dma.h"
+#include "hw/qdev-properties.h"
+#include "hw/block/block.h"
+
+typedef struct IDEDevice IDEDevice;
+typedef struct IDEState IDEState;
+typedef struct IDEBus IDEBus;
+
+typedef void EndTransferFunc(IDEState *);
+
+#define MAX_IDE_DEVS 2
+
+#define TYPE_IDE_DEVICE "ide-device"
+OBJECT_DECLARE_TYPE(IDEDevice, IDEDeviceClass, IDE_DEVICE)
+
+typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind;
+
+struct unreported_events {
+ bool eject_request;
+ bool new_media;
+};
+
+enum ide_dma_cmd {
+ IDE_DMA_READ = 0,
+ IDE_DMA_WRITE,
+ IDE_DMA_TRIM,
+ IDE_DMA_ATAPI,
+ IDE_DMA__COUNT
+};
+
+/* NOTE: an IDEState in fact represents one drive */
+struct IDEState {
+ IDEBus *bus;
+ uint8_t unit;
+ /* ide config */
+ IDEDriveKind drive_kind;
+ int drive_heads, drive_sectors;
+ int cylinders, heads, sectors, chs_trans;
+ int64_t nb_sectors;
+ int mult_sectors;
+ int identify_set;
+ uint8_t identify_data[512];
+ int drive_serial;
+ char drive_serial_str[21];
+ char drive_model_str[41];
+ bool win2k_install_hack;
+ uint64_t wwn;
+ /* ide regs */
+ uint8_t feature;
+ uint8_t error;
+ uint32_t nsector;
+ uint8_t sector;
+ uint8_t lcyl;
+ uint8_t hcyl;
+ /* other part of tf for lba48 support */
+ uint8_t hob_feature;
+ uint8_t hob_nsector;
+ uint8_t hob_sector;
+ uint8_t hob_lcyl;
+ uint8_t hob_hcyl;
+
+ uint8_t select;
+ uint8_t status;
+
+ bool io8;
+ bool reset_reverts;
+
+ /* set for lba48 access */
+ uint8_t lba48;
+ BlockBackend *blk;
+ char version[9];
+ /* ATAPI specific */
+ struct unreported_events events;
+ uint8_t sense_key;
+ uint8_t asc;
+ bool tray_open;
+ bool tray_locked;
+ uint8_t cdrom_changed;
+ int packet_transfer_size;
+ int elementary_transfer_size;
+ int32_t io_buffer_index;
+ int lba;
+ int cd_sector_size;
+ int atapi_dma; /* true if dma is requested for the packet cmd */
+ BlockAcctCookie acct;
+ BlockAIOCB *pio_aiocb;
+ QEMUIOVector qiov;
+ QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
+ /* ATA DMA state */
+ uint64_t io_buffer_offset;
+ int32_t io_buffer_size;
+ QEMUSGList sg;
+ /* PIO transfer handling */
+ int req_nb_sectors; /* number of sectors per interrupt */
+ EndTransferFunc *end_transfer_func;
+ uint8_t *data_ptr;
+ uint8_t *data_end;
+ uint8_t *io_buffer;
+ /* PIO save/restore */
+ int32_t io_buffer_total_len;
+ int32_t cur_io_buffer_offset;
+ int32_t cur_io_buffer_len;
+ uint8_t end_transfer_fn_idx;
+ QEMUTimer *sector_write_timer; /* only used for win2k install hack */
+ uint32_t irq_count; /* counts IRQs when using win2k install hack */
+ /* CF-ATA extended error */
+ uint8_t ext_error;
+ /* CF-ATA metadata storage */
+ uint32_t mdata_size;
+ uint8_t *mdata_storage;
+ int media_changed;
+ enum ide_dma_cmd dma_cmd;
+ /* SMART */
+ uint8_t smart_enabled;
+ uint8_t smart_autosave;
+ int smart_errors;
+ uint8_t smart_selftest_count;
+ uint8_t *smart_selftest_data;
+ /* AHCI */
+ int ncq_queues;
+};
+
+struct IDEDeviceClass {
+ DeviceClass parent_class;
+ void (*realize)(IDEDevice *dev, Error **errp);
+};
+
+struct IDEDevice {
+ DeviceState qdev;
+ uint32_t unit;
+ BlockConf conf;
+ int chs_trans;
+ char *version;
+ char *serial;
+ char *model;
+ uint64_t wwn;
+ /*
+ * 0x0000 - rotation rate not reported
+ * 0x0001 - non-rotating medium (SSD)
+ * 0x0002-0x0400 - reserved
+ * 0x0401-0xfffe - rotations per minute
+ * 0xffff - reserved
+ */
+ uint16_t rotation_rate;
+ bool win2k_install_hack;
+};
+
+typedef struct IDEDrive {
+ IDEDevice dev;
+} IDEDrive;
+
+#define DEFINE_IDE_DEV_PROPERTIES() \
+ DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \
+ DEFINE_BLOCK_ERROR_PROPERTIES(IDEDrive, dev.conf), \
+ DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \
+ DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \
+ DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\
+ DEFINE_PROP_STRING("model", IDEDrive, dev.model)
+
+void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp);
+
+void ide_drive_get(DriveInfo **hd, int max_bus);
+
+#endif
diff --git a/include/hw/ide/ide-dma.h b/include/hw/ide/ide-dma.h
new file mode 100644
index 0000000000..d0b19ac9c5
--- /dev/null
+++ b/include/hw/ide/ide-dma.h
@@ -0,0 +1,37 @@
+#ifndef HW_IDE_DMA_H
+#define HW_IDE_DMA_H
+
+#include "block/aio.h"
+#include "qemu/iov.h"
+
+typedef struct IDEState IDEState;
+typedef struct IDEDMAOps IDEDMAOps;
+typedef struct IDEDMA IDEDMA;
+
+typedef void DMAStartFunc(const IDEDMA *, IDEState *, BlockCompletionFunc *);
+typedef void DMAVoidFunc(const IDEDMA *);
+typedef int DMAIntFunc(const IDEDMA *, bool);
+typedef int32_t DMAInt32Func(const IDEDMA *, int32_t len);
+typedef void DMAu32Func(const IDEDMA *, uint32_t);
+typedef void DMAStopFunc(const IDEDMA *, bool);
+
+struct IDEDMAOps {
+ DMAStartFunc *start_dma;
+ DMAVoidFunc *pio_transfer;
+ DMAInt32Func *prepare_buf;
+ DMAu32Func *commit_buf;
+ DMAIntFunc *rw_buf;
+ DMAVoidFunc *restart;
+ DMAVoidFunc *restart_dma;
+ DMAStopFunc *set_inactive;
+ DMAVoidFunc *cmd_done;
+ DMAVoidFunc *reset;
+};
+
+struct IDEDMA {
+ const IDEDMAOps *ops;
+ QEMUIOVector qiov;
+ BlockAIOCB *aiocb;
+};
+
+#endif
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
deleted file mode 100644
index 1a7869e85d..0000000000
--- a/include/hw/ide/internal.h
+++ /dev/null
@@ -1,649 +0,0 @@
-#ifndef HW_IDE_INTERNAL_H
-#define HW_IDE_INTERNAL_H
-
-/*
- * QEMU IDE Emulation -- internal header file
- * only files in hw/ide/ are supposed to include this file.
- * non-internal declarations are in hw/ide.h
- */
-
-#include "qapi/qapi-types-run-state.h"
-#include "hw/ide.h"
-#include "hw/irq.h"
-#include "hw/isa/isa.h"
-#include "sysemu/dma.h"
-#include "hw/block/block.h"
-#include "scsi/constants.h"
-
-/* debug IDE devices */
-#define USE_DMA_CDROM
-
-typedef struct IDEBus IDEBus;
-typedef struct IDEDevice IDEDevice;
-typedef struct IDEState IDEState;
-typedef struct IDEDMA IDEDMA;
-typedef struct IDEDMAOps IDEDMAOps;
-
-#define TYPE_IDE_BUS "IDE"
-#define IDE_BUS(obj) OBJECT_CHECK(IDEBus, (obj), TYPE_IDE_BUS)
-
-#define MAX_IDE_DEVS 2
-
-/* Bits of HD_STATUS */
-#define ERR_STAT 0x01
-#define INDEX_STAT 0x02
-#define ECC_STAT 0x04 /* Corrected error */
-#define DRQ_STAT 0x08
-#define SEEK_STAT 0x10
-#define SRV_STAT 0x10
-#define WRERR_STAT 0x20
-#define READY_STAT 0x40
-#define BUSY_STAT 0x80
-
-/* Bits for HD_ERROR */
-#define MARK_ERR 0x01 /* Bad address mark */
-#define TRK0_ERR 0x02 /* couldn't find track 0 */
-#define ABRT_ERR 0x04 /* Command aborted */
-#define MCR_ERR 0x08 /* media change request */
-#define ID_ERR 0x10 /* ID field not found */
-#define MC_ERR 0x20 /* media changed */
-#define ECC_ERR 0x40 /* Uncorrectable ECC error */
-#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
-#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
-
-/* Bits of HD_NSECTOR */
-#define CD 0x01
-#define IO 0x02
-#define REL 0x04
-#define TAG_MASK 0xf8
-
-#define IDE_CMD_RESET 0x04
-#define IDE_CMD_DISABLE_IRQ 0x02
-
-/* ACS-2 T13/2015-D Table B.2 Command codes */
-#define WIN_NOP 0x00
-/* reserved 0x01..0x02 */
-#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */
-/* reserved 0x04..0x05 */
-#define WIN_DSM 0x06
-/* reserved 0x07 */
-#define WIN_DEVICE_RESET 0x08
-/* reserved 0x09..0x0a */
-/* REQUEST SENSE DATA EXT 0x0B */
-/* reserved 0x0C..0x0F */
-#define WIN_RECAL 0x10 /* obsolete since ATA4 */
-/* obsolete since ATA3, retired in ATA4 0x11..0x1F */
-#define WIN_READ 0x20 /* 28-Bit */
-#define WIN_READ_ONCE 0x21 /* 28-Bit w/o retries, obsolete since ATA5 */
-/* obsolete since ATA4 0x22..0x23 */
-#define WIN_READ_EXT 0x24 /* 48-Bit */
-#define WIN_READDMA_EXT 0x25 /* 48-Bit */
-#define WIN_READDMA_QUEUED_EXT 0x26 /* 48-Bit, obsolete since ACS2 */
-#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */
-/* reserved 0x28 */
-#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */
-/* READ STREAM DMA EXT 0x2A */
-/* READ STREAM EXT 0x2B */
-/* reserved 0x2C..0x2E */
-/* READ LOG EXT 0x2F */
-#define WIN_WRITE 0x30 /* 28-Bit */
-#define WIN_WRITE_ONCE 0x31 /* 28-Bit w/o retries, obsolete since ATA5 */
-/* obsolete since ATA4 0x32..0x33 */
-#define WIN_WRITE_EXT 0x34 /* 48-Bit */
-#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */
-#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */
-#define WIN_SET_MAX_EXT 0x37 /* 48-Bit, obsolete since ACS2 */
-#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */
-#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */
-#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */
-/* WRITE STREAM DMA EXT 0x3A */
-/* WRITE STREAM EXT 0x3B */
-#define WIN_WRITE_VERIFY 0x3C /* 28-Bit, obsolete since ATA4 */
-/* WRITE DMA FUA EXT 0x3D */
-/* obsolete since ACS2 0x3E */
-/* WRITE LOG EXT 0x3F */
-#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */
-#define WIN_VERIFY_ONCE 0x41 /* 28-Bit - w/o retries, obsolete since ATA5 */
-#define WIN_VERIFY_EXT 0x42 /* 48-Bit */
-/* reserved 0x43..0x44 */
-/* WRITE UNCORRECTABLE EXT 0x45 */
-/* reserved 0x46 */
-/* READ LOG DMA EXT 0x47 */
-/* reserved 0x48..0x4F */
-/* obsolete since ATA4 0x50 */
-/* CONFIGURE STREAM 0x51 */
-/* reserved 0x52..0x56 */
-/* WRITE LOG DMA EXT 0x57 */
-/* reserved 0x58..0x5A */
-/* TRUSTED NON DATA 0x5B */
-/* TRUSTED RECEIVE 0x5C */
-/* TRUSTED RECEIVE DMA 0x5D */
-/* TRUSTED SEND 0x5E */
-/* TRUSTED SEND DMA 0x5F */
-/* READ FPDMA QUEUED 0x60 */
-/* WRITE FPDMA QUEUED 0x61 */
-/* reserved 0x62->0x6F */
-#define WIN_SEEK 0x70 /* obsolete since ATA7 */
-/* reserved 0x71-0x7F */
-/* vendor specific 0x80-0x86 */
-#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */
-/* vendor specific 0x88-0x8F */
-#define WIN_DIAGNOSE 0x90
-#define WIN_SPECIFY 0x91 /* set drive geometry translation, obsolete since ATA6 */
-#define WIN_DOWNLOAD_MICROCODE 0x92
-/* DOWNLOAD MICROCODE DMA 0x93 */
-#define WIN_STANDBYNOW2 0x94 /* retired in ATA4 */
-#define WIN_IDLEIMMEDIATE2 0x95 /* force drive to become "ready", retired in ATA4 */
-#define WIN_STANDBY2 0x96 /* retired in ATA4 */
-#define WIN_SETIDLE2 0x97 /* retired in ATA4 */
-#define WIN_CHECKPOWERMODE2 0x98 /* retired in ATA4 */
-#define WIN_SLEEPNOW2 0x99 /* retired in ATA4 */
-/* vendor specific 0x9A */
-/* reserved 0x9B..0x9F */
-#define WIN_PACKETCMD 0xA0 /* Send a packet command. */
-#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */
-#define WIN_QUEUED_SERVICE 0xA2 /* obsolete since ACS2 */
-/* reserved 0xA3..0xAF */
-#define WIN_SMART 0xB0 /* self-monitoring and reporting */
-/* Device Configuration Overlay 0xB1 */
-/* reserved 0xB2..0xB3 */
-/* Sanitize Device 0xB4 */
-/* reserved 0xB5 */
-/* NV Cache 0xB6 */
-/* reserved for CFA 0xB7..0xBB */
-#define CFA_ACCESS_METADATA_STORAGE 0xB8
-/* reserved 0xBC..0xBF */
-#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */
-/* vendor specific 0xC1..0xC3 */
-#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/
-#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */
-#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */
-#define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers, obsolete since ACS2 */
-#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */
-#define WIN_READDMA_ONCE 0xC9 /* 28-Bit - w/o retries, obsolete since ATA5 */
-#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */
-#define WIN_WRITEDMA_ONCE 0xCB /* 28-Bit - w/o retries, obsolete since ATA5 */
-#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */
-#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */
-/* WRITE MULTIPLE FUA EXT 0xCE */
-/* reserved 0xCF..0xDO */
-/* CHECK MEDIA CARD TYPE 0xD1 */
-/* reserved for media card pass through 0xD2..0xD4 */
-/* reserved 0xD5..0xD9 */
-#define WIN_GETMEDIASTATUS 0xDA /* obsolete since ATA8 */
-/* obsolete since ATA3, retired in ATA4 0xDB..0xDD */
-#define WIN_DOORLOCK 0xDE /* lock door on removable drives, obsolete since ATA8 */
-#define WIN_DOORUNLOCK 0xDF /* unlock door on removable drives, obsolete since ATA8 */
-#define WIN_STANDBYNOW1 0xE0
-#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */
-#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */
-#define WIN_SETIDLE1 0xE3
-#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */
-#define WIN_CHECKPOWERMODE1 0xE5
-#define WIN_SLEEPNOW1 0xE6
-#define WIN_FLUSH_CACHE 0xE7
-#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */
-/* READ BUFFER DMA 0xE9 */
-#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */
-/* WRITE BUFFER DMA 0xEB */
-#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */
-#define WIN_MEDIAEJECT 0xED /* obsolete since ATA8 */
-/* obsolete since ATA4 0xEE */
-#define WIN_SETFEATURES 0xEF /* set special drive features */
-#define IBM_SENSE_CONDITION 0xF0 /* measure disk temperature, vendor specific */
-#define WIN_SECURITY_SET_PASS 0xF1
-#define WIN_SECURITY_UNLOCK 0xF2
-#define WIN_SECURITY_ERASE_PREPARE 0xF3
-#define WIN_SECURITY_ERASE_UNIT 0xF4
-#define WIN_SECURITY_FREEZE_LOCK 0xF5
-#define CFA_WEAR_LEVEL 0xF5 /* microdrives implement as NOP; not specified in T13! */
-#define WIN_SECURITY_DISABLE 0xF6
-/* vendor specific 0xF7 */
-#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */
-#define WIN_SET_MAX 0xF9
-/* vendor specific 0xFA..0xFF */
-
-/* set to 1 set disable mult support */
-#define MAX_MULT_SECTORS 16
-
-#define IDE_DMA_BUF_SECTORS 256
-
-/* feature values for Data Set Management */
-#define DSM_TRIM 0x01
-
-#if (IDE_DMA_BUF_SECTORS < MAX_MULT_SECTORS)
-#error "IDE_DMA_BUF_SECTORS must be bigger or equal to MAX_MULT_SECTORS"
-#endif
-
-/* ATAPI defines */
-
-#define ATAPI_PACKET_SIZE 12
-
-/* The generic packet command opcodes for CD/DVD Logical Units,
- * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-#define GPCMD_BLANK 0xa1
-#define GPCMD_CLOSE_TRACK 0x5b
-#define GPCMD_FLUSH_CACHE 0x35
-#define GPCMD_FORMAT_UNIT 0x04
-#define GPCMD_GET_CONFIGURATION 0x46
-#define GPCMD_GET_EVENT_STATUS_NOTIFICATION 0x4a
-#define GPCMD_GET_PERFORMANCE 0xac
-#define GPCMD_INQUIRY 0x12
-#define GPCMD_LOAD_UNLOAD 0xa6
-#define GPCMD_MECHANISM_STATUS 0xbd
-#define GPCMD_MODE_SELECT_10 0x55
-#define GPCMD_MODE_SENSE_10 0x5a
-#define GPCMD_PAUSE_RESUME 0x4b
-#define GPCMD_PLAY_AUDIO_10 0x45
-#define GPCMD_PLAY_AUDIO_MSF 0x47
-#define GPCMD_PLAY_AUDIO_TI 0x48
-#define GPCMD_PLAY_CD 0xbc
-#define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
-#define GPCMD_READ_10 0x28
-#define GPCMD_READ_12 0xa8
-#define GPCMD_READ_CDVD_CAPACITY 0x25
-#define GPCMD_READ_CD 0xbe
-#define GPCMD_READ_CD_MSF 0xb9
-#define GPCMD_READ_DISC_INFO 0x51
-#define GPCMD_READ_DVD_STRUCTURE 0xad
-#define GPCMD_READ_FORMAT_CAPACITIES 0x23
-#define GPCMD_READ_HEADER 0x44
-#define GPCMD_READ_TRACK_RZONE_INFO 0x52
-#define GPCMD_READ_SUBCHANNEL 0x42
-#define GPCMD_READ_TOC_PMA_ATIP 0x43
-#define GPCMD_REPAIR_RZONE_TRACK 0x58
-#define GPCMD_REPORT_KEY 0xa4
-#define GPCMD_REQUEST_SENSE 0x03
-#define GPCMD_RESERVE_RZONE_TRACK 0x53
-#define GPCMD_SCAN 0xba
-#define GPCMD_SEEK 0x2b
-#define GPCMD_SEND_DVD_STRUCTURE 0xad
-#define GPCMD_SEND_EVENT 0xa2
-#define GPCMD_SEND_KEY 0xa3
-#define GPCMD_SEND_OPC 0x54
-#define GPCMD_SET_READ_AHEAD 0xa7
-#define GPCMD_SET_STREAMING 0xb6
-#define GPCMD_START_STOP_UNIT 0x1b
-#define GPCMD_STOP_PLAY_SCAN 0x4e
-#define GPCMD_TEST_UNIT_READY 0x00
-#define GPCMD_VERIFY_10 0x2f
-#define GPCMD_WRITE_10 0x2a
-#define GPCMD_WRITE_AND_VERIFY_10 0x2e
-/* This is listed as optional in ATAPI 2.6, but is (curiously)
- * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji
- * Table 377 as an MMC command for SCSI devices though... Most ATAPI
- * drives support it. */
-#define GPCMD_SET_SPEED 0xbb
-/* This seems to be a SCSI specific CD-ROM opcode
- * to play data at track/index */
-#define GPCMD_PLAYAUDIO_TI 0x48
-/*
- * From MS Media Status Notification Support Specification. For
- * older drives only.
- */
-#define GPCMD_GET_MEDIA_STATUS 0xda
-#define GPCMD_MODE_SENSE_6 0x1a
-
-#define ATAPI_INT_REASON_CD 0x01 /* 0 = data transfer */
-#define ATAPI_INT_REASON_IO 0x02 /* 1 = transfer to the host */
-#define ATAPI_INT_REASON_REL 0x04
-#define ATAPI_INT_REASON_TAG 0xf8
-
-/* same constants as bochs */
-#define ASC_NO_SEEK_COMPLETE 0x02
-#define ASC_ILLEGAL_OPCODE 0x20
-#define ASC_LOGICAL_BLOCK_OOR 0x21
-#define ASC_INV_FIELD_IN_CMD_PACKET 0x24
-#define ASC_MEDIUM_MAY_HAVE_CHANGED 0x28
-#define ASC_INCOMPATIBLE_FORMAT 0x30
-#define ASC_MEDIUM_NOT_PRESENT 0x3a
-#define ASC_SAVING_PARAMETERS_NOT_SUPPORTED 0x39
-#define ASC_DATA_PHASE_ERROR 0x4b
-#define ASC_MEDIA_REMOVAL_PREVENTED 0x53
-
-#define CFA_NO_ERROR 0x00
-#define CFA_MISC_ERROR 0x09
-#define CFA_INVALID_COMMAND 0x20
-#define CFA_INVALID_ADDRESS 0x21
-#define CFA_ADDRESS_OVERFLOW 0x2f
-
-#define SMART_READ_DATA 0xd0
-#define SMART_READ_THRESH 0xd1
-#define SMART_ATTR_AUTOSAVE 0xd2
-#define SMART_SAVE_ATTR 0xd3
-#define SMART_EXECUTE_OFFLINE 0xd4
-#define SMART_READ_LOG 0xd5
-#define SMART_WRITE_LOG 0xd6
-#define SMART_ENABLE 0xd8
-#define SMART_DISABLE 0xd9
-#define SMART_STATUS 0xda
-
-typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind;
-
-typedef void EndTransferFunc(IDEState *);
-
-typedef void DMAStartFunc(const IDEDMA *, IDEState *, BlockCompletionFunc *);
-typedef void DMAVoidFunc(const IDEDMA *);
-typedef int DMAIntFunc(const IDEDMA *, bool);
-typedef int32_t DMAInt32Func(const IDEDMA *, int32_t len);
-typedef void DMAu32Func(const IDEDMA *, uint32_t);
-typedef void DMAStopFunc(const IDEDMA *, bool);
-
-struct unreported_events {
- bool eject_request;
- bool new_media;
-};
-
-enum ide_dma_cmd {
- IDE_DMA_READ = 0,
- IDE_DMA_WRITE,
- IDE_DMA_TRIM,
- IDE_DMA_ATAPI,
- IDE_DMA__COUNT
-};
-
-extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT];
-
-#define ide_cmd_is_read(s) \
- ((s)->dma_cmd == IDE_DMA_READ)
-
-typedef struct IDEBufferedRequest {
- QLIST_ENTRY(IDEBufferedRequest) list;
- QEMUIOVector qiov;
- QEMUIOVector *original_qiov;
- BlockCompletionFunc *original_cb;
- void *original_opaque;
- bool orphaned;
-} IDEBufferedRequest;
-
-/* NOTE: IDEState represents in fact one drive */
-struct IDEState {
- IDEBus *bus;
- uint8_t unit;
- /* ide config */
- IDEDriveKind drive_kind;
- int cylinders, heads, sectors, chs_trans;
- int64_t nb_sectors;
- int mult_sectors;
- int identify_set;
- uint8_t identify_data[512];
- int drive_serial;
- char drive_serial_str[21];
- char drive_model_str[41];
- uint64_t wwn;
- /* ide regs */
- uint8_t feature;
- uint8_t error;
- uint32_t nsector;
- uint8_t sector;
- uint8_t lcyl;
- uint8_t hcyl;
- /* other part of tf for lba48 support */
- uint8_t hob_feature;
- uint8_t hob_nsector;
- uint8_t hob_sector;
- uint8_t hob_lcyl;
- uint8_t hob_hcyl;
-
- uint8_t select;
- uint8_t status;
-
- /* set for lba48 access */
- uint8_t lba48;
- BlockBackend *blk;
- char version[9];
- /* ATAPI specific */
- struct unreported_events events;
- uint8_t sense_key;
- uint8_t asc;
- bool tray_open;
- bool tray_locked;
- uint8_t cdrom_changed;
- int packet_transfer_size;
- int elementary_transfer_size;
- int32_t io_buffer_index;
- int lba;
- int cd_sector_size;
- int atapi_dma; /* true if dma is requested for the packet cmd */
- BlockAcctCookie acct;
- BlockAIOCB *pio_aiocb;
- QEMUIOVector qiov;
- QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
- /* ATA DMA state */
- uint64_t io_buffer_offset;
- int32_t io_buffer_size;
- QEMUSGList sg;
- /* PIO transfer handling */
- int req_nb_sectors; /* number of sectors per interrupt */
- EndTransferFunc *end_transfer_func;
- uint8_t *data_ptr;
- uint8_t *data_end;
- uint8_t *io_buffer;
- /* PIO save/restore */
- int32_t io_buffer_total_len;
- int32_t cur_io_buffer_offset;
- int32_t cur_io_buffer_len;
- uint8_t end_transfer_fn_idx;
- QEMUTimer *sector_write_timer; /* only used for win2k install hack */
- uint32_t irq_count; /* counts IRQs when using win2k install hack */
- /* CF-ATA extended error */
- uint8_t ext_error;
- /* CF-ATA metadata storage */
- uint32_t mdata_size;
- uint8_t *mdata_storage;
- int media_changed;
- enum ide_dma_cmd dma_cmd;
- /* SMART */
- uint8_t smart_enabled;
- uint8_t smart_autosave;
- int smart_errors;
- uint8_t smart_selftest_count;
- uint8_t *smart_selftest_data;
- /* AHCI */
- int ncq_queues;
-};
-
-struct IDEDMAOps {
- DMAStartFunc *start_dma;
- DMAVoidFunc *pio_transfer;
- DMAInt32Func *prepare_buf;
- DMAu32Func *commit_buf;
- DMAIntFunc *rw_buf;
- DMAVoidFunc *restart;
- DMAVoidFunc *restart_dma;
- DMAStopFunc *set_inactive;
- DMAVoidFunc *cmd_done;
- DMAVoidFunc *reset;
-};
-
-struct IDEDMA {
- const struct IDEDMAOps *ops;
- QEMUIOVector qiov;
- BlockAIOCB *aiocb;
-};
-
-struct IDEBus {
- BusState qbus;
- IDEDevice *master;
- IDEDevice *slave;
- IDEState ifs[2];
- QEMUBH *bh;
-
- int bus_id;
- int max_units;
- IDEDMA *dma;
- uint8_t unit;
- uint8_t cmd;
- qemu_irq irq;
-
- int error_status;
- uint8_t retry_unit;
- int64_t retry_sector_num;
- uint32_t retry_nsector;
- PortioList portio_list;
- PortioList portio2_list;
- VMChangeStateEntry *vmstate;
-};
-
-#define TYPE_IDE_DEVICE "ide-device"
-#define IDE_DEVICE(obj) \
- OBJECT_CHECK(IDEDevice, (obj), TYPE_IDE_DEVICE)
-#define IDE_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(IDEDeviceClass, (klass), TYPE_IDE_DEVICE)
-#define IDE_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IDEDeviceClass, (obj), TYPE_IDE_DEVICE)
-
-typedef struct IDEDeviceClass {
- DeviceClass parent_class;
- void (*realize)(IDEDevice *dev, Error **errp);
-} IDEDeviceClass;
-
-struct IDEDevice {
- DeviceState qdev;
- uint32_t unit;
- BlockConf conf;
- int chs_trans;
- char *version;
- char *serial;
- char *model;
- uint64_t wwn;
- /*
- * 0x0000 - rotation rate not reported
- * 0x0001 - non-rotating medium (SSD)
- * 0x0002-0x0400 - reserved
- * 0x0401-0xfffe - rotations per minute
- * 0xffff - reserved
- */
- uint16_t rotation_rate;
-};
-
-/* These are used for the error_status field of IDEBus */
-#define IDE_RETRY_MASK 0xf8
-#define IDE_RETRY_DMA 0x08
-#define IDE_RETRY_PIO 0x10
-#define IDE_RETRY_ATAPI 0x20 /* reused IDE_RETRY_READ bit */
-#define IDE_RETRY_READ 0x20
-#define IDE_RETRY_FLUSH 0x40
-#define IDE_RETRY_TRIM 0x80
-#define IDE_RETRY_HBA 0x100
-
-#define IS_IDE_RETRY_DMA(_status) \
- ((_status) & IDE_RETRY_DMA)
-
-#define IS_IDE_RETRY_PIO(_status) \
- ((_status) & IDE_RETRY_PIO)
-
-/*
- * IDE_RETRY_ATAPI is detected by reusing a previously impossible bit
- * combination as a new status value.
- */
-#define IS_IDE_RETRY_ATAPI(_status) \
- (((_status) & IDE_RETRY_MASK) == IDE_RETRY_ATAPI)
-
-static inline uint8_t ide_dma_cmd_to_retry(uint8_t dma_cmd)
-{
- switch (dma_cmd) {
- case IDE_DMA_READ:
- return IDE_RETRY_DMA | IDE_RETRY_READ;
- case IDE_DMA_WRITE:
- return IDE_RETRY_DMA;
- case IDE_DMA_TRIM:
- return IDE_RETRY_DMA | IDE_RETRY_TRIM;
- case IDE_DMA_ATAPI:
- return IDE_RETRY_ATAPI;
- default:
- break;
- }
- return 0;
-}
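For illustration only (not part of this header): a minimal sketch of how a restart path might decode the retry type recorded in IDEBus.error_status, assuming bus points at the bus that recorded the failure. The ATAPI case must be tested first, since within IDE_RETRY_MASK it is the reused READ bit with no other retry bit set.

static void example_decode_retry(IDEBus *bus)
{
    uint32_t status = bus->error_status;

    if (IS_IDE_RETRY_ATAPI(status)) {
        /* ATAPI packet command: only the reused READ bit is set in the mask */
    } else if (IS_IDE_RETRY_DMA(status)) {
        /* DMA transfer; IDE_RETRY_READ / IDE_RETRY_TRIM qualify it further */
    } else if (IS_IDE_RETRY_PIO(status)) {
        /* PIO transfer */
    }
}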
-
-static inline IDEState *idebus_active_if(IDEBus *bus)
-{
- return bus->ifs + bus->unit;
-}
-
-static inline void ide_set_irq(IDEBus *bus)
-{
- if (!(bus->cmd & IDE_CMD_DISABLE_IRQ)) {
- qemu_irq_raise(bus->irq);
- }
-}
-
-/* hw/ide/core.c */
-extern const VMStateDescription vmstate_ide_bus;
-
-#define VMSTATE_IDE_BUS(_field, _state) \
- VMSTATE_STRUCT(_field, _state, 1, vmstate_ide_bus, IDEBus)
-
-#define VMSTATE_IDE_BUS_ARRAY(_field, _state, _num) \
- VMSTATE_STRUCT_ARRAY(_field, _state, _num, 1, vmstate_ide_bus, IDEBus)
-
-extern const VMStateDescription vmstate_ide_drive;
-
-#define VMSTATE_IDE_DRIVES(_field, _state) \
- VMSTATE_STRUCT_ARRAY(_field, _state, 2, 3, vmstate_ide_drive, IDEState)
-
-#define VMSTATE_IDE_DRIVE(_field, _state) \
- VMSTATE_STRUCT(_field, _state, 1, vmstate_ide_drive, IDEState)
-
-void ide_bus_reset(IDEBus *bus);
-int64_t ide_get_sector(IDEState *s);
-void ide_set_sector(IDEState *s, int64_t sector_num);
-
-void ide_start_dma(IDEState *s, BlockCompletionFunc *cb);
-void dma_buf_commit(IDEState *s, uint32_t tx_bytes);
-void ide_dma_error(IDEState *s);
-void ide_abort_command(IDEState *s);
-
-void ide_atapi_cmd_ok(IDEState *s);
-void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc);
-void ide_atapi_dma_restart(IDEState *s);
-void ide_atapi_io_error(IDEState *s, int ret);
-
-void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val);
-uint32_t ide_ioport_read(void *opaque, uint32_t addr1);
-uint32_t ide_status_read(void *opaque, uint32_t addr);
-void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val);
-void ide_data_writew(void *opaque, uint32_t addr, uint32_t val);
-uint32_t ide_data_readw(void *opaque, uint32_t addr);
-void ide_data_writel(void *opaque, uint32_t addr, uint32_t val);
-uint32_t ide_data_readl(void *opaque, uint32_t addr);
-
-int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
- const char *version, const char *serial, const char *model,
- uint64_t wwn,
- uint32_t cylinders, uint32_t heads, uint32_t secs,
- int chs_trans, Error **errp);
-void ide_init2(IDEBus *bus, qemu_irq irq);
-void ide_exit(IDEState *s);
-void ide_init_ioport(IDEBus *bus, ISADevice *isa, int iobase, int iobase2);
-void ide_register_restart_cb(IDEBus *bus);
-
-void ide_exec_cmd(IDEBus *bus, uint32_t val);
-
-void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
- EndTransferFunc *end_transfer_func);
-bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
- EndTransferFunc *end_transfer_func);
-void ide_transfer_stop(IDEState *s);
-void ide_set_inactive(IDEState *s, bool more);
-BlockAIOCB *ide_issue_trim(
- int64_t offset, QEMUIOVector *qiov,
- BlockCompletionFunc *cb, void *cb_opaque, void *opaque);
-BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque);
-void ide_cancel_dma_sync(IDEState *s);
-
-/* hw/ide/atapi.c */
-void ide_atapi_cmd(IDEState *s);
-void ide_atapi_cmd_reply_end(IDEState *s);
-
-/* hw/ide/qdev.c */
-void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
- int bus_id, int max_units);
-IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive);
-
-int ide_handle_rw_error(IDEState *s, int error, int op);
-
-#endif /* HW_IDE_INTERNAL_H */
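For reference, a hypothetical caller of ide_init_drive() as declared above. Every value here is a placeholder (geometry, strings, the BlockBackend blk, and chs_trans), and error_report_err() is assumed from qapi/error.h; this is a sketch, not code from the patch.

Error *err = NULL;

if (ide_init_drive(&bus->ifs[0], blk, IDE_HD,
                   "2.5", "QM00001", "QEMU HARDDISK", 0 /* wwn */,
                   1024 /* cylinders */, 16 /* heads */, 63 /* sectors */,
                   0 /* chs_trans */, &err) < 0) {
    error_report_err(err);
}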
diff --git a/include/hw/ide/isa.h b/include/hw/ide/isa.h
new file mode 100644
index 0000000000..1cd0ff1fa6
--- /dev/null
+++ b/include/hw/ide/isa.h
@@ -0,0 +1,20 @@
+/*
+ * QEMU IDE Emulation: ISA Bus support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef HW_IDE_ISA_H
+#define HW_IDE_ISA_H
+
+#include "qom/object.h"
+
+#define TYPE_ISA_IDE "isa-ide"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE)
+
+ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum,
+ DriveInfo *hd0, DriveInfo *hd1);
+
+#endif
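A hedged sketch of how board code might use the declaration above: the I/O ports and IRQ are the conventional primary-channel defaults, and drive_get()/IF_IDE are assumed from the block layer headers, shown only for illustration.

/* primary legacy IDE channel: ports 0x1f0/0x3f6, IRQ 14 */
isa_ide_init(isa_bus, 0x1f0, 0x3f6, 14,
             drive_get(IF_IDE, 0, 0), drive_get(IF_IDE, 0, 1));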
diff --git a/include/hw/ide/mmio.h b/include/hw/ide/mmio.h
new file mode 100644
index 0000000000..d726a49848
--- /dev/null
+++ b/include/hw/ide/mmio.h
@@ -0,0 +1,26 @@
+/*
+ * QEMU IDE Emulation: mmio support (for embedded).
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef HW_IDE_MMIO_H
+#define HW_IDE_MMIO_H
+
+#include "qom/object.h"
+
+/*
+ * QEMU interface:
+ * + sysbus IRQ 0: asserted by the IDE channel
+ * + sysbus MMIO region 0: data registers
+ * + sysbus MMIO region 1: status & control registers
+ */
+#define TYPE_MMIO_IDE "mmio-ide"
+OBJECT_DECLARE_SIMPLE_TYPE(MMIOIDEState, MMIO_IDE)
+
+void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1);
+
+#endif
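The interface comment above maps onto sysbus wiring roughly as follows; a sketch with placeholder MMIO addresses and an ide_irq/hd0/hd1 assumed to come from board code.

DeviceState *dev = qdev_new(TYPE_MMIO_IDE);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

sysbus_realize_and_unref(sbd, &error_fatal);
sysbus_mmio_map(sbd, 0, 0x18000000);   /* region 0: data registers */
sysbus_mmio_map(sbd, 1, 0x18000100);   /* region 1: status & control registers */
sysbus_connect_irq(sbd, 0, ide_irq);   /* IRQ 0: asserted by the IDE channel */
mmio_ide_init_drives(dev, hd0, hd1);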
diff --git a/include/hw/ide/pci.h b/include/hw/ide/pci.h
index dd504e5a0b..ef03764caa 100644
--- a/include/hw/ide/pci.h
+++ b/include/hw/ide/pci.h
@@ -1,8 +1,9 @@
#ifndef HW_IDE_PCI_H
#define HW_IDE_PCI_H
-#include "hw/ide/internal.h"
-#include "hw/pci/pci.h"
+#include "hw/ide/ide-bus.h"
+#include "hw/pci/pci_device.h"
+#include "qom/object.h"
#define BM_STATUS_DMAING 0x01
#define BM_STATUS_ERROR 0x02
@@ -39,31 +40,28 @@ typedef struct BMDMAState {
} BMDMAState;
#define TYPE_PCI_IDE "pci-ide"
-#define PCI_IDE(obj) OBJECT_CHECK(PCIIDEState, (obj), TYPE_PCI_IDE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIIDEState, PCI_IDE)
-typedef struct PCIIDEState {
+struct PCIIDEState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
IDEBus bus[2];
BMDMAState bmdma[2];
+ qemu_irq isa_irq[2];
uint32_t secondary; /* used only for cmd646 */
MemoryRegion bmdma_bar;
MemoryRegion cmd_bar[2];
MemoryRegion data_bar[2];
-} PCIIDEState;
-
-static inline IDEState *bmdma_active_if(BMDMAState *bmdma)
-{
- assert(bmdma->bus->retry_unit != (uint8_t)-1);
- return bmdma->bus->ifs + bmdma->bus->retry_unit;
-}
+};
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d);
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val);
+void bmdma_status_writeb(BMDMAState *bm, uint32_t val);
extern MemoryRegionOps bmdma_addr_ioport_ops;
void pci_ide_create_devs(PCIDevice *dev);
+void pci_ide_update_mode(PCIIDEState *s);
extern const VMStateDescription vmstate_ide_pci;
extern const MemoryRegionOps pci_ide_cmd_le_ops;
diff --git a/include/hw/ide/piix.h b/include/hw/ide/piix.h
new file mode 100644
index 0000000000..ef3ef3d62d
--- /dev/null
+++ b/include/hw/ide/piix.h
@@ -0,0 +1,7 @@
+#ifndef HW_IDE_PIIX_H
+#define HW_IDE_PIIX_H
+
+#define TYPE_PIIX3_IDE "piix3-ide"
+#define TYPE_PIIX4_IDE "piix4-ide"
+
+#endif /* HW_IDE_PIIX_H */
diff --git a/include/hw/input/adb.h b/include/hw/input/adb.h
index bb75a7b1e3..20fced15f7 100644
--- a/include/hw/input/adb.h
+++ b/include/hw/input/adb.h
@@ -27,12 +27,12 @@
#define ADB_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define MAX_ADB_DEVICES 16
#define ADB_MAX_OUT_LEN 16
-typedef struct ADBBusState ADBBusState;
typedef struct ADBDevice ADBDevice;
/* buf = NULL means polling */
@@ -42,7 +42,7 @@ typedef int ADBDeviceRequest(ADBDevice *d, uint8_t *buf_out,
typedef bool ADBDeviceHasData(ADBDevice *d);
#define TYPE_ADB_DEVICE "adb-device"
-#define ADB_DEVICE(obj) OBJECT_CHECK(ADBDevice, (obj), TYPE_ADB_DEVICE)
+OBJECT_DECLARE_TYPE(ADBDevice, ADBDeviceClass, ADB_DEVICE)
struct ADBDevice {
/*< private >*/
@@ -53,22 +53,18 @@ struct ADBDevice {
int handler;
};
-#define ADB_DEVICE_CLASS(cls) \
- OBJECT_CLASS_CHECK(ADBDeviceClass, (cls), TYPE_ADB_DEVICE)
-#define ADB_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ADBDeviceClass, (obj), TYPE_ADB_DEVICE)
-typedef struct ADBDeviceClass {
+struct ADBDeviceClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
ADBDeviceRequest *devreq;
ADBDeviceHasData *devhasdata;
-} ADBDeviceClass;
+};
#define TYPE_ADB_BUS "apple-desktop-bus"
-#define ADB_BUS(obj) OBJECT_CHECK(ADBBusState, (obj), TYPE_ADB_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(ADBBusState, ADB_BUS)
#define ADB_STATUS_BUSTIMEOUT 0x1
#define ADB_STATUS_POLLREPLY 0x2
diff --git a/include/hw/input/gamepad.h b/include/hw/input/gamepad.h
deleted file mode 100644
index 6f6aa2406a..0000000000
--- a/include/hw/input/gamepad.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Gamepad style buttons connected to IRQ/GPIO lines
- *
- * Copyright (c) 2007 CodeSourcery.
- * Written by Paul Brook
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef HW_INPUT_GAMEPAD_H
-#define HW_INPUT_GAMEPAD_H
-
-
-/* stellaris_input.c */
-void stellaris_gamepad_init(int n, qemu_irq *irq, const int *keycode);
-
-#endif
diff --git a/include/hw/input/i8042.h b/include/hw/input/i8042.h
index 8eaebf50ce..e90f008b66 100644
--- a/include/hw/input/i8042.h
+++ b/include/hw/input/i8042.h
@@ -9,17 +9,100 @@
#define HW_INPUT_I8042_H
#include "hw/isa/isa.h"
+#include "hw/sysbus.h"
+#include "hw/input/ps2.h"
+#include "qom/object.h"
+
+#define I8042_KBD_IRQ 0
+#define I8042_MOUSE_IRQ 1
+
+typedef struct KBDState {
+ uint8_t write_cmd; /* if non zero, write data to port 60 is expected */
+ uint8_t status;
+ uint8_t mode;
+ uint8_t outport;
+ uint32_t migration_flags;
+ uint32_t obsrc;
+ bool outport_present;
+ bool extended_state;
+ bool extended_state_loaded;
+ /* Bitmask of devices with data available. */
+ uint8_t pending;
+ uint8_t obdata;
+ uint8_t cbdata;
+ uint8_t pending_tmp;
+ PS2KbdState ps2kbd;
+ PS2MouseState ps2mouse;
+ QEMUTimer *throttle_timer;
+
+ qemu_irq irqs[2];
+ qemu_irq a20_out;
+ hwaddr mask;
+} KBDState;
+
+/*
+ * QEMU interface:
+ * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2
+ * keyboard device has asserted its irq
+ * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2
+ * mouse device has asserted its irq
+ * + Named GPIO output "a20": A20 line for x86 PCs
+ * + Unnamed GPIO output 0-1: i8042 output irqs for keyboard (0) or mouse (1)
+ */
#define TYPE_I8042 "i8042"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAKBDState, I8042)
+
+struct ISAKBDState {
+ ISADevice parent_obj;
+
+ KBDState kbd;
+ bool kbd_throttle;
+ MemoryRegion io[2];
+ uint8_t kbd_irq;
+ uint8_t mouse_irq;
+};
+
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion defining the command/status/data
+ * registers (access determined by mask property and access type)
+ * + Named GPIO input "ps2-kbd-input-irq": set to 1 if the downstream PS2
+ * keyboard device has asserted its irq
+ * + Named GPIO input "ps2-mouse-input-irq": set to 1 if the downstream PS2
+ * mouse device has asserted its irq
+ * + Unnamed GPIO output 0-1: i8042 output irqs for keyboard (0) or mouse (1)
+ */
+
+#define TYPE_I8042_MMIO "i8042-mmio"
+OBJECT_DECLARE_SIMPLE_TYPE(MMIOKBDState, I8042_MMIO)
+
+struct MMIOKBDState {
+ SysBusDevice parent_obj;
+
+ KBDState kbd;
+ uint32_t size;
+ MemoryRegion region;
+};
#define I8042_A20_LINE "a20"
-typedef struct ISAKBDState ISAKBDState;
-void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq,
- MemoryRegion *region, ram_addr_t size,
- hwaddr mask);
void i8042_isa_mouse_fake_event(ISAKBDState *isa);
-void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out);
+
+static inline bool i8042_present(void)
+{
+ bool amb = false;
+ return object_resolve_path_type("", TYPE_I8042, &amb) || amb;
+}
+
+/*
+ * ACPI v2, Table 5-10 - Fixed ACPI Description Table Boot Architecture
+ * Flags, bit offset 1 - 8042.
+ */
+static inline uint16_t iapc_boot_arch_8042(void)
+{
+ return i8042_present() ? 0x1 << 1 : 0x0 ;
+}
#endif /* HW_INPUT_I8042_H */
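To show where iapc_boot_arch_8042() is meant to land, a sketch of assembling the FADT "IA-PC Boot Architecture" flags word; the other bits are elided and the function name is hypothetical.

static uint16_t example_fadt_boot_arch(void)
{
    uint16_t flags = 0;

    flags |= iapc_boot_arch_8042();   /* bit 1: an 8042 controller is present */
    /* other flags (legacy devices, VGA not present, ...) would be ORed in here */
    return flags;
}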
diff --git a/include/hw/input/lasips2.h b/include/hw/input/lasips2.h
index 0cd7b59064..01911c50f9 100644
--- a/include/hw/input/lasips2.h
+++ b/include/hw/input/lasips2.h
@@ -4,13 +4,77 @@
* Copyright (c) 2019 Sven Schnelle
*
*/
+
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion defining the LASI PS2 keyboard
+ * registers
+ * + sysbus MMIO region 1: MemoryRegion defining the LASI PS2 mouse
+ * registers
+ * + sysbus IRQ 0: LASI PS2 output irq
+ * + Named GPIO input "lasips2-port-input-irq[0..1]": set to 1 if the downstream
+ * LASIPS2Port has asserted its irq
+ */
+
#ifndef HW_INPUT_LASIPS2_H
#define HW_INPUT_LASIPS2_H
#include "exec/hwaddr.h"
+#include "hw/sysbus.h"
+#include "hw/input/ps2.h"
-#define TYPE_LASIPS2 "lasips2"
+#define TYPE_LASIPS2_PORT "lasips2-port"
+OBJECT_DECLARE_TYPE(LASIPS2Port, LASIPS2PortDeviceClass, LASIPS2_PORT)
+
+struct LASIPS2PortDeviceClass {
+ DeviceClass parent;
+
+ DeviceRealize parent_realize;
+};
+
+typedef struct LASIPS2State LASIPS2State;
+
+struct LASIPS2Port {
+ DeviceState parent_obj;
+
+ LASIPS2State *lasips2;
+ MemoryRegion reg;
+ PS2State *ps2dev;
+ uint8_t id;
+ uint8_t control;
+ uint8_t buf;
+ bool loopback_rbne;
+ qemu_irq irq;
+};
-void lasips2_init(MemoryRegion *address_space, hwaddr base, qemu_irq irq);
+#define TYPE_LASIPS2_KBD_PORT "lasips2-kbd-port"
+OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2KbdPort, LASIPS2_KBD_PORT)
+
+struct LASIPS2KbdPort {
+ LASIPS2Port parent_obj;
+
+ PS2KbdState kbd;
+};
+
+#define TYPE_LASIPS2_MOUSE_PORT "lasips2-mouse-port"
+OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2MousePort, LASIPS2_MOUSE_PORT)
+
+struct LASIPS2MousePort {
+ LASIPS2Port parent_obj;
+
+ PS2MouseState mouse;
+};
+
+struct LASIPS2State {
+ SysBusDevice parent_obj;
+
+ LASIPS2KbdPort kbd_port;
+ LASIPS2MousePort mouse_port;
+ uint8_t int_status;
+ qemu_irq irq;
+};
+
+#define TYPE_LASIPS2 "lasips2"
+OBJECT_DECLARE_SIMPLE_TYPE(LASIPS2State, LASIPS2)
#endif /* HW_INPUT_LASIPS2_H */
diff --git a/include/hw/input/lm832x.h b/include/hw/input/lm832x.h
new file mode 100644
index 0000000000..e0e5d5ef20
--- /dev/null
+++ b/include/hw/input/lm832x.h
@@ -0,0 +1,28 @@
+/*
+ * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_INPUT_LM832X_H
+#define HW_INPUT_LM832X_H
+
+#define TYPE_LM8323 "lm8323"
+
+void lm832x_key_event(DeviceState *dev, int key, int state);
+
+#endif
diff --git a/include/hw/input/pl050.h b/include/hw/input/pl050.h
new file mode 100644
index 0000000000..4cb8985f31
--- /dev/null
+++ b/include/hw/input/pl050.h
@@ -0,0 +1,58 @@
+/*
+ * Arm PrimeCell PL050 Keyboard / Mouse Interface
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifndef HW_PL050_H
+#define HW_PL050_H
+
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/input/ps2.h"
+#include "hw/irq.h"
+
+struct PL050DeviceClass {
+ SysBusDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+#define TYPE_PL050 "pl050"
+OBJECT_DECLARE_TYPE(PL050State, PL050DeviceClass, PL050)
+
+struct PL050State {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ PS2State *ps2dev;
+ uint32_t cr;
+ uint32_t clk;
+ uint32_t last;
+ int pending;
+ qemu_irq irq;
+ bool is_mouse;
+};
+
+#define TYPE_PL050_KBD_DEVICE "pl050_keyboard"
+OBJECT_DECLARE_SIMPLE_TYPE(PL050KbdState, PL050_KBD_DEVICE)
+
+struct PL050KbdState {
+ PL050State parent_obj;
+
+ PS2KbdState kbd;
+};
+
+#define TYPE_PL050_MOUSE_DEVICE "pl050_mouse"
+OBJECT_DECLARE_SIMPLE_TYPE(PL050MouseState, PL050_MOUSE_DEVICE)
+
+struct PL050MouseState {
+ PL050State parent_obj;
+
+ PS2MouseState mouse;
+};
+
+#endif
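A hypothetical board hookup of the two PL050 variants declared above; the MMIO addresses and IRQ lines are placeholders.

sysbus_create_simple(TYPE_PL050_KBD_DEVICE,   0x10006000, kmi_kbd_irq);
sysbus_create_simple(TYPE_PL050_MOUSE_DEVICE, 0x10007000, kmi_mouse_irq);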
diff --git a/include/hw/input/ps2.h b/include/hw/input/ps2.h
index 35d983897a..cd61a634c3 100644
--- a/include/hw/input/ps2.h
+++ b/include/hw/input/ps2.h
@@ -25,28 +25,89 @@
#ifndef HW_PS2_H
#define HW_PS2_H
+#include "hw/sysbus.h"
+
#define PS2_MOUSE_BUTTON_LEFT 0x01
#define PS2_MOUSE_BUTTON_RIGHT 0x02
#define PS2_MOUSE_BUTTON_MIDDLE 0x04
#define PS2_MOUSE_BUTTON_SIDE 0x08
#define PS2_MOUSE_BUTTON_EXTRA 0x10
-typedef struct PS2State PS2State;
+struct PS2DeviceClass {
+ SysBusDeviceClass parent_class;
+
+ ResettablePhases parent_phases;
+};
+
+/*
+ * PS/2 buffer size. Keep 256 bytes for compatibility with
+ * older QEMU versions.
+ */
+#define PS2_BUFFER_SIZE 256
+
+typedef struct {
+ uint8_t data[PS2_BUFFER_SIZE];
+ int rptr, wptr, cwptr, count;
+} PS2Queue;
+
+/* Output IRQ */
+#define PS2_DEVICE_IRQ 0
+
+struct PS2State {
+ SysBusDevice parent_obj;
+
+ PS2Queue queue;
+ int32_t write_cmd;
+ qemu_irq irq;
+};
+
+#define TYPE_PS2_DEVICE "ps2-device"
+OBJECT_DECLARE_TYPE(PS2State, PS2DeviceClass, PS2_DEVICE)
+
+struct PS2KbdState {
+ PS2State parent_obj;
+
+ int scan_enabled;
+ int translate;
+ int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */
+ int ledstate;
+ bool need_high_bit;
+ unsigned int modifiers; /* bitmask of MOD_* constants above */
+};
+
+#define TYPE_PS2_KBD_DEVICE "ps2-kbd"
+OBJECT_DECLARE_SIMPLE_TYPE(PS2KbdState, PS2_KBD_DEVICE)
+
+struct PS2MouseState {
+ PS2State parent_obj;
+
+ uint8_t mouse_status;
+ uint8_t mouse_resolution;
+ uint8_t mouse_sample_rate;
+ uint8_t mouse_wrap;
+ uint8_t mouse_type; /* 0 = PS2, 3 = IMPS/2, 4 = IMEX */
+ uint8_t mouse_detect_state;
+ int mouse_dx; /* current values, needed for 'poll' mode */
+ int mouse_dy;
+ int mouse_dz;
+ int mouse_dw;
+ uint8_t mouse_buttons;
+};
+
+#define TYPE_PS2_MOUSE_DEVICE "ps2-mouse"
+OBJECT_DECLARE_SIMPLE_TYPE(PS2MouseState, PS2_MOUSE_DEVICE)
/* ps2.c */
-void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg);
-void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg);
-void ps2_write_mouse(void *, int val);
-void ps2_write_keyboard(void *, int val);
+void ps2_write_mouse(PS2MouseState *s, int val);
+void ps2_write_keyboard(PS2KbdState *s, int val);
uint32_t ps2_read_data(PS2State *s);
void ps2_queue_noirq(PS2State *s, int b);
-void ps2_raise_irq(PS2State *s);
void ps2_queue(PS2State *s, int b);
void ps2_queue_2(PS2State *s, int b1, int b2);
void ps2_queue_3(PS2State *s, int b1, int b2, int b3);
void ps2_queue_4(PS2State *s, int b1, int b2, int b3, int b4);
-void ps2_keyboard_set_translation(void *opaque, int mode);
-void ps2_mouse_fake_event(void *opaque);
+void ps2_keyboard_set_translation(PS2KbdState *s, int mode);
+void ps2_mouse_fake_event(PS2MouseState *s);
int ps2_queue_empty(PS2State *s);
#endif /* HW_PS2_H */
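With the conversion above, PS/2 keyboard and mouse are QOM devices embedded in their controller state; a sketch (assuming kbd points at such an embedded PS2KbdState) of forwarding a guest command and draining the reply bytes.

static void example_kbd_command(PS2KbdState *kbd, uint8_t cmd)
{
    PS2State *ps2 = PS2_DEVICE(kbd);

    ps2_write_keyboard(kbd, cmd);            /* e.g. 0xff = keyboard reset */
    while (!ps2_queue_empty(ps2)) {
        uint32_t b = ps2_read_data(ps2);     /* ACK, self-test result, ... */
        (void)b;                             /* a real controller would latch this */
    }
}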
diff --git a/include/hw/input/stellaris_gamepad.h b/include/hw/input/stellaris_gamepad.h
new file mode 100644
index 0000000000..51085e166c
--- /dev/null
+++ b/include/hw/input/stellaris_gamepad.h
@@ -0,0 +1,37 @@
+/*
+ * Gamepad style buttons connected to IRQ/GPIO lines
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_INPUT_STELLARIS_GAMEPAD_H
+#define HW_INPUT_STELLARIS_GAMEPAD_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+/*
+ * QEMU interface:
+ * + QOM array property "keycodes": uint32_t QEMU keycodes to handle
+ * (these are QCodes, ie the Q_KEY_* values)
+ * + unnamed GPIO outputs: one per keycode, in the same order as the
+ * "keycodes" array property entries; asserted when key is down
+ */
+
+#define TYPE_STELLARIS_GAMEPAD "stellaris-gamepad"
+OBJECT_DECLARE_SIMPLE_TYPE(StellarisGamepad, STELLARIS_GAMEPAD)
+
+struct StellarisGamepad {
+ SysBusDevice parent_obj;
+
+ uint32_t num_buttons;
+ qemu_irq *irqs;
+ uint32_t *keycodes;
+ uint8_t *pressed;
+};
+
+#endif
diff --git a/include/hw/input/tsc2xxx.h b/include/hw/input/tsc2xxx.h
index 3cd8f1bf55..00eca17674 100644
--- a/include/hw/input/tsc2xxx.h
+++ b/include/hw/input/tsc2xxx.h
@@ -11,7 +11,13 @@
#ifndef HW_INPUT_TSC2XXX_H
#define HW_INPUT_TSC2XXX_H
-#include "ui/console.h"
+typedef struct MouseTransformInfo {
+ /* Touchscreen resolution */
+ int x;
+ int y;
+ /* Calibration values as used/generated by tslib */
+ int a[7];
+} MouseTransformInfo;
typedef struct uWireSlave {
uint16_t (*receive)(void *opaque);
@@ -24,12 +30,12 @@ uWireSlave *tsc2102_init(qemu_irq pint);
uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav);
I2SCodec *tsc210x_codec(uWireSlave *chip);
uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len);
-void tsc210x_set_transform(uWireSlave *chip, MouseTransformInfo *info);
+void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info);
void tsc210x_key_event(uWireSlave *chip, int key, int down);
/* tsc2005.c */
void *tsc2005_init(qemu_irq pintdav);
uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len);
-void tsc2005_set_transform(void *opaque, MouseTransformInfo *info);
+void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info);
#endif
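The new MouseTransformInfo carries a tslib-style calibration; purely illustrative values (the resolution and all seven coefficients are placeholders) passed to the now const-qualified setter.

static const MouseTransformInfo example_info = {
    .x = 800,
    .y = 480,
    .a = { 14114, 18, -2825064, 34, -8765, 32972906, 65536 },
};

/* e.g. from board setup, with the opaque handle returned by tsc2005_init() */
tsc2005_set_transform(tsc_opaque, &example_info);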
diff --git a/include/hw/intc/allwinner-a10-pic.h b/include/hw/intc/allwinner-a10-pic.h
index a5895401d1..b8364d3ed4 100644
--- a/include/hw/intc/allwinner-a10-pic.h
+++ b/include/hw/intc/allwinner-a10-pic.h
@@ -2,9 +2,10 @@
#define ALLWINNER_A10_PIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_AW_A10_PIC "allwinner-a10-pic"
-#define AW_A10_PIC(obj) OBJECT_CHECK(AwA10PICState, (obj), TYPE_AW_A10_PIC)
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10PICState, AW_A10_PIC)
#define AW_A10_PIC_VECTOR 0
#define AW_A10_PIC_BASE_ADDR 4
@@ -19,7 +20,7 @@
#define AW_A10_PIC_INT_NR 95
#define AW_A10_PIC_REG_NUM DIV_ROUND_UP(AW_A10_PIC_INT_NR, 32)
-typedef struct AwA10PICState {
+struct AwA10PICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -37,6 +38,6 @@ typedef struct AwA10PICState {
uint32_t enable[AW_A10_PIC_REG_NUM];
uint32_t mask[AW_A10_PIC_REG_NUM];
/*priority setting here*/
-} AwA10PICState;
+};
#endif
diff --git a/include/hw/intc/arm_gic.h b/include/hw/intc/arm_gic.h
index 303b9748cb..48f6a51a70 100644
--- a/include/hw/intc/arm_gic.h
+++ b/include/hw/intc/arm_gic.h
@@ -65,6 +65,7 @@
#define HW_ARM_GIC_H
#include "arm_gic_common.h"
+#include "qom/object.h"
/* Number of SGI target-list bits */
#define GIC_TARGETLIST_BITS 8
@@ -72,19 +73,19 @@
#define GIC_MIN_PRIORITY_BITS 4
#define TYPE_ARM_GIC "arm_gic"
-#define ARM_GIC(obj) \
- OBJECT_CHECK(GICState, (obj), TYPE_ARM_GIC)
-#define ARM_GIC_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICClass, (klass), TYPE_ARM_GIC)
-#define ARM_GIC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICClass, (obj), TYPE_ARM_GIC)
+typedef struct ARMGICClass ARMGICClass;
+/* This is reusing the GICState typedef from TYPE_ARM_GIC_COMMON */
+DECLARE_OBJ_CHECKERS(GICState, ARMGICClass,
+ ARM_GIC, TYPE_ARM_GIC)
-typedef struct ARMGICClass {
+struct ARMGICClass {
/*< private >*/
ARMGICCommonClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
-} ARMGICClass;
+};
+
+const char *gic_class_name(void);
#endif
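gic_class_name() lets board code stay agnostic of the accelerator; a sketch of instantiating whichever implementation it names (the property names follow TYPE_ARM_GIC_COMMON and ncpus is assumed from machine state).

DeviceState *gicdev = qdev_new(gic_class_name());   /* TCG or KVM implementation */

qdev_prop_set_uint32(gicdev, "num-cpu", ncpus);
qdev_prop_set_uint32(gicdev, "num-irq", 64 + GIC_INTERNAL);
sysbus_realize_and_unref(SYS_BUS_DEVICE(gicdev), &error_fatal);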
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index 6e0d6b8a88..7080375008 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -22,6 +22,7 @@
#define HW_ARM_GIC_COMMON_H
#include "hw/sysbus.h"
+#include "qom/object.h"
/* Maximum number of possible interrupts, determined by the GIC architecture */
#define GIC_MAXIRQ 1020
@@ -61,7 +62,7 @@ typedef struct gic_irq_state {
uint8_t group;
} gic_irq_state;
-typedef struct GICState {
+struct GICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -143,24 +144,22 @@ typedef struct GICState {
bool irq_reset_nonsecure; /* configure IRQs as group 1 (NS) on reset? */
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
-} GICState;
+};
+typedef struct GICState GICState;
#define TYPE_ARM_GIC_COMMON "arm_gic_common"
-#define ARM_GIC_COMMON(obj) \
- OBJECT_CHECK(GICState, (obj), TYPE_ARM_GIC_COMMON)
-#define ARM_GIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICCommonClass, (klass), TYPE_ARM_GIC_COMMON)
-#define ARM_GIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICCommonClass, (obj), TYPE_ARM_GIC_COMMON)
-
-typedef struct ARMGICCommonClass {
+typedef struct ARMGICCommonClass ARMGICCommonClass;
+DECLARE_OBJ_CHECKERS(GICState, ARMGICCommonClass,
+ ARM_GIC_COMMON, TYPE_ARM_GIC_COMMON)
+
+struct ARMGICCommonClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
void (*pre_save)(GICState *s);
void (*post_load)(GICState *s);
-} ARMGICCommonClass;
+};
void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
const MemoryRegionOps *ops,
diff --git a/include/hw/intc/arm_gicv3.h b/include/hw/intc/arm_gicv3.h
index 4a6fd85e22..a81a6ae7ec 100644
--- a/include/hw/intc/arm_gicv3.h
+++ b/include/hw/intc/arm_gicv3.h
@@ -13,20 +13,20 @@
#define HW_ARM_GICV3_H
#include "arm_gicv3_common.h"
+#include "qom/object.h"
#define TYPE_ARM_GICV3 "arm-gicv3"
-#define ARM_GICV3(obj) OBJECT_CHECK(GICv3State, (obj), TYPE_ARM_GICV3)
-#define ARM_GICV3_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICv3Class, (klass), TYPE_ARM_GICV3)
-#define ARM_GICV3_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICv3Class, (obj), TYPE_ARM_GICV3)
+typedef struct ARMGICv3Class ARMGICv3Class;
+/* This is reusing the GICState typedef from TYPE_ARM_GICV3_COMMON */
+DECLARE_OBJ_CHECKERS(GICv3State, ARMGICv3Class,
+ ARM_GICV3, TYPE_ARM_GICV3)
-typedef struct ARMGICv3Class {
+struct ARMGICv3Class {
/*< private >*/
ARMGICv3CommonClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
-} ARMGICv3Class;
+};
#endif
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 31ec9a1ae4..4e2fb518e7 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -26,6 +26,7 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic_common.h"
+#include "qom/object.h"
/*
* Maximum number of possible interrupts, determined by the GIC architecture.
@@ -35,7 +36,14 @@
#define GICV3_MAXIRQ 1020
#define GICV3_MAXSPI (GICV3_MAXIRQ - GIC_INTERNAL)
+#define GICV3_LPI_INTID_START 8192
+
+/*
+ * The redistributor in GICv3 has two 64KB frames per CPU; in
+ * GICv4 it has four 64KB frames per CPU.
+ */
#define GICV3_REDIST_SIZE 0x20000
+#define GICV4_REDIST_SIZE 0x40000
/* Number of SGI target-list bits */
#define GICV3_TARGETLIST_BITS 16
@@ -43,11 +51,6 @@
/* Maximum number of list registers (architectural limit) */
#define GICV3_LR_MAX 16
-/* Minimum BPR for Secure, or when security not enabled */
-#define GIC_MIN_BPR 0
-/* Minimum BPR for Nonsecure when security is enabled */
-#define GIC_MIN_BPR_NS (GIC_MIN_BPR + 1)
-
/* For some distributor fields we want to model the array of 32-bit
* register values which hold various bitmaps corresponding to enabled,
* pending, etc bits. These macros and functions facilitate that; the
@@ -152,7 +155,6 @@ struct GICv3CPUState {
qemu_irq parent_fiq;
qemu_irq parent_virq;
qemu_irq parent_vfiq;
- qemu_irq maintenance_irq;
/* Redistributor */
uint32_t level; /* Current IRQ level */
@@ -172,6 +174,9 @@ struct GICv3CPUState {
uint32_t gicr_igrpmodr0;
uint32_t gicr_nsacr;
uint8_t gicr_ipriorityr[GIC_INTERNAL];
+ /* VLPI_base page registers */
+ uint64_t gicr_vpropbaser;
+ uint64_t gicr_vpendbaser;
/* CPU interface */
uint64_t icc_sre_el1;
@@ -196,36 +201,63 @@ struct GICv3CPUState {
int num_list_regs;
int vpribits; /* number of virtual priority bits */
int vprebits; /* number of virtual preemption bits */
+ int pribits; /* number of physical priority bits */
+ int prebits; /* number of physical preemption bits */
/* Current highest priority pending interrupt for this CPU.
* This is cached information that can be recalculated from the
* real state above; it doesn't need to be migrated.
*/
PendingIrq hppi;
+
+ /*
+ * Cached information recalculated from LPI tables
+ * in guest memory
+ */
+ PendingIrq hpplpi;
+
+ /* Cached information recalculated from vLPI tables in guest memory */
+ PendingIrq hppvlpi;
+
/* This is temporary working state, to avoid a malloc in gicv3_update() */
bool seenbetter;
};
+/*
+ * The redistributor pages might be split into more than one region
+ * on some machine types if there are many CPUs.
+ */
+typedef struct GICv3RedistRegion {
+ GICv3State *gic;
+ MemoryRegion iomem;
+ uint32_t cpuidx; /* index of first CPU this region covers */
+} GICv3RedistRegion;
+
struct GICv3State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem_dist; /* Distributor */
- MemoryRegion *iomem_redist; /* Redistributor Regions */
+ GICv3RedistRegion *redist_regions; /* Redistributor Regions */
uint32_t *redist_region_count; /* redistributor count within each region */
uint32_t nb_redist_regions; /* number of redist regions */
uint32_t num_cpu;
uint32_t num_irq;
uint32_t revision;
+ bool lpi_enable;
bool security_extn;
+ bool force_8bit_prio;
bool irq_reset_nonsecure;
bool gicd_no_migration_shift_bug;
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
+ MemoryRegion *dma;
+ AddressSpace dma_as;
+
/* Distributor */
/* for a GIC with the security extensions the NS banked version of this
@@ -249,6 +281,8 @@ struct GICv3State {
uint32_t gicd_nsacr[DIV_ROUND_UP(GICV3_MAXIRQ, 16)];
GICv3CPUState *cpu;
+ /* List of all ITSes connected to this GIC */
+ GPtrArray *itslist;
};
#define GICV3_BITMAP_ACCESSORS(BMP) \
@@ -279,23 +313,30 @@ GICV3_BITMAP_ACCESSORS(level)
GICV3_BITMAP_ACCESSORS(edge_trigger)
#define TYPE_ARM_GICV3_COMMON "arm-gicv3-common"
-#define ARM_GICV3_COMMON(obj) \
- OBJECT_CHECK(GICv3State, (obj), TYPE_ARM_GICV3_COMMON)
-#define ARM_GICV3_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(ARMGICv3CommonClass, (klass), TYPE_ARM_GICV3_COMMON)
-#define ARM_GICV3_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ARMGICv3CommonClass, (obj), TYPE_ARM_GICV3_COMMON)
-
-typedef struct ARMGICv3CommonClass {
+typedef struct ARMGICv3CommonClass ARMGICv3CommonClass;
+DECLARE_OBJ_CHECKERS(GICv3State, ARMGICv3CommonClass,
+ ARM_GICV3_COMMON, TYPE_ARM_GICV3_COMMON)
+
+struct ARMGICv3CommonClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
void (*pre_save)(GICv3State *s);
void (*post_load)(GICv3State *s);
-} ARMGICv3CommonClass;
+};
void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops, Error **errp);
+ const MemoryRegionOps *ops);
+
+/**
+ * gicv3_class_name
+ *
+ * Return name of GICv3 class to use depending on whether KVM acceleration is
+ * in use. May throw an error if the chosen implementation is not available.
+ *
+ * Returns: class name to use
+ */
+const char *gicv3_class_name(void);
#endif
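A worked consequence of the new GICV4_REDIST_SIZE constant: the address space one redistributor region needs is just the per-CPU frame size times the CPUs it covers. Sketch only; the helper name is hypothetical.

static uint64_t example_redist_region_size(uint32_t cpus_in_region, int revision)
{
    /* two 64KB frames per CPU on GICv3, four on GICv4 */
    uint64_t frame = (revision == 4) ? GICV4_REDIST_SIZE : GICV3_REDIST_SIZE;

    return (uint64_t)cpus_in_region * frame;
}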
diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h
index fd1fe64c03..7dc712b38d 100644
--- a/include/hw/intc/arm_gicv3_its_common.h
+++ b/include/hw/intc/arm_gicv3_its_common.h
@@ -23,6 +23,9 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gicv3_common.h"
+#include "qom/object.h"
+
+#define TYPE_ARM_GICV3_ITS "arm-gicv3-its"
#define ITS_CONTROL_SIZE 0x10000
#define ITS_TRANS_SIZE 0x10000
@@ -30,11 +33,27 @@
#define GITS_CTLR 0x0
#define GITS_IIDR 0x4
+#define GITS_TYPER 0x8
#define GITS_CBASER 0x80
#define GITS_CWRITER 0x88
#define GITS_CREADR 0x90
#define GITS_BASER 0x100
+#define GITS_TRANSLATER 0x0040
+
+typedef struct {
+ bool indirect;
+ uint16_t entry_sz;
+ uint32_t page_sz;
+ uint32_t num_entries;
+ uint64_t base_addr;
+} TableDesc;
+
+typedef struct {
+ uint32_t num_entries;
+ uint64_t base_addr;
+} CmdQDesc;
+
struct GICv3ITSState {
SysBusDevice parent_obj;
@@ -51,25 +70,47 @@ struct GICv3ITSState {
/* Registers */
uint32_t ctlr;
uint32_t iidr;
+ uint64_t typer;
uint64_t cbaser;
uint64_t cwriter;
uint64_t creadr;
uint64_t baser[8];
+ TableDesc dt;
+ TableDesc ct;
+ TableDesc vpet;
+ CmdQDesc cq;
+
Error *migration_blocker;
};
typedef struct GICv3ITSState GICv3ITSState;
-void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops);
+void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
+ const MemoryRegionOps *tops);
+
+/*
+ * The ITS should call this when it is realized to add itself
+ * to its GIC's list of connected ITSes.
+ */
+static inline void gicv3_add_its(GICv3State *s, DeviceState *its)
+{
+ g_ptr_array_add(s->itslist, its);
+}
+
+/*
+ * The ITS can use this for operations that must be performed on
+ * every ITS connected to the same GIC that it is
+ */
+static inline void gicv3_foreach_its(GICv3State *s, GFunc func, void *opaque)
+{
+ g_ptr_array_foreach(s->itslist, func, opaque);
+}
#define TYPE_ARM_GICV3_ITS_COMMON "arm-gicv3-its-common"
-#define ARM_GICV3_ITS_COMMON(obj) \
- OBJECT_CHECK(GICv3ITSState, (obj), TYPE_ARM_GICV3_ITS_COMMON)
-#define ARM_GICV3_ITS_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(GICv3ITSCommonClass, (klass), TYPE_ARM_GICV3_ITS_COMMON)
-#define ARM_GICV3_ITS_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(GICv3ITSCommonClass, (obj), TYPE_ARM_GICV3_ITS_COMMON)
+typedef struct GICv3ITSCommonClass GICv3ITSCommonClass;
+DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSCommonClass,
+ ARM_GICV3_ITS_COMMON, TYPE_ARM_GICV3_ITS_COMMON)
struct GICv3ITSCommonClass {
/*< private >*/
@@ -81,6 +122,14 @@ struct GICv3ITSCommonClass {
void (*post_load)(GICv3ITSState *s);
};
-typedef struct GICv3ITSCommonClass GICv3ITSCommonClass;
+/**
+ * its_class_name:
+ *
+ * Return the ITS class name to use depending on whether KVM acceleration
+ * and KVM CAP_SIGNAL_MSI are supported
+ *
+ * Returns: class name to use or NULL
+ */
+const char *its_class_name(void);
#endif
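gicv3_add_its()/gicv3_foreach_its() give the GIC a registry of connected ITSes; a sketch of walking it with a GFunc callback (the callback name and its body are hypothetical).

static void example_per_its(gpointer data, gpointer opaque)
{
    DeviceState *its = DEVICE(data);

    /* e.g. tell this ITS to invalidate its cached translations */
    (void)its;
    (void)opaque;
}

/* from GIC code holding a GICv3State *s: */
gicv3_foreach_its(s, example_per_its, NULL);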
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index a472c9b8f0..89fe8aedaa 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -10,14 +10,13 @@
#ifndef HW_ARM_ARMV7M_NVIC_H
#define HW_ARM_ARMV7M_NVIC_H
-#include "target/arm/cpu.h"
+#include "target/arm/cpu-qom.h"
#include "hw/sysbus.h"
#include "hw/timer/armv7m_systick.h"
+#include "qom/object.h"
#define TYPE_NVIC "armv7m_nvic"
-
-#define NVIC(obj) \
- OBJECT_CHECK(NVICState, (obj), TYPE_NVIC)
+OBJECT_DECLARE_SIMPLE_TYPE(NVICState, NVIC)
/* Highest permitted number of exceptions (architectural limit) */
#define NVIC_MAX_VECTORS 512
@@ -35,7 +34,7 @@ typedef struct VecInfo {
uint8_t level; /* exceptions <=15 never set level */
} VecInfo;
-typedef struct NVICState {
+struct NVICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -75,19 +74,136 @@ typedef struct NVICState {
*/
bool vectpending_is_s_banked;
int exception_prio; /* group prio of the highest prio active exception */
- int vectpending_prio; /* group prio of the exeception in vectpending */
+ int vectpending_prio; /* group prio of the exception in vectpending */
MemoryRegion sysregmem;
- MemoryRegion sysreg_ns_mem;
- MemoryRegion systickmem;
- MemoryRegion systick_ns_mem;
- MemoryRegion container;
uint32_t num_irq;
qemu_irq excpout;
qemu_irq sysresetreq;
+};
- SysTickState systick[M_REG_NUM_BANKS];
-} NVICState;
+/* Interface between CPU and Interrupt controller. */
+/**
+ * armv7m_nvic_set_pending: mark the specified exception as pending
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Marks the specified exception as pending. Note that we will assert()
+ * if @secure is true and @irq does not specify one of the fixed set
+ * of architecturally banked exceptions.
+ */
+void armv7m_nvic_set_pending(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_set_pending_derived: mark this derived exception as pending
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for derived
+ * exceptions (exceptions generated in the course of trying to take
+ * a different exception).
+ */
+void armv7m_nvic_set_pending_derived(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for exceptions
+ * generated in the course of lazy stacking of FP registers.
+ */
+void armv7m_nvic_set_pending_lazyfp(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_get_pending_irq_info: return highest priority pending
+ * exception, and whether it targets Secure state
+ * @s: the NVIC
+ * @pirq: set to pending exception number
+ * @ptargets_secure: set to whether pending exception targets Secure
+ *
+ * This function writes the number of the highest priority pending
+ * exception (the one which would be made active by
+ * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
+ * to true if the current highest priority pending exception should
+ * be taken to Secure state, false for NS.
+ */
+void armv7m_nvic_get_pending_irq_info(NVICState *s, int *pirq,
+ bool *ptargets_secure);
+/**
+ * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
+ * @s: the NVIC
+ *
+ * Move the current highest priority pending exception from the pending
+ * state to the active state, and update v7m.exception to indicate that
+ * it is the exception currently being handled.
+ */
+void armv7m_nvic_acknowledge_irq(NVICState *s);
+/**
+ * armv7m_nvic_complete_irq: complete specified interrupt or exception
+ * @s: the NVIC
+ * @irq: the exception number to complete
+ * @secure: true if this exception was secure
+ *
+ * Returns: -1 if the irq was not active
+ * 1 if completing this irq brought us back to base (no active irqs)
+ * 0 if there is still an irq active after this one was completed
+ * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
+ */
+int armv7m_nvic_complete_irq(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_get_ready_status(NVICState *s, int irq, bool secure)
+ * @s: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Return whether an exception is "ready", i.e. whether the exception is
+ * enabled and is configured at a priority which would allow it to
+ * interrupt the current execution priority. This controls whether the
+ * RDY bit for it in the FPCCR is set.
+ */
+bool armv7m_nvic_get_ready_status(NVICState *s, int irq, bool secure);
+/**
+ * armv7m_nvic_raw_execution_priority: return the raw execution priority
+ * @s: the NVIC
+ *
+ * Returns: the raw execution priority as defined by the v8M architecture.
+ * This is the execution priority minus the effects of AIRCR.PRIS,
+ * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
+ * (v8M ARM ARM I_PKLD.)
+ */
+int armv7m_nvic_raw_execution_priority(NVICState *s);
+/**
+ * armv7m_nvic_neg_prio_requested: return true if the requested execution
+ * priority is negative for the specified security state.
+ * @s: the NVIC
+ * @secure: the security state to test
+ * This corresponds to the pseudocode IsReqExecPriNeg().
+ */
+#ifndef CONFIG_USER_ONLY
+bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure);
+#else
+static inline bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure)
+{
+ return false;
+}
+#endif
+#ifndef CONFIG_USER_ONLY
+bool armv7m_nvic_can_take_pending_exception(NVICState *s);
+#else
+static inline bool armv7m_nvic_can_take_pending_exception(NVICState *s)
+{
+ return true;
+}
+#endif
#endif
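Stitched together, the CPU/NVIC handshake documented above looks roughly like this sketch of exception entry and return; the stack push/pop and handler execution are elided.

static void example_take_and_complete(NVICState *nvic)
{
    int irq;
    bool targets_secure;

    armv7m_nvic_get_pending_irq_info(nvic, &irq, &targets_secure);
    /* ... push the exception frame for the target security state ... */
    armv7m_nvic_acknowledge_irq(nvic);                  /* pending -> active */
    /* ... run the handler; on exception return: */
    armv7m_nvic_complete_irq(nvic, irq, targets_secure);
}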
diff --git a/include/hw/intc/aspeed_vic.h b/include/hw/intc/aspeed_vic.h
index 107ff17c3b..68d6ab997a 100644
--- a/include/hw/intc/aspeed_vic.h
+++ b/include/hw/intc/aspeed_vic.h
@@ -14,13 +14,14 @@
#define ASPEED_VIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_VIC "aspeed.vic"
-#define ASPEED_VIC(obj) OBJECT_CHECK(AspeedVICState, (obj), TYPE_ASPEED_VIC)
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedVICState, ASPEED_VIC)
#define ASPEED_VIC_NR_IRQS 51
-typedef struct AspeedVICState {
+struct AspeedVICState {
/*< private >*/
SysBusDevice parent_obj;
@@ -43,6 +44,6 @@ typedef struct AspeedVICState {
/* 0=low-sensitive/falling-edge, 1=high-sensitive/rising-edge */
uint64_t event;
-} AspeedVICState;
+};
#endif /* ASPEED_VIC_H */
diff --git a/include/hw/intc/bcm2835_ic.h b/include/hw/intc/bcm2835_ic.h
index 392ded1cb3..588eb76c5c 100644
--- a/include/hw/intc/bcm2835_ic.h
+++ b/include/hw/intc/bcm2835_ic.h
@@ -9,14 +9,15 @@
#define BCM2835_IC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_IC "bcm2835-ic"
-#define BCM2835_IC(obj) OBJECT_CHECK(BCM2835ICState, (obj), TYPE_BCM2835_IC)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835ICState, BCM2835_IC)
#define BCM2835_IC_GPU_IRQ "gpu-irq"
#define BCM2835_IC_ARM_IRQ "arm-irq"
-typedef struct BCM2835ICState {
+struct BCM2835ICState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -30,6 +31,6 @@ typedef struct BCM2835ICState {
uint8_t arm_irq_level, arm_irq_enable;
bool fiq_enable;
uint8_t fiq_select;
-} BCM2835ICState;
+};
#endif
diff --git a/include/hw/intc/bcm2836_control.h b/include/hw/intc/bcm2836_control.h
index 2c22405686..a410c817e8 100644
--- a/include/hw/intc/bcm2836_control.h
+++ b/include/hw/intc/bcm2836_control.h
@@ -17,16 +17,16 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
/* 4 mailboxes per core, for 16 total */
#define BCM2836_NCORES 4
#define BCM2836_MBPERCORE 4
#define TYPE_BCM2836_CONTROL "bcm2836-control"
-#define BCM2836_CONTROL(obj) \
- OBJECT_CHECK(BCM2836ControlState, (obj), TYPE_BCM2836_CONTROL)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2836ControlState, BCM2836_CONTROL)
-typedef struct BCM2836ControlState {
+struct BCM2836ControlState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -56,6 +56,6 @@ typedef struct BCM2836ControlState {
/* outputs to CPU cores */
qemu_irq irq[BCM2836_NCORES];
qemu_irq fiq[BCM2836_NCORES];
-} BCM2836ControlState;
+};
#endif
diff --git a/include/hw/intc/exynos4210_combiner.h b/include/hw/intc/exynos4210_combiner.h
new file mode 100644
index 0000000000..bd207a7e6e
--- /dev/null
+++ b/include/hw/intc/exynos4210_combiner.h
@@ -0,0 +1,57 @@
+/*
+ * Samsung exynos4210 Interrupt Combiner
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ *
+ * Evgeny Voevodin <e.voevodin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_INTC_EXYNOS4210_COMBINER_H
+#define HW_INTC_EXYNOS4210_COMBINER_H
+
+#include "hw/sysbus.h"
+
+/*
+ * State for each output signal of internal combiner
+ */
+typedef struct CombinerGroupState {
+ uint8_t src_mask; /* 1 - source enabled, 0 - disabled */
+ uint8_t src_pending; /* Pending source interrupts before masking */
+} CombinerGroupState;
+
+#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210CombinerState, EXYNOS4210_COMBINER)
+
+/* Number of groups and total number of interrupts for the internal combiner */
+#define IIC_NGRP 64
+#define IIC_NIRQ (IIC_NGRP * 8)
+#define IIC_REGSET_SIZE 0x41
+
+struct Exynos4210CombinerState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+
+ struct CombinerGroupState group[IIC_NGRP];
+ uint32_t reg_set[IIC_REGSET_SIZE];
+ uint32_t icipsr[2];
+ uint32_t external; /* 1 means that this combiner is external */
+
+ qemu_irq output_irq[IIC_NGRP];
+};
+
+#endif
diff --git a/include/hw/intc/exynos4210_gic.h b/include/hw/intc/exynos4210_gic.h
new file mode 100644
index 0000000000..f64c4069c6
--- /dev/null
+++ b/include/hw/intc/exynos4210_gic.h
@@ -0,0 +1,43 @@
+/*
+ * Samsung exynos4210 GIC implementation. Based on hw/arm_gic.c
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ *
+ * Evgeny Voevodin <e.voevodin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HW_INTC_EXYNOS4210_GIC_H
+#define HW_INTC_EXYNOS4210_GIC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_EXYNOS4210_GIC "exynos4210.gic"
+OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210GicState, EXYNOS4210_GIC)
+
+#define EXYNOS4210_GIC_NCPUS 2
+
+struct Exynos4210GicState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion cpu_container;
+ MemoryRegion dist_container;
+ MemoryRegion cpu_alias[EXYNOS4210_GIC_NCPUS];
+ MemoryRegion dist_alias[EXYNOS4210_GIC_NCPUS];
+ uint32_t num_cpu;
+ DeviceState *gic;
+};
+
+#endif
diff --git a/include/hw/intc/goldfish_pic.h b/include/hw/intc/goldfish_pic.h
new file mode 100644
index 0000000000..3e79580367
--- /dev/null
+++ b/include/hw/intc/goldfish_pic.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Goldfish PIC
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#ifndef HW_INTC_GOLDFISH_PIC_H
+#define HW_INTC_GOLDFISH_PIC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_GOLDFISH_PIC "goldfish_pic"
+OBJECT_DECLARE_SIMPLE_TYPE(GoldfishPICState, GOLDFISH_PIC)
+
+#define GOLDFISH_PIC_IRQ_NB 32
+
+struct GoldfishPICState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t pending;
+ uint32_t enabled;
+
+ /* statistics */
+ uint64_t stats_irq_count[32];
+ /* for tracing */
+ uint8_t idx;
+};
+
+#endif
diff --git a/include/hw/sparc/grlib.h b/include/hw/intc/grlib_irqmp.h
index 78b6178fcd..a76acbf940 100644
--- a/include/hw/sparc/grlib.h
+++ b/include/hw/intc/grlib_irqmp.h
@@ -1,7 +1,9 @@
/*
* QEMU GRLIB Components
*
- * Copyright (c) 2010-2019 AdaCore
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2010-2024 AdaCore
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -22,8 +24,8 @@
* THE SOFTWARE.
*/
-#ifndef GRLIB_H
-#define GRLIB_H
+#ifndef GRLIB_IRQMP_H
+#define GRLIB_IRQMP_H
#include "hw/sysbus.h"
@@ -32,18 +34,8 @@
*/
/* IRQMP */
-#define TYPE_GRLIB_IRQMP "grlib,irqmp"
-
-typedef void (*set_pil_in_fn) (void *opaque, uint32_t pil_in);
-
-void grlib_irqmp_set_irq(void *opaque, int irq, int level);
-
-void grlib_irqmp_ack(DeviceState *dev, int intno);
-
-/* GPTimer */
-#define TYPE_GRLIB_GPTIMER "grlib,gptimer"
+#define TYPE_GRLIB_IRQMP "grlib-irqmp"
-/* APB UART */
-#define TYPE_GRLIB_APB_UART "grlib,apbuart"
+void grlib_irqmp_ack(DeviceState *dev, unsigned int cpu, int intno);
-#endif /* GRLIB_H */
+#endif /* GRLIB_IRQMP_H */
diff --git a/include/hw/intc/heathrow_pic.h b/include/hw/intc/heathrow_pic.h
index b163e27ab9..c0a7f6f546 100644
--- a/include/hw/intc/heathrow_pic.h
+++ b/include/hw/intc/heathrow_pic.h
@@ -27,9 +27,10 @@
#define HW_INTC_HEATHROW_PIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_HEATHROW "heathrow"
-#define HEATHROW(obj) OBJECT_CHECK(HeathrowState, (obj), TYPE_HEATHROW)
+OBJECT_DECLARE_SIMPLE_TYPE(HeathrowState, HEATHROW)
typedef struct HeathrowPICState {
uint32_t events;
@@ -38,13 +39,13 @@ typedef struct HeathrowPICState {
uint32_t level_triggered;
} HeathrowPICState;
-typedef struct HeathrowState {
+struct HeathrowState {
SysBusDevice parent_obj;
MemoryRegion mem;
HeathrowPICState pics[2];
qemu_irq irqs[1];
-} HeathrowState;
+};
#define HEATHROW_NUM_IRQS 64
diff --git a/include/hw/intc/i8259.h b/include/hw/intc/i8259.h
index e2b1e8c59a..c412575775 100644
--- a/include/hw/intc/i8259.h
+++ b/include/hw/intc/i8259.h
@@ -3,10 +3,18 @@
/* i8259.c */
-extern DeviceState *isa_pic;
-qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq);
+extern PICCommonState *isa_pic;
+
+/*
+ * i8259_init()
+ *
+ * Create an i8259 device on an ISA @bus,
+ * connect its output to @parent_irq_in,
+ * return an (allocated) array of 16 input IRQs.
+ */
+qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in);
qemu_irq *kvm_i8259_init(ISABus *bus);
-int pic_get_output(DeviceState *d);
-int pic_read_irq(DeviceState *d);
+int pic_get_output(PICCommonState *s);
+int pic_read_irq(PICCommonState *s);
#endif
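
For callers, the reworked i8259_init() contract is: create the device pair on @bus, wire its output to @parent_irq_in, and receive a 16-entry array of input IRQs. A minimal board-side sketch follows, assuming an existing ISABus *isa_bus and a qemu_irq cpu_intr leading to the next interrupt controller; the variable names are illustrative only.

/* Sketch: board-side wiring of the i8259 pair (names are illustrative). */
qemu_irq *isa_irqs_in;

isa_irqs_in = i8259_init(isa_bus, cpu_intr);        /* 16 allocated input IRQs */
isa_bus_register_input_irqs(isa_bus, isa_irqs_in);  /* see the isa.h changes below */
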
diff --git a/include/hw/intc/ibex_plic.h b/include/hw/intc/ibex_plic.h
deleted file mode 100644
index ddc7909903..0000000000
--- a/include/hw/intc/ibex_plic.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * QEMU RISC-V lowRISC Ibex PLIC
- *
- * Copyright (c) 2020 Western Digital
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2 or later, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HW_IBEX_PLIC_H
-#define HW_IBEX_PLIC_H
-
-#include "hw/sysbus.h"
-
-#define TYPE_IBEX_PLIC "ibex-plic"
-#define IBEX_PLIC(obj) \
- OBJECT_CHECK(IbexPlicState, (obj), TYPE_IBEX_PLIC)
-
-typedef struct IbexPlicState {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- MemoryRegion mmio;
-
- uint32_t *pending;
- uint32_t *source;
- uint32_t *priority;
- uint32_t *enable;
- uint32_t threshold;
- uint32_t claim;
-
- /* config */
- uint32_t num_cpus;
- uint32_t num_sources;
-
- uint32_t pending_base;
- uint32_t pending_num;
-
- uint32_t source_base;
- uint32_t source_num;
-
- uint32_t priority_base;
- uint32_t priority_num;
-
- uint32_t enable_base;
- uint32_t enable_num;
-
- uint32_t threshold_base;
-
- uint32_t claim_base;
-} IbexPlicState;
-
-#endif /* HW_IBEX_PLIC_H */
diff --git a/include/hw/intc/imx_avic.h b/include/hw/intc/imx_avic.h
index 1b80769018..75fbd1a89c 100644
--- a/include/hw/intc/imx_avic.h
+++ b/include/hw/intc/imx_avic.h
@@ -18,9 +18,10 @@
#define IMX_AVIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_AVIC "imx.avic"
-#define IMX_AVIC(obj) OBJECT_CHECK(IMXAVICState, (obj), TYPE_IMX_AVIC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXAVICState, IMX_AVIC)
#define IMX_AVIC_NUM_IRQS 64
@@ -36,7 +37,7 @@
#define PRIO_PER_WORD (sizeof(uint32_t) * 8 / 4)
#define PRIO_WORDS (IMX_AVIC_NUM_IRQS/PRIO_PER_WORD)
-typedef struct IMXAVICState{
+struct IMXAVICState {
/*< private >*/
SysBusDevice parent_obj;
@@ -50,6 +51,6 @@ typedef struct IMXAVICState{
qemu_irq irq;
qemu_irq fiq;
uint32_t prio[PRIO_WORDS]; /* Priorities are 4-bits each */
-} IMXAVICState;
+};
#endif /* IMX_AVIC_H */
diff --git a/include/hw/intc/imx_gpcv2.h b/include/hw/intc/imx_gpcv2.h
index ed978b24bb..7bdee7e80a 100644
--- a/include/hw/intc/imx_gpcv2.h
+++ b/include/hw/intc/imx_gpcv2.h
@@ -2,21 +2,22 @@
#define IMX_GPCV2_H
#include "hw/sysbus.h"
+#include "qom/object.h"
enum IMXGPCv2Registers {
GPC_NUM = 0xE00 / sizeof(uint32_t),
};
-typedef struct IMXGPCv2State {
+struct IMXGPCv2State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
uint32_t regs[GPC_NUM];
-} IMXGPCv2State;
+};
#define TYPE_IMX_GPCV2 "imx-gpcv2"
-#define IMX_GPCV2(obj) OBJECT_CHECK(IMXGPCv2State, (obj), TYPE_IMX_GPCV2)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXGPCv2State, IMX_GPCV2)
#endif /* IMX_GPCV2_H */
diff --git a/include/hw/intc/intc.h b/include/hw/intc/intc.h
index fb3e8e621f..7018f608ca 100644
--- a/include/hw/intc/intc.h
+++ b/include/hw/intc/intc.h
@@ -5,19 +5,16 @@
#define TYPE_INTERRUPT_STATS_PROVIDER "intctrl"
-#define INTERRUPT_STATS_PROVIDER_CLASS(klass) \
- OBJECT_CLASS_CHECK(InterruptStatsProviderClass, (klass), \
+typedef struct InterruptStatsProviderClass InterruptStatsProviderClass;
+DECLARE_CLASS_CHECKERS(InterruptStatsProviderClass, INTERRUPT_STATS_PROVIDER,
TYPE_INTERRUPT_STATS_PROVIDER)
-#define INTERRUPT_STATS_PROVIDER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(InterruptStatsProviderClass, (obj), \
- TYPE_INTERRUPT_STATS_PROVIDER)
#define INTERRUPT_STATS_PROVIDER(obj) \
INTERFACE_CHECK(InterruptStatsProvider, (obj), \
TYPE_INTERRUPT_STATS_PROVIDER)
typedef struct InterruptStatsProvider InterruptStatsProvider;
-typedef struct InterruptStatsProviderClass {
+struct InterruptStatsProviderClass {
InterfaceClass parent;
/* The returned pointer and statistics must remain valid until
@@ -26,6 +23,6 @@ typedef struct InterruptStatsProviderClass {
bool (*get_statistics)(InterruptStatsProvider *obj, uint64_t **irq_counts,
unsigned int *nb_irqs);
void (*print_info)(InterruptStatsProvider *obj, Monitor *mon);
-} InterruptStatsProviderClass;
+};
#endif
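
A device that wants to expose its counters through this interface fills in the callbacks from its class_init. The following is a minimal sketch for a hypothetical FooPIC device with a stats_irq_count[] array; FooPICState, FOO_PIC() and the function names are made up for illustration.

/* Sketch of an InterruptStatsProvider implementation (hypothetical FooPIC). */
static bool foo_pic_get_statistics(InterruptStatsProvider *obj,
                                   uint64_t **irq_counts, unsigned int *nb_irqs)
{
    FooPICState *s = FOO_PIC(obj);

    *irq_counts = s->stats_irq_count;           /* must remain valid, see above */
    *nb_irqs = ARRAY_SIZE(s->stats_irq_count);
    return true;
}

static void foo_pic_class_init(ObjectClass *klass, void *data)
{
    InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);

    ic->get_statistics = foo_pic_get_statistics;
}
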
diff --git a/include/hw/i386/ioapic.h b/include/hw/intc/ioapic.h
index 59fcb158a7..aa122e25e3 100644
--- a/include/hw/i386/ioapic.h
+++ b/include/hw/intc/ioapic.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -17,15 +17,17 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HW_IOAPIC_H
-#define HW_IOAPIC_H
+#ifndef HW_INTC_IOAPIC_H
+#define HW_INTC_IOAPIC_H
#define IOAPIC_NUM_PINS 24
#define IO_APIC_DEFAULT_ADDRESS 0xfec00000
+#define IO_APIC_SECONDARY_ADDRESS (IO_APIC_DEFAULT_ADDRESS + 0x10000)
+#define IO_APIC_SECONDARY_IRQBASE 24 /* primary 0 -> 23, secondary 24 -> 47 */
#define TYPE_KVM_IOAPIC "kvm-ioapic"
#define TYPE_IOAPIC "ioapic"
void ioapic_eoi_broadcast(int vector);
-#endif /* HW_IOAPIC_H */
+#endif /* HW_INTC_IOAPIC_H */
diff --git a/include/hw/intc/kvm_irqcount.h b/include/hw/intc/kvm_irqcount.h
new file mode 100644
index 0000000000..0ed5999e49
--- /dev/null
+++ b/include/hw/intc/kvm_irqcount.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#ifndef KVM_IRQCOUNT_H
+#define KVM_IRQCOUNT_H
+
+void kvm_report_irq_delivered(int delivered);
+void kvm_reset_irq_delivered(void);
+int kvm_get_irq_delivered(void);
+
+#endif
diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h
new file mode 100644
index 0000000000..a0a46b888c
--- /dev/null
+++ b/include/hw/intc/loongarch_extioi.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 3A5000 ext interrupt controller definitions
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "hw/sysbus.h"
+#include "hw/loongarch/virt.h"
+
+#ifndef LOONGARCH_EXTIOI_H
+#define LOONGARCH_EXTIOI_H
+
+#define LS3A_INTC_IP 8
+#define EXTIOI_IRQS (256)
+#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8)
+/* IRQs from the EXTIOI are routed to no more than 4 CPUs */
+#define EXTIOI_CPUS (4)
+/* map to ipnum per 32 irqs */
+#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32)
+#define EXTIOI_IRQS_COREMAP_SIZE 256
+#define EXTIOI_IRQS_NODETYPE_COUNT 16
+#define EXTIOI_IRQS_GROUP_COUNT 8
+
+#define APIC_OFFSET 0x400
+#define APIC_BASE (0x1000ULL + APIC_OFFSET)
+
+#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET)
+#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET)
+#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET)
+#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET)
+#define EXTIOI_ISR_START (0x700 - APIC_OFFSET)
+#define EXTIOI_ISR_END (0x720 - APIC_OFFSET)
+#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET)
+#define EXTIOI_COREISR_END (0xB20 - APIC_OFFSET)
+#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET)
+#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET)
+
+typedef struct ExtIOICore {
+ uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT];
+ DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS);
+ qemu_irq parent_irq[LS3A_INTC_IP];
+} ExtIOICore;
+
+#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi"
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI)
+struct LoongArchExtIOI {
+ SysBusDevice parent_obj;
+ uint32_t num_cpu;
+ /* hardware state */
+ uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2];
+ uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT];
+ uint32_t isr[EXTIOI_IRQS / 32];
+ uint32_t enable[EXTIOI_IRQS / 32];
+ uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4];
+ uint32_t coremap[EXTIOI_IRQS / 4];
+ uint32_t sw_pending[EXTIOI_IRQS / 32];
+ uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE];
+ uint8_t sw_coremap[EXTIOI_IRQS];
+ qemu_irq irq[EXTIOI_IRQS];
+ ExtIOICore *cpu;
+ MemoryRegion extioi_system_mem;
+};
+#endif /* LOONGARCH_EXTIOI_H */
diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h
new file mode 100644
index 0000000000..1c1e834849
--- /dev/null
+++ b/include/hw/intc/loongarch_ipi.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch ipi interrupt header files
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGARCH_IPI_H
+#define HW_LOONGARCH_IPI_H
+
+#include "hw/sysbus.h"
+
+/* Mainly used by iocsr read and write */
+#define SMP_IPI_MAILBOX 0x1000ULL
+#define CORE_STATUS_OFF 0x0
+#define CORE_EN_OFF 0x4
+#define CORE_SET_OFF 0x8
+#define CORE_CLEAR_OFF 0xc
+#define CORE_BUF_20 0x20
+#define CORE_BUF_28 0x28
+#define CORE_BUF_30 0x30
+#define CORE_BUF_38 0x38
+#define IOCSR_IPI_SEND 0x40
+#define IOCSR_MAIL_SEND 0x48
+#define IOCSR_ANY_SEND 0x158
+
+#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND)
+#define MAIL_SEND_OFFSET 0
+#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND)
+
+#define IPI_MBX_NUM 4
+
+#define TYPE_LOONGARCH_IPI "loongarch_ipi"
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchIPI, LOONGARCH_IPI)
+
+typedef struct IPICore {
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ /* each 64-bit mailbox buffer is split into two 32-bit words */
+ uint32_t buf[IPI_MBX_NUM * 2];
+ qemu_irq irq;
+} IPICore;
+
+struct LoongArchIPI {
+ SysBusDevice parent_obj;
+ MemoryRegion ipi_iocsr_mem;
+ MemoryRegion ipi64_iocsr_mem;
+ uint32_t num_cpu;
+ IPICore *cpu;
+};
+
+#endif
diff --git a/include/hw/intc/loongarch_pch_msi.h b/include/hw/intc/loongarch_pch_msi.h
new file mode 100644
index 0000000000..b8586fb3b6
--- /dev/null
+++ b/include/hw/intc/loongarch_pch_msi.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 7A1000 I/O interrupt controller definitions
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "hw/sysbus.h"
+
+#define TYPE_LOONGARCH_PCH_MSI "loongarch_pch_msi"
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHMSI, LOONGARCH_PCH_MSI)
+
+/* MSI IRQs range from 32 to 255 */
+#define PCH_MSI_IRQ_START 32
+#define PCH_MSI_IRQ_END 255
+#define PCH_MSI_IRQ_NUM 224
+
+struct LoongArchPCHMSI {
+ SysBusDevice parent_obj;
+ qemu_irq *pch_msi_irq;
+ MemoryRegion msi_mmio;
+ /* irq base passed to upper extioi intc */
+ unsigned int irq_base;
+ unsigned int irq_num;
+};
diff --git a/include/hw/intc/loongarch_pch_pic.h b/include/hw/intc/loongarch_pch_pic.h
new file mode 100644
index 0000000000..d5437e88f2
--- /dev/null
+++ b/include/hw/intc/loongarch_pch_pic.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch 7A1000 I/O interrupt controller definitions
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#include "hw/sysbus.h"
+
+#define TYPE_LOONGARCH_PCH_PIC "loongarch_pch_pic"
+#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PCH_PIC#name
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHPIC, LOONGARCH_PCH_PIC)
+
+#define PCH_PIC_INT_ID_VAL 0x7000000UL
+#define PCH_PIC_INT_ID_VER 0x1UL
+
+#define PCH_PIC_INT_ID_LO 0x00
+#define PCH_PIC_INT_ID_HI 0x04
+#define PCH_PIC_INT_MASK_LO 0x20
+#define PCH_PIC_INT_MASK_HI 0x24
+#define PCH_PIC_HTMSI_EN_LO 0x40
+#define PCH_PIC_HTMSI_EN_HI 0x44
+#define PCH_PIC_INT_EDGE_LO 0x60
+#define PCH_PIC_INT_EDGE_HI 0x64
+#define PCH_PIC_INT_CLEAR_LO 0x80
+#define PCH_PIC_INT_CLEAR_HI 0x84
+#define PCH_PIC_AUTO_CTRL0_LO 0xc0
+#define PCH_PIC_AUTO_CTRL0_HI 0xc4
+#define PCH_PIC_AUTO_CTRL1_LO 0xe0
+#define PCH_PIC_AUTO_CTRL1_HI 0xe4
+#define PCH_PIC_ROUTE_ENTRY_OFFSET 0x100
+#define PCH_PIC_ROUTE_ENTRY_END 0x13f
+#define PCH_PIC_HTMSI_VEC_OFFSET 0x200
+#define PCH_PIC_HTMSI_VEC_END 0x23f
+#define PCH_PIC_INT_STATUS_LO 0x3a0
+#define PCH_PIC_INT_STATUS_HI 0x3a4
+#define PCH_PIC_INT_POL_LO 0x3e0
+#define PCH_PIC_INT_POL_HI 0x3e4
+
+#define STATUS_LO_START 0
+#define STATUS_HI_START 0x4
+#define POL_LO_START 0x40
+#define POL_HI_START 0x44
+struct LoongArchPCHPIC {
+ SysBusDevice parent_obj;
+ qemu_irq parent_irq[64];
+ uint64_t int_mask; /*0x020 interrupt mask register*/
+ uint64_t htmsi_en; /*0x040 1=msi*/
+ uint64_t intedge; /* 0x060: edge = 1, level = 0 */
+ uint64_t intclr; /* 0x080: clears edge interrupts; write 1 to clear, 0 is ignored */
+ uint64_t auto_crtl0; /*0x0c0*/
+ uint64_t auto_crtl1; /*0x0e0*/
+ uint64_t last_intirr; /* edge detection */
+ uint64_t intirr; /* 0x380 interrupt request register */
+ uint64_t intisr; /* 0x3a0 interrupt service register */
+ /*
+ * 0x3e0 interrupt level polarity selection register;
+ * 0 selects high-level triggering
+ */
+ uint64_t int_polarity;
+
+ uint8_t route_entry[64]; /*0x100 - 0x138*/
+ uint8_t htmsi_vector[64]; /*0x200 - 0x238*/
+
+ MemoryRegion iomem32_low;
+ MemoryRegion iomem32_high;
+ MemoryRegion iomem8;
+ unsigned int irq_num;
+};
diff --git a/include/hw/intc/loongson_liointc.h b/include/hw/intc/loongson_liointc.h
new file mode 100644
index 0000000000..848e65eb35
--- /dev/null
+++ b/include/hw/intc/loongson_liointc.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2020 Huacai Chen <chenhc@lemote.com>
+ * Copyright (c) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ */
+
+#ifndef LOONGSON_LIOINTC_H
+#define LOONGSON_LIOINTC_H
+
+#include "qemu/units.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_LOONGSON_LIOINTC "loongson.liointc"
+DECLARE_INSTANCE_CHECKER(struct loongson_liointc, LOONGSON_LIOINTC,
+ TYPE_LOONGSON_LIOINTC)
+
+#endif /* LOONGSON_LIOINTC_H */
diff --git a/include/hw/intc/m68k_irqc.h b/include/hw/intc/m68k_irqc.h
new file mode 100644
index 0000000000..693e33b0aa
--- /dev/null
+++ b/include/hw/intc/m68k_irqc.h
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * QEMU Motorola 680x0 IRQ Controller
+ *
+ * (c) 2020 Laurent Vivier <laurent@vivier.eu>
+ *
+ */
+
+#ifndef M68K_IRQC_H
+#define M68K_IRQC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_M68K_IRQC "m68k-irq-controller"
+#define M68K_IRQC(obj) OBJECT_CHECK(M68KIRQCState, (obj), \
+ TYPE_M68K_IRQC)
+
+#define M68K_IRQC_AUTOVECTOR_BASE 25
+
+enum {
+ M68K_IRQC_LEVEL_1 = 0,
+ M68K_IRQC_LEVEL_2,
+ M68K_IRQC_LEVEL_3,
+ M68K_IRQC_LEVEL_4,
+ M68K_IRQC_LEVEL_5,
+ M68K_IRQC_LEVEL_6,
+ M68K_IRQC_LEVEL_7,
+};
+#define M68K_IRQC_LEVEL_NUM (M68K_IRQC_LEVEL_7 - M68K_IRQC_LEVEL_1 + 1)
+
+typedef struct M68KIRQCState {
+ SysBusDevice parent_obj;
+
+ uint8_t ipr;
+ ArchCPU *cpu;
+
+ /* statistics */
+ uint64_t stats_irq_count[M68K_IRQC_LEVEL_NUM];
+} M68KIRQCState;
+
+#endif
diff --git a/include/hw/intc/mips_gic.h b/include/hw/intc/mips_gic.h
index 8428287bf9..5e4c71edd4 100644
--- a/include/hw/intc/mips_gic.h
+++ b/include/hw/intc/mips_gic.h
@@ -15,6 +15,7 @@
#include "hw/timer/mips_gictimer.h"
#include "hw/sysbus.h"
#include "cpu.h"
+#include "qom/object.h"
/*
* GIC Specific definitions
*/
@@ -170,13 +171,12 @@
#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
#define TYPE_MIPS_GIC "mips-gic"
-#define MIPS_GIC(obj) OBJECT_CHECK(MIPSGICState, (obj), TYPE_MIPS_GIC)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSGICState, MIPS_GIC)
/* Support up to 32 VPs and 256 IRQs */
#define GIC_MAX_VPS 32
#define GIC_MAX_INTRS 256
-typedef struct MIPSGICState MIPSGICState;
typedef struct MIPSGICIRQState MIPSGICIRQState;
typedef struct MIPSGICVPState MIPSGICVPState;
@@ -211,8 +211,8 @@ struct MIPSGICState {
/* GIC VP Timer */
MIPSGICTimerState *gic_timer;
- int32_t num_vps;
- int32_t num_irq;
+ uint32_t num_vps;
+ uint32_t num_irq;
};
#endif /* MIPS_GIC_H */
diff --git a/include/hw/intc/ppc-uic.h b/include/hw/intc/ppc-uic.h
new file mode 100644
index 0000000000..4d82e9a3c6
--- /dev/null
+++ b/include/hw/intc/ppc-uic.h
@@ -0,0 +1,78 @@
+/*
+ * "Universal" Interrupt Controller for PowerPPC 4xx embedded processors
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_INTC_PPC_UIC_H
+#define HW_INTC_PPC_UIC_H
+
+#include "hw/ppc/ppc4xx.h"
+
+#define TYPE_PPC_UIC "ppc-uic"
+OBJECT_DECLARE_SIMPLE_TYPE(PPCUIC, PPC_UIC)
+
+/*
+ * QEMU interface:
+ * QOM property "cpu": link to the PPC CPU
+ * (no default, must be set)
+ * QOM property "dcr-base": base of the bank of DCR registers for the UIC
+ * (default 0x30)
+ * QOM property "use-vectors": true if the UIC has vector registers
+ * (default true)
+ * unnamed GPIO inputs 0..UIC_MAX_IRQ: input IRQ lines
+ * sysbus IRQs:
+ * 0 (PPCUIC_OUTPUT_INT): output INT line to the CPU
+ * 1 (PPCUIC_OUTPUT_CINT): output CINT line to the CPU
+ */
+
+#define UIC_MAX_IRQ 32
+
+/* Symbolic constants for the sysbus IRQ outputs */
+enum {
+ PPCUIC_OUTPUT_INT = 0,
+ PPCUIC_OUTPUT_CINT = 1,
+ PPCUIC_OUTPUT_NB,
+};
+
+struct PPCUIC {
+ /*< private >*/
+ Ppc4xxDcrDeviceState parent_obj;
+
+ /*< public >*/
+ qemu_irq output_int;
+ qemu_irq output_cint;
+
+ /* properties */
+ uint32_t dcr_base;
+ bool use_vectors;
+
+ uint32_t level; /* Remembers the state of level-triggered interrupts. */
+ uint32_t uicsr; /* Status register */
+ uint32_t uicer; /* Enable register */
+ uint32_t uiccr; /* Critical register */
+ uint32_t uicpr; /* Polarity register */
+ uint32_t uictr; /* Triggering register */
+ uint32_t uicvcr; /* Vector configuration register */
+ uint32_t uicvr;
+};
+
+#endif
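
Given the QEMU interface described in the header comment above, board code would create and wire a UIC roughly as below. This is only a sketch: cpu is assumed to be the board's PowerPC CPU object, cpu_int_in/cpu_cint_in are assumed qemu_irq lines into that CPU, and the actual realization path used by a given board may differ.

/* Sketch: instantiating a UIC and wiring its INT/CINT outputs. */
DeviceState *uic = qdev_new(TYPE_PPC_UIC);

qdev_prop_set_uint32(uic, "dcr-base", 0x30);              /* default shown above */
object_property_set_link(OBJECT(uic), "cpu", OBJECT(cpu), &error_abort);
sysbus_realize_and_unref(SYS_BUS_DEVICE(uic), &error_fatal);

/* sysbus IRQs 0 and 1 are the INT/CINT outputs documented above */
sysbus_connect_irq(SYS_BUS_DEVICE(uic), PPCUIC_OUTPUT_INT, cpu_int_in);
sysbus_connect_irq(SYS_BUS_DEVICE(uic), PPCUIC_OUTPUT_CINT, cpu_cint_in);
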
diff --git a/include/hw/intc/realview_gic.h b/include/hw/intc/realview_gic.h
index 1783ea11b9..f37339dc0b 100644
--- a/include/hw/intc/realview_gic.h
+++ b/include/hw/intc/realview_gic.h
@@ -12,17 +12,17 @@
#include "hw/sysbus.h"
#include "hw/intc/arm_gic.h"
+#include "qom/object.h"
#define TYPE_REALVIEW_GIC "realview_gic"
-#define REALVIEW_GIC(obj) \
- OBJECT_CHECK(RealViewGICState, (obj), TYPE_REALVIEW_GIC)
+OBJECT_DECLARE_SIMPLE_TYPE(RealViewGICState, REALVIEW_GIC)
-typedef struct RealViewGICState {
+struct RealViewGICState {
SysBusDevice parent_obj;
MemoryRegion container;
GICState gic;
-} RealViewGICState;
+};
#endif
diff --git a/include/hw/intc/riscv_aclint.h b/include/hw/intc/riscv_aclint.h
new file mode 100644
index 0000000000..693415eb6d
--- /dev/null
+++ b/include/hw/intc/riscv_aclint.h
@@ -0,0 +1,83 @@
+/*
+ * RISC-V ACLINT (Advanced Core Local Interruptor) interface
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017 SiFive, Inc.
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_ACLINT_H
+#define HW_RISCV_ACLINT_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_RISCV_ACLINT_MTIMER "riscv.aclint.mtimer"
+
+#define RISCV_ACLINT_MTIMER(obj) \
+ OBJECT_CHECK(RISCVAclintMTimerState, (obj), TYPE_RISCV_ACLINT_MTIMER)
+
+typedef struct RISCVAclintMTimerState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ uint64_t time_delta;
+ uint64_t *timecmp;
+ QEMUTimer **timers;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t hartid_base;
+ uint32_t num_harts;
+ uint32_t timecmp_base;
+ uint32_t time_base;
+ uint32_t aperture_size;
+ uint32_t timebase_freq;
+ qemu_irq *timer_irqs;
+} RISCVAclintMTimerState;
+
+DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size,
+ uint32_t hartid_base, uint32_t num_harts,
+ uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq,
+ bool provide_rdtime);
+
+#define TYPE_RISCV_ACLINT_SWI "riscv.aclint.swi"
+
+#define RISCV_ACLINT_SWI(obj) \
+ OBJECT_CHECK(RISCVAclintSwiState, (obj), TYPE_RISCV_ACLINT_SWI)
+
+typedef struct RISCVAclintSwiState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t hartid_base;
+ uint32_t num_harts;
+ uint32_t sswi;
+ qemu_irq *soft_irqs;
+} RISCVAclintSwiState;
+
+DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base,
+ uint32_t num_harts, bool sswi);
+
+enum {
+ RISCV_ACLINT_DEFAULT_MTIMECMP = 0x0,
+ RISCV_ACLINT_DEFAULT_MTIME = 0x7ff8,
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE = 0x8000,
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ = 10000000,
+ RISCV_ACLINT_MAX_HARTS = 4095,
+ RISCV_ACLINT_SWI_SIZE = 0x4000
+};
+
+#endif
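
The two create helpers at the bottom of this header are what board code normally calls. A minimal sketch using the RISCV_ACLINT_DEFAULT_* constants declared above; the base addresses and the 4-hart count are examples only, not values mandated by the header.

/* Sketch: creating an MTIMER and a machine-mode SWI block for 4 harts. */
riscv_aclint_mtimer_create(0x2004000, RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                           0, 4,                 /* hartid_base, num_harts */
                           RISCV_ACLINT_DEFAULT_MTIMECMP,
                           RISCV_ACLINT_DEFAULT_MTIME,
                           RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ,
                           true);                /* provide_rdtime */

riscv_aclint_swi_create(0x2000000, 0, 4, false); /* sswi=false: M-mode MSWI */
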
diff --git a/include/hw/intc/riscv_aplic.h b/include/hw/intc/riscv_aplic.h
new file mode 100644
index 0000000000..de8532fbc3
--- /dev/null
+++ b/include/hw/intc/riscv_aplic.h
@@ -0,0 +1,79 @@
+/*
+ * RISC-V APLIC (Advanced Platform Level Interrupt Controller) interface
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_APLIC_H
+#define HW_RISCV_APLIC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RISCV_APLIC "riscv.aplic"
+
+typedef struct RISCVAPLICState RISCVAPLICState;
+DECLARE_INSTANCE_CHECKER(RISCVAPLICState, RISCV_APLIC, TYPE_RISCV_APLIC)
+
+#define APLIC_MIN_SIZE 0x4000
+#define APLIC_SIZE_ALIGN(__x) (((__x) + (APLIC_MIN_SIZE - 1)) & \
+ ~(APLIC_MIN_SIZE - 1))
+#define APLIC_SIZE(__num_harts) (APLIC_MIN_SIZE + \
+ APLIC_SIZE_ALIGN(32 * (__num_harts)))
+
+struct RISCVAPLICState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ qemu_irq *external_irqs;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t bitfield_words;
+ uint32_t domaincfg;
+ uint32_t mmsicfgaddr;
+ uint32_t mmsicfgaddrH;
+ uint32_t smsicfgaddr;
+ uint32_t smsicfgaddrH;
+ uint32_t genmsi;
+ uint32_t *sourcecfg;
+ uint32_t *state;
+ uint32_t *target;
+ uint32_t *idelivery;
+ uint32_t *iforce;
+ uint32_t *ithreshold;
+
+ /* topology */
+#define QEMU_APLIC_MAX_CHILDREN 16
+ struct RISCVAPLICState *parent;
+ struct RISCVAPLICState *children[QEMU_APLIC_MAX_CHILDREN];
+ uint16_t num_children;
+
+ /* config */
+ uint32_t aperture_size;
+ uint32_t hartid_base;
+ uint32_t num_harts;
+ uint32_t iprio_mask;
+ uint32_t num_irqs;
+ bool msimode;
+ bool mmode;
+};
+
+void riscv_aplic_add_child(DeviceState *parent, DeviceState *child);
+
+DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
+ uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources,
+ uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent);
+
+#endif
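
riscv_aplic_create() takes the parent domain as its last argument, which is how an M-mode root domain and an S-mode child domain are tied together. A minimal sketch follows; the addresses, source count and priority bits are examples, not values mandated by the header.

/* Sketch: one machine-level APLIC domain with an S-mode child domain. */
DeviceState *m_aplic, *s_aplic;

m_aplic = riscv_aplic_create(0xc000000, APLIC_SIZE(4),
                             0, 4,      /* hartid_base, num_harts */
                             96, 3,     /* num_sources, iprio_bits */
                             false,     /* msimode */
                             true,      /* mmode */
                             NULL);     /* no parent: root domain */

s_aplic = riscv_aplic_create(0xd000000, APLIC_SIZE(4),
                             0, 4, 96, 3,
                             false,     /* msimode */
                             false,     /* S-mode domain */
                             m_aplic);  /* child of the root domain */
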
diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
new file mode 100644
index 0000000000..58c2aaa8dc
--- /dev/null
+++ b/include/hw/intc/riscv_imsic.h
@@ -0,0 +1,68 @@
+/*
+ * RISC-V IMSIC (Incoming Message Signal Interrupt Controller) interface
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_RISCV_IMSIC_H
+#define HW_RISCV_IMSIC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_RISCV_IMSIC "riscv.imsic"
+
+typedef struct RISCVIMSICState RISCVIMSICState;
+DECLARE_INSTANCE_CHECKER(RISCVIMSICState, RISCV_IMSIC, TYPE_RISCV_IMSIC)
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_SIZE(__num_pages) ((__num_pages) * IMSIC_MMIO_PAGE_SZ)
+
+#define IMSIC_MMIO_HART_GUEST_MAX_BTIS 6
+#define IMSIC_MMIO_GROUP_MIN_SHIFT 24
+
+#define IMSIC_HART_NUM_GUESTS(__guest_bits) \
+ (1U << (__guest_bits))
+#define IMSIC_HART_SIZE(__guest_bits) \
+ (IMSIC_HART_NUM_GUESTS(__guest_bits) * IMSIC_MMIO_PAGE_SZ)
+#define IMSIC_GROUP_NUM_HARTS(__hart_bits) \
+ (1U << (__hart_bits))
+#define IMSIC_GROUP_SIZE(__hart_bits, __guest_bits) \
+ (IMSIC_GROUP_NUM_HARTS(__hart_bits) * IMSIC_HART_SIZE(__guest_bits))
+
+struct RISCVIMSICState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ qemu_irq *external_irqs;
+
+ /*< public >*/
+ MemoryRegion mmio;
+ uint32_t num_eistate;
+ uint32_t *eidelivery;
+ uint32_t *eithreshold;
+ uint32_t *eistate;
+
+ /* config */
+ bool mmode;
+ uint32_t hartid;
+ uint32_t num_pages;
+ uint32_t num_irqs;
+};
+
+DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
+ uint32_t num_pages, uint32_t num_ids);
+
+#endif
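
The geometry macros above determine how much MMIO space each IMSIC interrupt file occupies. A minimal sketch of how a board might place one M-mode IMSIC per hart; imsic_base, num_harts and num_ids are assumed board parameters, and zero guest bits are used so each hart gets a single page.

/* Sketch: one M-mode IMSIC per hart, one page each (no guest files). */
hwaddr imsic_base = 0x24000000;

for (uint32_t hartid = 0; hartid < num_harts; hartid++) {
    hwaddr addr = imsic_base + hartid * IMSIC_HART_SIZE(0); /* 0 guest bits */

    riscv_imsic_create(addr, hartid, true /* mmode */,
                       1 /* num_pages */, num_ids);
}
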
diff --git a/include/hw/intc/rx_icu.h b/include/hw/intc/rx_icu.h
index 7176015cd9..b23504f3dd 100644
--- a/include/hw/intc/rx_icu.h
+++ b/include/hw/intc/rx_icu.h
@@ -22,6 +22,7 @@
#define HW_INTC_RX_ICU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
enum TRG_MODE {
TRG_LEVEL = 0,
@@ -68,9 +69,8 @@ struct RXICUState {
qemu_irq _fir;
qemu_irq _swi;
};
-typedef struct RXICUState RXICUState;
#define TYPE_RX_ICU "rx-icu"
-#define RX_ICU(obj) OBJECT_CHECK(RXICUState, (obj), TYPE_RX_ICU)
+OBJECT_DECLARE_SIMPLE_TYPE(RXICUState, RX_ICU)
-#endif /* RX_ICU_H */
+#endif /* HW_INTC_RX_ICU_H */
diff --git a/include/hw/riscv/sifive_plic.h b/include/hw/intc/sifive_plic.h
index 4421e81249..d3f45ec248 100644
--- a/include/hw/riscv/sifive_plic.h
+++ b/include/hw/intc/sifive_plic.h
@@ -22,16 +22,17 @@
#define HW_SIFIVE_PLIC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_SIFIVE_PLIC "riscv.sifive.plic"
-#define SIFIVE_PLIC(obj) \
- OBJECT_CHECK(SiFivePLICState, (obj), TYPE_SIFIVE_PLIC)
+typedef struct SiFivePLICState SiFivePLICState;
+DECLARE_INSTANCE_CHECKER(SiFivePLICState, SIFIVE_PLIC,
+ TYPE_SIFIVE_PLIC)
typedef enum PLICMode {
PLICMode_U,
PLICMode_S,
- PLICMode_H,
PLICMode_M
} PLICMode;
@@ -41,14 +42,16 @@ typedef struct PLICAddr {
PLICMode mode;
} PLICAddr;
-typedef struct SiFivePLICState {
+struct SiFivePLICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mmio;
uint32_t num_addrs;
+ uint32_t num_harts;
uint32_t bitfield_words;
+ uint32_t num_enables;
PLICAddr *addr_config;
uint32_t *source_priority;
uint32_t *target_priority;
@@ -58,6 +61,7 @@ typedef struct SiFivePLICState {
/* config */
char *hart_config;
+ uint32_t hartid_base;
uint32_t num_sources;
uint32_t num_priorities;
uint32_t priority_base;
@@ -67,13 +71,17 @@ typedef struct SiFivePLICState {
uint32_t context_base;
uint32_t context_stride;
uint32_t aperture_size;
-} SiFivePLICState;
+
+ qemu_irq *m_external_irqs;
+ qemu_irq *s_external_irqs;
+};
DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
- uint32_t num_sources, uint32_t num_priorities,
- uint32_t priority_base, uint32_t pending_base,
- uint32_t enable_base, uint32_t enable_stride,
- uint32_t context_base, uint32_t context_stride,
- uint32_t aperture_size);
+ uint32_t num_harts,
+ uint32_t hartid_base, uint32_t num_sources,
+ uint32_t num_priorities, uint32_t priority_base,
+ uint32_t pending_base, uint32_t enable_base,
+ uint32_t enable_stride, uint32_t context_base,
+ uint32_t context_stride, uint32_t aperture_size);
#endif
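
The sifive_plic_create() prototype gains num_harts and hartid_base parameters, so existing callers need two extra arguments. A sketch of an updated call; plic and plic_hart_config are assumed caller-side variables, and the memory-map numbers are examples in the style of the sifive boards, not values taken from this header.

/* Sketch: updated call with the new num_harts/hartid_base arguments. */
plic = sifive_plic_create(0xc000000, plic_hart_config,
                          2, 0,             /* num_harts, hartid_base (new) */
                          127, 7,           /* num_sources, num_priorities */
                          0x04, 0x1000,     /* priority_base, pending_base */
                          0x2000, 0x80,     /* enable_base, enable_stride */
                          0x200000, 0x1000, /* context_base, context_stride */
                          0x4000000);       /* aperture_size */
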
diff --git a/include/hw/intc/xlnx-pmu-iomod-intc.h b/include/hw/intc/xlnx-pmu-iomod-intc.h
index 0bd118884a..ccc8bd272a 100644
--- a/include/hw/intc/xlnx-pmu-iomod-intc.h
+++ b/include/hw/intc/xlnx-pmu-iomod-intc.h
@@ -27,16 +27,16 @@
#include "hw/sysbus.h"
#include "hw/register.h"
+#include "qom/object.h"
#define TYPE_XLNX_PMU_IO_INTC "xlnx.pmu_io_intc"
-#define XLNX_PMU_IO_INTC(obj) \
- OBJECT_CHECK(XlnxPMUIOIntc, (obj), TYPE_XLNX_PMU_IO_INTC)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxPMUIOIntc, XLNX_PMU_IO_INTC)
/* This is R_PIT3_CONTROL + 1 */
#define XLNXPMUIOINTC_R_MAX (0x78 + 1)
-typedef struct XlnxPMUIOIntc {
+struct XlnxPMUIOIntc {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -52,6 +52,6 @@ typedef struct XlnxPMUIOIntc {
uint32_t regs[XLNXPMUIOINTC_R_MAX];
RegisterInfo regs_info[XLNXPMUIOINTC_R_MAX];
-} XlnxPMUIOIntc;
+};
#endif /* HW_INTC_XLNX_PMU_IOMOD_INTC_H */
diff --git a/include/hw/intc/xlnx-zynqmp-ipi.h b/include/hw/intc/xlnx-zynqmp-ipi.h
index 866c719c6f..33eff1d4f6 100644
--- a/include/hw/intc/xlnx-zynqmp-ipi.h
+++ b/include/hw/intc/xlnx-zynqmp-ipi.h
@@ -27,18 +27,18 @@
#include "hw/sysbus.h"
#include "hw/register.h"
+#include "qom/object.h"
#define TYPE_XLNX_ZYNQMP_IPI "xlnx.zynqmp_ipi"
-#define XLNX_ZYNQMP_IPI(obj) \
- OBJECT_CHECK(XlnxZynqMPIPI, (obj), TYPE_XLNX_ZYNQMP_IPI)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPIPI, XLNX_ZYNQMP_IPI)
/* This is R_IPI_IDR + 1 */
#define R_XLNX_ZYNQMP_IPI_MAX ((0x1c / 4) + 1)
#define NUM_IPIS 11
-typedef struct XlnxZynqMPIPI {
+struct XlnxZynqMPIPI {
/* Private */
SysBusDevice parent_obj;
@@ -51,6 +51,6 @@ typedef struct XlnxZynqMPIPI {
uint32_t regs[R_XLNX_ZYNQMP_IPI_MAX];
RegisterInfo regs_info[R_XLNX_ZYNQMP_IPI_MAX];
-} XlnxZynqMPIPI;
+};
#endif /* XLNX_ZYNQMP_IPI_H */
diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h
index 1c07969bc9..cbcdda509d 100644
--- a/include/hw/ipack/ipack.h
+++ b/include/hw/ipack/ipack.h
@@ -12,11 +12,11 @@
#define QEMU_IPACK_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct IPackBus IPackBus;
#define TYPE_IPACK_BUS "IndustryPack"
-#define IPACK_BUS(obj) OBJECT_CHECK(IPackBus, (obj), TYPE_IPACK_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(IPackBus, IPACK_BUS)
struct IPackBus {
/*< private >*/
@@ -28,16 +28,10 @@ struct IPackBus {
qemu_irq_handler set_irq;
};
-typedef struct IPackDevice IPackDevice;
-typedef struct IPackDeviceClass IPackDeviceClass;
#define TYPE_IPACK_DEVICE "ipack-device"
-#define IPACK_DEVICE(obj) \
- OBJECT_CHECK(IPackDevice, (obj), TYPE_IPACK_DEVICE)
-#define IPACK_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(IPackDeviceClass, (klass), TYPE_IPACK_DEVICE)
-#define IPACK_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IPackDeviceClass, (obj), TYPE_IPACK_DEVICE)
+OBJECT_DECLARE_TYPE(IPackDevice, IPackDeviceClass,
+ IPACK_DEVICE)
struct IPackDeviceClass {
/*< private >*/
@@ -79,9 +73,9 @@ extern const VMStateDescription vmstate_ipack_device;
VMSTATE_STRUCT(_field, _state, 1, vmstate_ipack_device, IPackDevice)
IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot);
-void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size,
- DeviceState *parent,
- const char *name, uint8_t n_slots,
- qemu_irq_handler handler);
+void ipack_bus_init(IPackBus *bus, size_t bus_size,
+ DeviceState *parent,
+ uint8_t n_slots,
+ qemu_irq_handler handler);
#endif
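
ipack_bus_new_inplace() becomes ipack_bus_init() and no longer takes a bus name. A minimal sketch of an updated caller; the device state s, N_MODULES and the IRQ handler name are illustrative.

/* Sketch: old vs. new in-place bus initialisation.
 * before: ipack_bus_new_inplace(&s->bus, sizeof(s->bus), DEVICE(s),
 *                               "ipack.0", N_MODULES, tpci200_set_irq);
 */
ipack_bus_init(&s->bus, sizeof(s->bus), DEVICE(s),
               N_MODULES, tpci200_set_irq);
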
diff --git a/include/hw/ipmi/ipmi.h b/include/hw/ipmi/ipmi.h
index 8a99d958bb..77a7213ed9 100644
--- a/include/hw/ipmi/ipmi.h
+++ b/include/hw/ipmi/ipmi.h
@@ -27,6 +27,7 @@
#include "exec/memory.h"
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define MAX_IPMI_MSG_SIZE 300
@@ -53,6 +54,7 @@ enum ipmi_op {
#define IPMI_CC_INVALID_DATA_FIELD 0xcc
#define IPMI_CC_BMC_INIT_IN_PROGRESS 0xd2
#define IPMI_CC_COMMAND_NOT_SUPPORTED 0xd5
+#define IPMI_CC_UNSPECIFIED 0xff
#define IPMI_NETFN_APP 0x06
#define IPMI_NETFN_OEM 0x3a
@@ -109,14 +111,13 @@ uint32_t ipmi_next_uuid(void);
#define TYPE_IPMI_INTERFACE "ipmi-interface"
#define IPMI_INTERFACE(obj) \
INTERFACE_CHECK(IPMIInterface, (obj), TYPE_IPMI_INTERFACE)
-#define IPMI_INTERFACE_CLASS(class) \
- OBJECT_CLASS_CHECK(IPMIInterfaceClass, (class), TYPE_IPMI_INTERFACE)
-#define IPMI_INTERFACE_GET_CLASS(class) \
- OBJECT_GET_CLASS(IPMIInterfaceClass, (class), TYPE_IPMI_INTERFACE)
+typedef struct IPMIInterfaceClass IPMIInterfaceClass;
+DECLARE_CLASS_CHECKERS(IPMIInterfaceClass, IPMI_INTERFACE,
+ TYPE_IPMI_INTERFACE)
typedef struct IPMIInterface IPMIInterface;
-typedef struct IPMIInterfaceClass {
+struct IPMIInterfaceClass {
InterfaceClass parent;
/*
@@ -169,28 +170,24 @@ typedef struct IPMIInterfaceClass {
* Return the firmware info for a device.
*/
void (*get_fwinfo)(struct IPMIInterface *s, IPMIFwInfo *info);
-} IPMIInterfaceClass;
+};
/*
* Define a BMC simulator (or perhaps a connection to a real BMC)
*/
#define TYPE_IPMI_BMC "ipmi-bmc"
-#define IPMI_BMC(obj) \
- OBJECT_CHECK(IPMIBmc, (obj), TYPE_IPMI_BMC)
-#define IPMI_BMC_CLASS(obj_class) \
- OBJECT_CLASS_CHECK(IPMIBmcClass, (obj_class), TYPE_IPMI_BMC)
-#define IPMI_BMC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IPMIBmcClass, (obj), TYPE_IPMI_BMC)
-
-typedef struct IPMIBmc {
+OBJECT_DECLARE_TYPE(IPMIBmc, IPMIBmcClass,
+ IPMI_BMC)
+
+struct IPMIBmc {
DeviceState parent;
uint8_t slave_addr;
IPMIInterface *intf;
-} IPMIBmc;
+};
-typedef struct IPMIBmcClass {
+struct IPMIBmcClass {
DeviceClass parent;
/* Called when the system resets to report to the bmc. */
@@ -203,7 +200,7 @@ typedef struct IPMIBmcClass {
uint8_t *cmd, unsigned int cmd_len,
unsigned int max_cmd_len,
uint8_t msg_id);
-} IPMIBmcClass;
+};
/*
* Add a link property to obj that points to a BMC.
@@ -267,10 +264,8 @@ int ipmi_bmc_sdr_find(IPMIBmc *b, uint16_t recid,
void ipmi_bmc_gen_event(IPMIBmc *b, uint8_t *evt, bool log);
#define TYPE_IPMI_BMC_SIMULATOR "ipmi-bmc-sim"
-#define IPMI_BMC_SIMULATOR(obj) OBJECT_CHECK(IPMIBmcSim, (obj), \
- TYPE_IPMI_BMC_SIMULATOR)
+OBJECT_DECLARE_SIMPLE_TYPE(IPMIBmcSim, IPMI_BMC_SIMULATOR)
-typedef struct IPMIBmcSim IPMIBmcSim;
typedef struct RspBuffer {
uint8_t buffer[MAX_IPMI_MSG_SIZE];
diff --git a/include/hw/irq.h b/include/hw/irq.h
index dc7abf199e..645b73d251 100644
--- a/include/hw/irq.h
+++ b/include/hw/irq.h
@@ -46,11 +46,6 @@ void qemu_free_irq(qemu_irq irq);
/* Returns a new IRQ with opposite polarity. */
qemu_irq qemu_irq_invert(qemu_irq irq);
-/* Returns a new IRQ which feeds into both the passed IRQs.
- * It's probably better to use the TYPE_SPLIT_IRQ device instead.
- */
-qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2);
-
/* For internal use in qtest. Similar to qemu_irq_split, but operating
on an existing vector of qemu_irq. */
void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n);
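
qemu_irq_split() is removed in favour of the TYPE_SPLIT_IRQ device that the deleted comment already recommended. A minimal sketch of the replacement wiring, assuming "hw/core/split-irq.h" is included and irq1/irq2 are the two destinations previously passed to qemu_irq_split(); the names are illustrative.

/* Sketch: replacing qemu_irq_split(irq1, irq2) with a split-irq device. */
DeviceState *splitter = qdev_new(TYPE_SPLIT_IRQ);

qdev_prop_set_uint16(splitter, "num-lines", 2);
qdev_realize_and_unref(splitter, NULL, &error_fatal);
qdev_connect_gpio_out(splitter, 0, irq1);
qdev_connect_gpio_out(splitter, 1, irq2);

/* the single input line that qemu_irq_split() used to return */
qemu_irq in = qdev_get_gpio_in(splitter, 0);
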
diff --git a/include/hw/isa/i8259_internal.h b/include/hw/isa/i8259_internal.h
index 861d70d8f8..f9dcc4163e 100644
--- a/include/hw/isa/i8259_internal.h
+++ b/include/hw/isa/i8259_internal.h
@@ -28,24 +28,18 @@
#include "hw/isa/isa.h"
#include "hw/intc/intc.h"
#include "hw/intc/i8259.h"
+#include "qom/object.h"
-typedef struct PICCommonState PICCommonState;
#define TYPE_PIC_COMMON "pic-common"
-#define PIC_COMMON(obj) \
- OBJECT_CHECK(PICCommonState, (obj), TYPE_PIC_COMMON)
-#define PIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(PICCommonClass, (klass), TYPE_PIC_COMMON)
-#define PIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PICCommonClass, (obj), TYPE_PIC_COMMON)
+OBJECT_DECLARE_TYPE(PICCommonState, PICCommonClass, PIC_COMMON)
-typedef struct PICCommonClass
-{
- ISADeviceClass parent_class;
+struct PICCommonClass {
+ DeviceClass parent_class;
void (*pre_save)(PICCommonState *s);
void (*post_load)(PICCommonState *s);
-} PICCommonClass;
+};
struct PICCommonState {
ISADevice parent_obj;
@@ -67,6 +61,7 @@ struct PICCommonState {
uint8_t single_mode; /* true if slave pic is not initialized */
uint8_t elcr; /* PIIX edge/trigger selection*/
uint8_t elcr_mask;
+ uint8_t ltim; /* Edge/Level Bank Select (pre-PIIX, chip-wide) */
qemu_irq int_out[1];
uint32_t master; /* reflects /SP input pin */
uint32_t iobase;
@@ -78,8 +73,5 @@ struct PICCommonState {
void pic_reset_common(PICCommonState *s);
ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master);
void pic_stat_update_irq(int irq, int level);
-bool pic_get_statistics(InterruptStatsProvider *obj,
- uint64_t **irq_counts, unsigned int *nb_irqs);
-void pic_print_info(InterruptStatsProvider *obj, Monitor *mon);
#endif /* QEMU_I8259_INTERNAL_H */
diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h
index 52b61eed88..40d6224a4e 100644
--- a/include/hw/isa/isa.h
+++ b/include/hw/isa/isa.h
@@ -6,40 +6,21 @@
#include "exec/memory.h"
#include "exec/ioport.h"
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define ISA_NUM_IRQS 16
#define TYPE_ISA_DEVICE "isa-device"
-#define ISA_DEVICE(obj) \
- OBJECT_CHECK(ISADevice, (obj), TYPE_ISA_DEVICE)
-#define ISA_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(ISADeviceClass, (klass), TYPE_ISA_DEVICE)
-#define ISA_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ISADeviceClass, (obj), TYPE_ISA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(ISADevice, ISA_DEVICE)
#define TYPE_ISA_BUS "ISA"
-#define ISA_BUS(obj) OBJECT_CHECK(ISABus, (obj), TYPE_ISA_BUS)
-
-#define TYPE_APPLE_SMC "isa-applesmc"
-#define APPLESMC_MAX_DATA_LENGTH 32
-#define APPLESMC_PROP_IO_BASE "iobase"
-
-static inline uint16_t applesmc_port(void)
-{
- Object *obj = object_resolve_path_type("", TYPE_APPLE_SMC, NULL);
-
- if (obj) {
- return object_property_get_uint(obj, APPLESMC_PROP_IO_BASE, NULL);
- }
- return 0;
-}
+OBJECT_DECLARE_SIMPLE_TYPE(ISABus, ISA_BUS)
#define TYPE_ISADMA "isa-dma"
-#define ISADMA_CLASS(klass) \
- OBJECT_CLASS_CHECK(IsaDmaClass, (klass), TYPE_ISADMA)
-#define ISADMA_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IsaDmaClass, (obj), TYPE_ISADMA)
+typedef struct IsaDmaClass IsaDmaClass;
+DECLARE_CLASS_CHECKERS(IsaDmaClass, ISADMA,
+ TYPE_ISADMA)
#define ISADMA(obj) \
INTERFACE_CHECK(IsaDma, (obj), TYPE_ISADMA)
@@ -53,7 +34,7 @@ typedef enum {
typedef int (*IsaDmaTransferHandler)(void *opaque, int nchan, int pos,
int size);
-typedef struct IsaDmaClass {
+struct IsaDmaClass {
InterfaceClass parent;
bool (*has_autoinitialization)(IsaDma *obj, int nchan);
@@ -65,12 +46,7 @@ typedef struct IsaDmaClass {
void (*register_channel)(IsaDma *obj, int nchan,
IsaDmaTransferHandler transfer_handler,
void *opaque);
-} IsaDmaClass;
-
-typedef struct ISADeviceClass {
- DeviceClass parent_class;
- void (*build_aml)(ISADevice *dev, Aml *scope);
-} ISADeviceClass;
+};
struct ISABus {
/*< private >*/
@@ -79,7 +55,7 @@ struct ISABus {
MemoryRegion *address_space;
MemoryRegion *address_space_io;
- qemu_irq *irqs;
+ qemu_irq *irqs_in;
IsaDma *dma[2];
};
@@ -88,28 +64,34 @@ struct ISADevice {
DeviceState parent_obj;
/*< public >*/
- int8_t isairq[2]; /* -1 = unassigned */
- int nirqs;
int ioport_id;
};
ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space,
MemoryRegion *address_space_io, Error **errp);
-void isa_bus_irqs(ISABus *bus, qemu_irq *irqs);
-qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq);
-void isa_init_irq(ISADevice *dev, qemu_irq *p, unsigned isairq);
-void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq);
+void isa_bus_register_input_irqs(ISABus *bus, qemu_irq *irqs_in);
void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16);
-IsaDma *isa_get_dma(ISABus *bus, int nchan);
-MemoryRegion *isa_address_space(ISADevice *dev);
-MemoryRegion *isa_address_space_io(ISADevice *dev);
+IsaDma *isa_bus_get_dma(ISABus *bus, int nchan);
+/**
+ * isa_bus_get_irq: Return input IRQ on ISA bus.
+ * @bus: the #ISABus to plug ISA devices on.
+ * @irqnum: the ISA IRQ number.
+ *
+ * Return IRQ @irqnum from the PIC associated on ISA @bus.
+ */
+qemu_irq isa_bus_get_irq(ISABus *bus, unsigned irqnum);
ISADevice *isa_new(const char *name);
ISADevice *isa_try_new(const char *name);
bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp);
ISADevice *isa_create_simple(ISABus *bus, const char *name);
ISADevice *isa_vga_init(ISABus *bus);
-void isa_build_aml(ISABus *bus, Aml *scope);
+
+qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq);
+void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq);
+MemoryRegion *isa_address_space(ISADevice *dev);
+MemoryRegion *isa_address_space_io(ISADevice *dev);
+ISABus *isa_bus_from_device(ISADevice *dev);
/**
* isa_register_ioport: Install an I/O port region on the ISA bus.
@@ -137,18 +119,14 @@ void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start);
* @portio: the ports, sorted by offset.
* @opaque: passed into the portio callbacks.
* @name: passed into memory_region_init_io.
+ *
+ * Returns: 0 on success, negative error code otherwise (e.g. if the
+ * ISA bus is not available)
*/
-void isa_register_portio_list(ISADevice *dev,
- PortioList *piolist,
- uint16_t start,
- const MemoryRegionPortio *portio,
- void *opaque, const char *name);
-
-static inline ISABus *isa_bus_from_device(ISADevice *d)
-{
- return ISA_BUS(qdev_get_parent_bus(DEVICE(d)));
-}
-
-#define TYPE_PIIX4_PCI_DEVICE "piix4-isa"
+int isa_register_portio_list(ISADevice *dev,
+ PortioList *piolist,
+ uint16_t start,
+ const MemoryRegionPortio *portio,
+ void *opaque, const char *name);
#endif
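
Two consequences of these isa.h changes for callers: bus input IRQs are now fetched with isa_bus_get_irq() (or per device via isa_get_irq()), and isa_register_portio_list() can fail, so its return value should be checked. A minimal sketch from a hypothetical device realize function; isa_bus, isadev, s, parallel_portio and errp are assumed caller-side names.

/* Sketch: using the reworked ISA bus IRQ and port-list APIs. */
qemu_irq irq5 = isa_bus_get_irq(isa_bus, 5);   /* input IRQ 5 of the bus PIC */

if (isa_register_portio_list(isadev, &s->portio_list, 0x378,
                             parallel_portio, s, "parallel") < 0) {
    /* negative return: e.g. no ISA bus / I/O space available */
    error_setg(errp, "no ISA I/O space available");
    return;
}
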
diff --git a/include/hw/isa/pc87312.h b/include/hw/isa/pc87312.h
index e16263d4b1..edaf723f4d 100644
--- a/include/hw/isa/pc87312.h
+++ b/include/hw/isa/pc87312.h
@@ -26,12 +26,13 @@
#define QEMU_PC87312_H
#include "hw/isa/superio.h"
+#include "qom/object.h"
-#define TYPE_PC87312_SUPERIO "pc87312"
-#define PC87312(obj) OBJECT_CHECK(PC87312State, (obj), TYPE_PC87312_SUPERIO)
+#define TYPE_PC87312 "pc87312"
+OBJECT_DECLARE_SIMPLE_TYPE(PC87312State, PC87312)
-typedef struct PC87312State {
+struct PC87312State {
/*< private >*/
ISASuperIODevice parent_dev;
/*< public >*/
@@ -49,7 +50,7 @@ typedef struct PC87312State {
uint8_t selected_index;
uint8_t regs[3];
-} PC87312State;
+};
#endif
diff --git a/include/hw/isa/superio.h b/include/hw/isa/superio.h
index 147cc0a7b7..0dc45104d4 100644
--- a/include/hw/isa/superio.h
+++ b/include/hw/isa/superio.h
@@ -12,18 +12,17 @@
#include "sysemu/sysemu.h"
#include "hw/isa/isa.h"
+#include "qom/object.h"
#define TYPE_ISA_SUPERIO "isa-superio"
-#define ISA_SUPERIO(obj) \
- OBJECT_CHECK(ISASuperIODevice, (obj), TYPE_ISA_SUPERIO)
-#define ISA_SUPERIO_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ISASuperIOClass, (obj), TYPE_ISA_SUPERIO)
-#define ISA_SUPERIO_CLASS(klass) \
- OBJECT_CLASS_CHECK(ISASuperIOClass, (klass), TYPE_ISA_SUPERIO)
+typedef struct ISASuperIOClass ISASuperIOClass;
+typedef struct ISASuperIODevice ISASuperIODevice;
+DECLARE_OBJ_CHECKERS(ISASuperIODevice, ISASuperIOClass,
+ ISA_SUPERIO, TYPE_ISA_SUPERIO)
#define SUPERIO_MAX_SERIAL_PORTS 4
-typedef struct ISASuperIODevice {
+struct ISASuperIODevice {
/*< private >*/
ISADevice parent_obj;
/*< public >*/
@@ -33,7 +32,7 @@ typedef struct ISASuperIODevice {
ISADevice *floppy;
ISADevice *kbc;
ISADevice *ide;
-} ISASuperIODevice;
+};
typedef struct ISASuperIOFuncs {
size_t count;
@@ -43,9 +42,9 @@ typedef struct ISASuperIOFuncs {
unsigned int (*get_dma)(ISASuperIODevice *sio, uint8_t index);
} ISASuperIOFuncs;
-typedef struct ISASuperIOClass {
+struct ISASuperIOClass {
/*< private >*/
- ISADeviceClass parent_class;
+ DeviceClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
@@ -53,7 +52,7 @@ typedef struct ISASuperIOClass {
ISASuperIOFuncs serial;
ISASuperIOFuncs floppy;
ISASuperIOFuncs ide;
-} ISASuperIOClass;
+};
#define TYPE_FDC37M81X_SUPERIO "fdc37m81x-superio"
#define TYPE_SMC37C669_SUPERIO "smc37c669-superio"
diff --git a/include/hw/isa/vt82c686.h b/include/hw/isa/vt82c686.h
index f23f45dfb1..da1722daf2 100644
--- a/include/hw/isa/vt82c686.h
+++ b/include/hw/isa/vt82c686.h
@@ -1,14 +1,39 @@
#ifndef HW_VT82C686_H
#define HW_VT82C686_H
+#include "hw/pci/pci_device.h"
+#include "audio/audio.h"
-#define TYPE_VT82C686B_SUPERIO "vt82c686b-superio"
+#define TYPE_VT82C686B_ISA "vt82c686b-isa"
+#define TYPE_VT82C686B_USB_UHCI "vt82c686b-usb-uhci"
+#define TYPE_VT8231_ISA "vt8231-isa"
+#define TYPE_VIA_AC97 "via-ac97"
+#define TYPE_VIA_IDE "via-ide"
+#define TYPE_VIA_MC97 "via-mc97"
-/* vt82c686.c */
-ISABus *vt82c686b_isa_init(PCIBus * bus, int devfn);
-void vt82c686b_ac97_init(PCIBus *bus, int devfn);
-void vt82c686b_mc97_init(PCIBus *bus, int devfn);
-I2CBus *vt82c686b_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
- qemu_irq sci_irq);
+typedef struct {
+ uint8_t stat;
+ uint8_t type;
+ uint32_t base;
+ uint32_t curr;
+ uint32_t addr;
+ uint32_t clen;
+} ViaAC97SGDChannel;
+
+OBJECT_DECLARE_SIMPLE_TYPE(ViaAC97State, VIA_AC97);
+
+struct ViaAC97State {
+ PCIDevice dev;
+ QEMUSoundCard card;
+ MemoryRegion sgd;
+ MemoryRegion fm;
+ MemoryRegion midi;
+ SWVoiceOut *vo;
+ ViaAC97SGDChannel aur;
+ uint16_t codec_regs[128];
+ uint32_t ac97_cmd;
+};
+
+void via_isa_set_irq(PCIDevice *d, int n, int level);
#endif
diff --git a/include/hw/kvm/clock.h b/include/hw/kvm/clock.h
deleted file mode 100644
index 81c66b2302..0000000000
--- a/include/hw/kvm/clock.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * QEMU KVM support, paravirtual clock device
- *
- * Copyright (C) 2011 Siemens AG
- *
- * Authors:
- * Jan Kiszka <jan.kiszka@siemens.com>
- *
- * This work is licensed under the terms of the GNU GPL version 2.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef HW_KVM_CLOCK_H
-#define HW_KVM_CLOCK_H
-
-#ifdef CONFIG_KVM
-
-void kvmclock_create(void);
-
-#else /* CONFIG_KVM */
-
-static inline void kvmclock_create(void)
-{
-}
-
-#endif /* !CONFIG_KVM */
-
-#endif
diff --git a/include/hw/lm32/lm32_pic.h b/include/hw/lm32/lm32_pic.h
deleted file mode 100644
index 9e5e038437..0000000000
--- a/include/hw/lm32/lm32_pic.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef QEMU_HW_LM32_PIC_H
-#define QEMU_HW_LM32_PIC_H
-
-
-uint32_t lm32_pic_get_ip(DeviceState *d);
-uint32_t lm32_pic_get_im(DeviceState *d);
-void lm32_pic_set_ip(DeviceState *d, uint32_t ip);
-void lm32_pic_set_im(DeviceState *d, uint32_t im);
-
-#endif /* QEMU_HW_LM32_PIC_H */
diff --git a/include/hw/loader-fit.h b/include/hw/loader-fit.h
index 0284c3e02c..0832e379dc 100644
--- a/include/hw/loader-fit.h
+++ b/include/hw/loader-fit.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/hw/loader.h b/include/hw/loader.h
index a9eeea3952..8685e27334 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -40,8 +40,8 @@ ssize_t load_image_size(const char *filename, void *addr, size_t size);
*
* Returns the size of the loaded image on success, -1 otherwise.
*/
-int load_image_targphys_as(const char *filename,
- hwaddr addr, uint64_t max_sz, AddressSpace *as);
+ssize_t load_image_targphys_as(const char *filename,
+ hwaddr addr, uint64_t max_sz, AddressSpace *as);
/**load_targphys_hex_as:
* @filename: Path to the .hex file
@@ -53,14 +53,15 @@ int load_image_targphys_as(const char *filename,
*
* Returns the size of the loaded .hex file on success, -1 otherwise.
*/
-int load_targphys_hex_as(const char *filename, hwaddr *entry, AddressSpace *as);
+ssize_t load_targphys_hex_as(const char *filename, hwaddr *entry,
+ AddressSpace *as);
/** load_image_targphys:
* Same as load_image_targphys_as(), but doesn't allow the caller to specify
* an AddressSpace.
*/
-int load_image_targphys(const char *filename, hwaddr,
- uint64_t max_sz);
+ssize_t load_image_targphys(const char *filename, hwaddr,
+ uint64_t max_sz);
/**
* load_image_mr: load an image into a memory region
@@ -73,7 +74,7 @@ int load_image_targphys(const char *filename, hwaddr,
* If the file is larger than the memory region's size the call will fail.
* Returns -1 on failure, or the size of the file.
*/
-int load_image_mr(const char *filename, MemoryRegion *mr);
+ssize_t load_image_mr(const char *filename, MemoryRegion *mr);
/* This is the limit on the maximum uncompressed image size that
* load_image_gzipped_buffer() and load_image_gzipped() will read. It prevents
@@ -81,16 +82,35 @@ int load_image_mr(const char *filename, MemoryRegion *mr);
*/
#define LOAD_IMAGE_MAX_GUNZIP_BYTES (256 << 20)
-int load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
- uint8_t **buffer);
-int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);
+ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
+ uint8_t **buffer);
+ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);
+
+/**
+ * unpack_efi_zboot_image:
+ * @buffer: pointer to a variable holding the address of a buffer containing the
+ * image
+ * @size: pointer to a variable holding the size of the buffer
+ *
+ * Check whether the buffer contains an EFI zboot image, and if it does, extract
+ * the compressed payload and decompress it into a new buffer. If successful,
+ * the old buffer is freed, and the *buffer and size variables pointed to by the
+ * function arguments are updated to refer to the newly populated buffer.
+ *
+ * Returns 0 if the image could not be identified as an EFI zboot image.
+ * Returns -1 if the buffer contents were identified as an EFI zboot image, but
+ * unpacking failed for any reason.
+ * Returns the size of the decompressed payload if decompression was performed
+ * successfully.
+ */
+ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size);
#define ELF_LOAD_FAILED -1
#define ELF_LOAD_NOT_ELF -2
#define ELF_LOAD_WRONG_ARCH -3
#define ELF_LOAD_WRONG_ENDIAN -4
#define ELF_LOAD_TOO_BIG -5
-const char *load_elf_strerror(int error);
+const char *load_elf_strerror(ssize_t error);
/** load_elf_ram_sym:
* @filename: Path of ELF file
@@ -128,48 +148,48 @@ const char *load_elf_strerror(int error);
typedef void (*symbol_fn_t)(const char *st_name, int st_info,
uint64_t st_value, uint64_t st_size);
-int load_elf_ram_sym(const char *filename,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags,
- int big_endian, int elf_machine,
- int clear_lsb, int data_swab,
- AddressSpace *as, bool load_rom, symbol_fn_t sym_cb);
+ssize_t load_elf_ram_sym(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr,
+ uint32_t *pflags, int big_endian, int elf_machine,
+ int clear_lsb, int data_swab,
+ AddressSpace *as, bool load_rom, symbol_fn_t sym_cb);
/** load_elf_ram:
* Same as load_elf_ram_sym(), but doesn't allow the caller to specify a
* symbol callback function
*/
-int load_elf_ram(const char *filename,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
- int elf_machine, int clear_lsb, int data_swab,
- AddressSpace *as, bool load_rom);
+ssize_t load_elf_ram(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags,
+ int big_endian, int elf_machine, int clear_lsb,
+ int data_swab, AddressSpace *as, bool load_rom);
/** load_elf_as:
* Same as load_elf_ram(), but always loads the elf as ROM
*/
-int load_elf_as(const char *filename,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
- int elf_machine, int clear_lsb, int data_swab,
- AddressSpace *as);
+ssize_t load_elf_as(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
+ uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ int elf_machine, int clear_lsb, int data_swab,
+ AddressSpace *as);
/** load_elf:
* Same as load_elf_as(), but doesn't allow the caller to specify an
* AddressSpace.
*/
-int load_elf(const char *filename,
- uint64_t (*elf_note_fn)(void *, void *, bool),
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
- uint64_t *highaddr, uint32_t *pflags, int big_endian,
- int elf_machine, int clear_lsb, int data_swab);
+ssize_t load_elf(const char *filename,
+ uint64_t (*elf_note_fn)(void *, void *, bool),
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
+ uint64_t *highaddr, uint32_t *pflags, int big_endian,
+ int elf_machine, int clear_lsb, int data_swab);
/** load_elf_hdr:
* @filename: Path of ELF file
@@ -183,8 +203,8 @@ int load_elf(const char *filename,
*/
void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp);
-int load_aout(const char *filename, hwaddr addr, int max_sz,
- int bswap_needed, hwaddr target_page_size);
+ssize_t load_aout(const char *filename, hwaddr addr, int max_sz,
+ int bswap_needed, hwaddr target_page_size);
#define LOAD_UIMAGE_LOADADDR_INVALID (-1)
@@ -205,19 +225,19 @@ int load_aout(const char *filename, hwaddr addr, int max_sz,
*
* Returns the size of the loaded image on success, -1 otherwise.
*/
-int load_uimage_as(const char *filename, hwaddr *ep,
- hwaddr *loadaddr, int *is_linux,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque, AddressSpace *as);
+ssize_t load_uimage_as(const char *filename, hwaddr *ep,
+ hwaddr *loadaddr, int *is_linux,
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, AddressSpace *as);
/** load_uimage:
* Same as load_uimage_as(), but doesn't allow the caller to specify an
* AddressSpace.
*/
-int load_uimage(const char *filename, hwaddr *ep,
- hwaddr *loadaddr, int *is_linux,
- uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque);
+ssize_t load_uimage(const char *filename, hwaddr *ep,
+ hwaddr *loadaddr, int *is_linux,
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque);
/**
* load_ramdisk_as:
@@ -232,15 +252,15 @@ int load_uimage(const char *filename, hwaddr *ep,
*
* Returns the size of the loaded image on success, -1 otherwise.
*/
-int load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz,
- AddressSpace *as);
+ssize_t load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz,
+ AddressSpace *as);
/**
* load_ramdisk:
* Same as load_ramdisk_as(), but doesn't allow the caller to specify
* an AddressSpace.
*/
-int load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz);
+ssize_t load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz);
ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen);
@@ -250,12 +270,9 @@ void pstrcpy_targphys(const char *name,
hwaddr dest, int buf_size,
const char *source);
-extern bool option_rom_has_mr;
-extern bool rom_file_has_mr;
-
-int rom_add_file(const char *file, const char *fw_dir,
- hwaddr addr, int32_t bootindex,
- bool option_rom, MemoryRegion *mr, AddressSpace *as);
+ssize_t rom_add_file(const char *file, const char *fw_dir,
+ hwaddr addr, int32_t bootindex,
+ bool has_option_rom, MemoryRegion *mr, AddressSpace *as);
MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
size_t max_len, hwaddr addr,
const char *fw_file_name,
@@ -290,6 +307,37 @@ void rom_transaction_end(bool commit);
int rom_copy(uint8_t *dest, hwaddr addr, size_t size);
void *rom_ptr(hwaddr addr, size_t size);
+/**
+ * rom_ptr_for_as: Return a pointer to ROM blob data for the address
+ * @as: AddressSpace to look for the ROM blob in
+ * @addr: Address within @as
+ * @size: size of data required in bytes
+ *
+ * Returns: pointer into the data which backs the matching ROM blob,
+ * or NULL if no blob covers the address range.
+ *
+ * This function looks for a ROM blob which covers the specified range
+ * of bytes of length @size starting at @addr within the address space
+ * @as. This is useful for code which runs as part of board
+ * initialization or CPU reset which wants to read data that is part
+ * of a user-supplied guest image or other guest memory contents, but
+ * which runs before the ROM loader's reset function has copied the
+ * blobs into guest memory.
+ *
+ * rom_ptr_for_as() will look not just for blobs loaded directly to
+ * the specified address, but also for blobs which were loaded to an
+ * alias of the region at a different location in the AddressSpace.
+ * In other words, if a machine model has RAM at address 0x0000_0000
+ * which is aliased to also appear at 0x1000_0000, rom_ptr_for_as()
+ * will return the correct data whether the guest image was linked and
+ * loaded at 0x0000_0000 or 0x1000_0000. Contrast rom_ptr(), which
+ * will only return data if the image load address is an exact match
+ * with the queried address.
+ *
+ * New code should prefer to use rom_ptr_for_as() instead of
+ * rom_ptr().
+ */
+void *rom_ptr_for_as(AddressSpace *as, hwaddr addr, size_t size);
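A hedged usage sketch of rom_ptr_for_as() from a board reset hook (FIXUP_ADDR and the patched value are purely illustrative):

    /* Patch one word of the guest image before the CPU starts, even though
     * the ROM loader reset function has not yet copied blobs into RAM. */
    uint32_t *p = rom_ptr_for_as(&address_space_memory, FIXUP_ADDR,
                                 sizeof(*p));
    if (p) {
        *p = cpu_to_le32(0x12345678);
    }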
void hmp_info_roms(Monitor *mon, const QDict *qdict);
#define rom_add_file_fixed(_f, _a, _i) \
@@ -305,17 +353,25 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict);
#define rom_add_blob_fixed_as(_f, _b, _l, _a, _as) \
rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as, true)
-#define PC_ROM_MIN_VGA 0xc0000
-#define PC_ROM_MIN_OPTION 0xc8000
-#define PC_ROM_MAX 0xe0000
-#define PC_ROM_ALIGN 0x800
-#define PC_ROM_SIZE (PC_ROM_MAX - PC_ROM_MIN_VGA)
-
-int rom_add_vga(const char *file);
-int rom_add_option(const char *file, int32_t bootindex);
+ssize_t rom_add_vga(const char *file);
+ssize_t rom_add_option(const char *file, int32_t bootindex);
/* This is the usual maximum in uboot, so if a uImage overflows this, it would
* overflow on real hardware too. */
#define UBOOT_MAX_GUNZIP_BYTES (64 << 20)
+typedef struct RomGap {
+ hwaddr base;
+ size_t size;
+} RomGap;
+
+/**
+ * rom_find_largest_gap_between: return largest gap between ROMs in given range
+ *
+ * Given a range of addresses, this function finds the largest
+ * contiguous subrange which has no ROMs loaded to it. That is,
+ * it finds the biggest gap which is free for use for other things.
+ */
+RomGap rom_find_largest_gap_between(hwaddr base, size_t size);
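For instance, a board model could use the returned gap to place a device tree blob wherever the loaded images left room (the address range, fdt_size and fdt_addr below are illustrative assumptions):

    RomGap gap = rom_find_largest_gap_between(0x40000000, 0x10000000);
    if (gap.size < fdt_size) {
        error_report("no free space left for the device tree blob");
        exit(1);
    }
    fdt_addr = gap.base;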
+
#endif
diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h
new file mode 100644
index 0000000000..252f7df7f4
--- /dev/null
+++ b/include/hw/loongarch/virt.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for loongarch board emulation.
+ *
+ * Copyright (C) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LOONGARCH_H
+#define HW_LOONGARCH_H
+
+#include "target/loongarch/cpu.h"
+#include "hw/boards.h"
+#include "qemu/queue.h"
+#include "hw/intc/loongarch_ipi.h"
+#include "hw/block/flash.h"
+
+#define LOONGARCH_MAX_CPUS 256
+
+#define VIRT_FWCFG_BASE 0x1e020000UL
+#define VIRT_BIOS_BASE 0x1c000000UL
+#define VIRT_BIOS_SIZE (16 * MiB)
+#define VIRT_FLASH_SECTOR_SIZE (128 * KiB)
+#define VIRT_FLASH0_BASE VIRT_BIOS_BASE
+#define VIRT_FLASH0_SIZE VIRT_BIOS_SIZE
+#define VIRT_FLASH1_BASE 0x1d000000UL
+#define VIRT_FLASH1_SIZE (16 * MiB)
+
+#define VIRT_LOWMEM_BASE 0
+#define VIRT_LOWMEM_SIZE 0x10000000
+#define VIRT_HIGHMEM_BASE 0x80000000
+#define VIRT_GED_EVT_ADDR 0x100e0000
+#define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN)
+#define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN)
+
+struct LoongArchMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ MemoryRegion lowmem;
+ MemoryRegion highmem;
+ MemoryRegion bios;
+ bool bios_loaded;
+ /* State for other subsystems/APIs: */
+ FWCfgState *fw_cfg;
+ Notifier machine_done;
+ Notifier powerdown_notifier;
+ OnOffAuto acpi;
+ char *oem_id;
+ char *oem_table_id;
+ DeviceState *acpi_ged;
+ int fdt_size;
+ DeviceState *platform_bus_dev;
+ PCIBus *pci_bus;
+ PFlashCFI01 *flash[2];
+ MemoryRegion system_iocsr;
+ MemoryRegion iocsr_mem;
+ AddressSpace as_iocsr;
+};
+
+#define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt")
+OBJECT_DECLARE_SIMPLE_TYPE(LoongArchMachineState, LOONGARCH_MACHINE)
+bool loongarch_is_acpi_enabled(LoongArchMachineState *lams);
+void loongarch_acpi_setup(LoongArchMachineState *lams);
+#endif
diff --git a/include/hw/m68k/mcf.h b/include/hw/m68k/mcf.h
index 0db49c5e60..5d9f876ffe 100644
--- a/include/hw/m68k/mcf.h
+++ b/include/hw/m68k/mcf.h
@@ -2,6 +2,7 @@
#define HW_MCF_H
/* Motorola ColdFire device prototypes. */
+#include "exec/hwaddr.h"
#include "target/m68k/cpu-qom.h"
/* mcf_uart.c */
@@ -9,8 +10,8 @@ uint64_t mcf_uart_read(void *opaque, hwaddr addr,
unsigned size);
void mcf_uart_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size);
-void *mcf_uart_init(qemu_irq irq, Chardev *chr);
-void mcf_uart_mm_init(hwaddr base, qemu_irq irq, Chardev *chr);
+DeviceState *mcf_uart_create(qemu_irq irq, Chardev *chr);
+DeviceState *mcf_uart_create_mmap(hwaddr base, qemu_irq irq, Chardev *chr);
/* mcf_intc.c */
qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem,
@@ -18,7 +19,6 @@ qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem,
M68kCPU *cpu);
/* mcf5206.c */
-qemu_irq *mcf5206_init(struct MemoryRegion *sysmem,
- uint32_t base, M68kCPU *cpu);
+#define TYPE_MCF5206_MBAR "mcf5206-mbar"
#endif
diff --git a/include/hw/m68k/mcf_fec.h b/include/hw/m68k/mcf_fec.h
index eeb471f9c9..80d4f651ba 100644
--- a/include/hw/m68k/mcf_fec.h
+++ b/include/hw/m68k/mcf_fec.h
@@ -9,9 +9,10 @@
#ifndef HW_M68K_MCF_FEC_H
#define HW_M68K_MCF_FEC_H
+#include "qom/object.h"
#define TYPE_MCF_FEC_NET "mcf-fec"
-#define MCF_FEC_NET(obj) OBJECT_CHECK(mcf_fec_state, (obj), TYPE_MCF_FEC_NET)
+OBJECT_DECLARE_SIMPLE_TYPE(mcf_fec_state, MCF_FEC_NET)
#define FEC_NUM_IRQ 13
diff --git a/include/hw/m68k/next-cube.h b/include/hw/m68k/next-cube.h
index a3be2b32ab..43577282d1 100644
--- a/include/hw/m68k/next-cube.h
+++ b/include/hw/m68k/next-cube.h
@@ -1,3 +1,13 @@
+/*
+ * NeXT Cube
+ *
+ * Copyright (c) 2011 Bryce Lanham
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
#ifndef NEXT_CUBE_H
#define NEXT_CUBE_H
@@ -39,9 +49,8 @@ enum next_irqs {
NEXT_ENRX_DMA_I,
NEXT_SCSI_DMA_I,
NEXT_SCC_DMA_I,
- NEXT_SND_I
+ NEXT_SND_I,
+ NEXT_NUM_IRQS
};
-void next_irq(void *opaque, int number, int level);
-
#endif /* NEXT_CUBE_H */
diff --git a/include/hw/m68k/q800-glue.h b/include/hw/m68k/q800-glue.h
new file mode 100644
index 0000000000..04fac25f6c
--- /dev/null
+++ b/include/hw/m68k/q800-glue.h
@@ -0,0 +1,51 @@
+/*
+ * QEMU q800 logic GLUE (General Logic Unit)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_Q800_GLUE_H
+#define HW_Q800_GLUE_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_GLUE "q800-glue"
+OBJECT_DECLARE_SIMPLE_TYPE(GLUEState, GLUE)
+
+struct GLUEState {
+ SysBusDevice parent_obj;
+
+ M68kCPU *cpu;
+ uint8_t ipr;
+ uint8_t auxmode;
+ qemu_irq irqs[2];
+ QEMUTimer *nmi_release;
+};
+
+#define GLUE_IRQ_IN_VIA1 0
+#define GLUE_IRQ_IN_VIA2 1
+#define GLUE_IRQ_IN_SONIC 2
+#define GLUE_IRQ_IN_ESCC 3
+#define GLUE_IRQ_IN_NMI 4
+#define GLUE_IRQ_IN_ASC 5
+
+#define GLUE_IRQ_NUBUS_9 0
+#define GLUE_IRQ_ASC 1
+
+#endif
diff --git a/include/hw/m68k/q800.h b/include/hw/m68k/q800.h
new file mode 100644
index 0000000000..34365c9860
--- /dev/null
+++ b/include/hw/m68k/q800.h
@@ -0,0 +1,78 @@
+/*
+ * QEMU Motorola 680x0 Macintosh hardware System Emulator
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_Q800_H
+#define HW_Q800_H
+
+#include "hw/boards.h"
+#include "qom/object.h"
+#include "target/m68k/cpu-qom.h"
+#include "exec/memory.h"
+#include "hw/m68k/q800-glue.h"
+#include "hw/misc/mac_via.h"
+#include "hw/net/dp8393x.h"
+#include "hw/char/escc.h"
+#include "hw/or-irq.h"
+#include "hw/scsi/esp.h"
+#include "hw/block/swim.h"
+#include "hw/nubus/mac-nubus-bridge.h"
+#include "hw/display/macfb.h"
+#include "hw/misc/djmemc.h"
+#include "hw/misc/iosb.h"
+#include "hw/audio/asc.h"
+
+/*
+ * The main Q800 machine
+ */
+
+struct Q800MachineState {
+ MachineState parent_obj;
+
+ bool easc;
+ M68kCPU cpu;
+ MemoryRegion rom;
+ MemoryRegion rom_alias;
+ GLUEState glue;
+ MOS6522Q800VIA1State via1;
+ MOS6522Q800VIA2State via2;
+ dp8393xState dp8393x;
+ MemoryRegion dp8393x_prom;
+ ESCCState escc;
+ OrIRQState escc_orgate;
+ SysBusESPState esp;
+ Swim swim;
+ MacNubusBridge mac_nubus_bridge;
+ MacfbNubusState macfb;
+ DJMEMCState djmemc;
+ IOSBState iosb;
+ ASCState asc;
+ MemoryRegion ramio;
+ MemoryRegion macio;
+ MemoryRegion macio_alias;
+ MemoryRegion machine_id;
+ MemoryRegion escc_alias;
+};
+
+#define TYPE_Q800_MACHINE MACHINE_TYPE_NAME("q800")
+OBJECT_DECLARE_SIMPLE_TYPE(Q800MachineState, Q800_MACHINE)
+
+#endif
diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h
index 04476acb8f..e0571c8a31 100644
--- a/include/hw/mem/memory-device.h
+++ b/include/hw/mem/memory-device.h
@@ -14,15 +14,14 @@
#define MEMORY_DEVICE_H
#include "hw/qdev-core.h"
-#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-machine.h"
#include "qom/object.h"
#define TYPE_MEMORY_DEVICE "memory-device"
-#define MEMORY_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(MemoryDeviceClass, (klass), TYPE_MEMORY_DEVICE)
-#define MEMORY_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MemoryDeviceClass, (obj), TYPE_MEMORY_DEVICE)
+typedef struct MemoryDeviceClass MemoryDeviceClass;
+DECLARE_CLASS_CHECKERS(MemoryDeviceClass, MEMORY_DEVICE,
+ TYPE_MEMORY_DEVICE)
#define MEMORY_DEVICE(obj) \
INTERFACE_CHECK(MemoryDeviceState, (obj), TYPE_MEMORY_DEVICE)
@@ -38,12 +37,27 @@ typedef struct MemoryDeviceState MemoryDeviceState;
* address in guest physical memory can either be specified explicitly
* or get assigned automatically.
*
+ * Some memory devices might not own a memory region in certain device
+ * configurations. Such devices can logically get (un)plugged; however,
+ * empty memory devices are mostly ignored by the memory device code.
+ *
* Conceptually, memory devices only span one memory region. If multiple
* successive memory regions are used, a covering memory region has to
* be provided. Scattered memory regions are not supported for single
* devices.
+ *
+ * The device memory region returned via @get_memory_region may either be a
+ * single RAM memory region or a memory region container with subregions
+ * that are RAM memory regions or aliases to RAM memory regions. Other
+ * memory regions or subregions are not supported.
+ *
+ * If the device memory region returned via @get_memory_region is a
+ * memory region container, subregions may be (un)mapped dynamically as
+ * long as the number of memslots returned by @get_memslots() is not
+ * exceeded and as long as all memory regions are of the same kind (e.g.,
+ * all RAM or all ROM).
*/
-typedef struct MemoryDeviceClass {
+struct MemoryDeviceClass {
/* private */
InterfaceClass parent_class;
@@ -80,7 +94,8 @@ typedef struct MemoryDeviceClass {
uint64_t (*get_plugged_size)(const MemoryDeviceState *md, Error **errp);
/*
- * Return the memory region of the memory device.
+ * Return the memory region of the memory device. If the device is
+ * completely empty, returns NULL without an error.
*
* Called when (un)plugging the memory device, to (un)map the
* memory region in guest physical memory, but also to detect the
@@ -90,14 +105,69 @@ typedef struct MemoryDeviceClass {
MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp);
/*
+ * Optional: Instruct the memory device to decide how many memory slots
+ * it requires, not exceeding the given limit.
+ *
+ * Called exactly once when pre-plugging the memory device, before
+ * querying the number of memslots using @get_memslots the first time.
+ */
+ void (*decide_memslots)(MemoryDeviceState *md, unsigned int limit);
+
+ /*
+ * Optional for memory devices that require only a single memslot,
+ * required for all other memory devices: Return the number of memslots
+ * (distinct RAM memory regions in the device memory region) that are
+ * required by the device.
+ *
+ * If this function is not implemented, the assumption is "1".
+ *
+ * Called when (un)plugging the memory device, to check if the requirements
+ * can be satisfied, and to do proper accounting.
+ */
+ unsigned int (*get_memslots)(MemoryDeviceState *md);
+
+ /*
+ * Optional: Return the desired minimum alignment of the device in guest
+ * physical address space. The final alignment is computed based on this
+ * alignment and the alignment requirements of the memory region.
+ *
+ * Called when plugging the memory device to detect the required alignment
+ * during address assignment.
+ */
+ uint64_t (*get_min_alignment)(const MemoryDeviceState *md);
+
+ /*
* Translate the memory device into #MemoryDeviceInfo.
*/
void (*fill_device_info)(const MemoryDeviceState *md,
MemoryDeviceInfo *info);
-} MemoryDeviceClass;
+};
+
+/*
+ * Traditionally, KVM/vhost in many setups supported 509 memslots, whereby
+ * 253 memslots were "reserved" for boot memory and other devices (such
+ * as PCI BARs, which can get mapped dynamically) and 256 memslots were
+ * dedicated for DIMMs. These magic numbers worked reliably in the past.
+ *
+ * Further, using many memslots can negatively affect performance, so setting
+ * the soft-limit of memslots used by memory devices to the traditional
+ * DIMM limit of 256 sounds reasonable.
+ *
+ * If we have less than 509 memslots, we will instruct memory devices that
+ * support automatically deciding how many memslots to use to only use a single
+ * one.
+ *
+ * Hotplugging vhost devices with at least 509 memslots is not expected to
+ * cause problems, not even when devices automatically decide how many
+ * memslots to use.
+ */
+#define MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT 256
+#define MEMORY_DEVICES_SAFE_MAX_MEMSLOTS 509
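A minimal sketch of how these limits could be applied when pre-plugging a device, assuming the caller has queried the number of free memslots from the accelerator and tracks how many are already reserved (the helper and its parameters are hypothetical, not part of this header):

    static unsigned int device_memslot_limit(unsigned int free_slots,
                                             unsigned int reserved)
    {
        /* Below the traditionally safe total, restrict auto-deciding
         * devices to a single memslot. */
        if (free_slots < MEMORY_DEVICES_SAFE_MAX_MEMSLOTS ||
            reserved >= MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT) {
            return 1;
        }
        /* Otherwise stay within the soft limit dedicated to memory devices. */
        return MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT - reserved;
    }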
MemoryDeviceInfoList *qmp_memory_device_list(void);
uint64_t get_plugged_memory_size(void);
+unsigned int memory_devices_get_reserved_memslots(void);
+bool memory_devices_memslot_auto_decision_active(void);
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
const uint64_t *legacy_align, Error **errp);
void memory_device_plug(MemoryDeviceState *md, MachineState *ms);
diff --git a/include/hw/mem/npcm7xx_mc.h b/include/hw/mem/npcm7xx_mc.h
new file mode 100644
index 0000000000..7ed38be243
--- /dev/null
+++ b/include/hw/mem/npcm7xx_mc.h
@@ -0,0 +1,36 @@
+/*
+ * Nuvoton NPCM7xx Memory Controller stub
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_MC_H
+#define NPCM7XX_MC_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/**
+ * struct NPCM7xxMCState - Device state for the memory controller.
+ * @parent: System bus device.
+ * @mmio: Memory region through which registers are accessed.
+ */
+typedef struct NPCM7xxMCState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+} NPCM7xxMCState;
+
+#define TYPE_NPCM7XX_MC "npcm7xx-mc"
+#define NPCM7XX_MC(obj) OBJECT_CHECK(NPCM7xxMCState, (obj), TYPE_NPCM7XX_MC)
+
+#endif /* NPCM7XX_MC_H */
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index b67a1aedf6..d3b763453a 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -27,14 +27,7 @@
#include "hw/acpi/bios-linker-loader.h"
#include "qemu/uuid.h"
#include "hw/acpi/aml-build.h"
-
-#define NVDIMM_DEBUG 0
-#define nvdimm_debug(fmt, ...) \
- do { \
- if (NVDIMM_DEBUG) { \
- fprintf(stderr, "nvdimm: " fmt, ## __VA_ARGS__); \
- } \
- } while (0)
+#include "qom/object.h"
/*
* The minimum label data size is required by NVDIMM Namespace
@@ -45,10 +38,7 @@
#define MIN_NAMESPACE_LABEL_SIZE (128UL << 10)
#define TYPE_NVDIMM "nvdimm"
-#define NVDIMM(obj) OBJECT_CHECK(NVDIMMDevice, (obj), TYPE_NVDIMM)
-#define NVDIMM_CLASS(oc) OBJECT_CLASS_CHECK(NVDIMMClass, (oc), TYPE_NVDIMM)
-#define NVDIMM_GET_CLASS(obj) OBJECT_GET_CLASS(NVDIMMClass, (obj), \
- TYPE_NVDIMM)
+OBJECT_DECLARE_TYPE(NVDIMMDevice, NVDIMMClass, NVDIMM)
#define NVDIMM_LABEL_SIZE_PROP "label-size"
#define NVDIMM_UUID_PROP "uuid"
@@ -88,11 +78,16 @@ struct NVDIMMDevice {
bool unarmed;
/*
+ * Whether our DIMM is backed by ROM, and even label data cannot be
+ * written. If set, implies that "unarmed" is also set.
+ */
+ bool readonly;
+
+ /*
* The PPC64 - spapr requires each nvdimm device have a uuid.
*/
QemuUUID uuid;
};
-typedef struct NVDIMMDevice NVDIMMDevice;
struct NVDIMMClass {
/* private */
@@ -106,8 +101,9 @@ struct NVDIMMClass {
/* write @size bytes from @buf to NVDIMM label data at @offset. */
void (*write_label_data)(NVDIMMDevice *nvdimm, const void *buf,
uint64_t size, uint64_t offset);
+ void (*realize)(NVDIMMDevice *nvdimm, Error **errp);
+ void (*unrealize)(NVDIMMDevice *nvdimm);
};
-typedef struct NVDIMMClass NVDIMMClass;
#define NVDIMM_DSM_MEM_FILE "etc/acpi/nvdimm-mem"
@@ -158,7 +154,8 @@ void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
void nvdimm_build_srat(GArray *table_data);
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
BIOSLinker *linker, NVDIMMState *state,
- uint32_t ram_slots);
+ uint32_t ram_slots, const char *oem_id,
+ const char *oem_table_id);
void nvdimm_plug(NVDIMMState *state);
void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev);
#endif
diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h
index 289edc0f3d..322bebe555 100644
--- a/include/hw/mem/pc-dimm.h
+++ b/include/hw/mem/pc-dimm.h
@@ -18,14 +18,11 @@
#include "exec/memory.h"
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define TYPE_PC_DIMM "pc-dimm"
-#define PC_DIMM(obj) \
- OBJECT_CHECK(PCDIMMDevice, (obj), TYPE_PC_DIMM)
-#define PC_DIMM_CLASS(oc) \
- OBJECT_CLASS_CHECK(PCDIMMDeviceClass, (oc), TYPE_PC_DIMM)
-#define PC_DIMM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCDIMMDeviceClass, (obj), TYPE_PC_DIMM)
+OBJECT_DECLARE_TYPE(PCDIMMDevice, PCDIMMDeviceClass,
+ PC_DIMM)
#define PC_DIMM_ADDR_PROP "addr"
#define PC_DIMM_SLOT_PROP "slot"
@@ -44,7 +41,7 @@
* Default value: -1, means that slot is auto-allocated.
* @hostmem: host memory backend providing memory for @PCDIMMDevice
*/
-typedef struct PCDIMMDevice {
+struct PCDIMMDevice {
/* private */
DeviceState parent_obj;
@@ -53,28 +50,24 @@ typedef struct PCDIMMDevice {
uint32_t node;
int32_t slot;
HostMemoryBackend *hostmem;
-} PCDIMMDevice;
+};
/**
* PCDIMMDeviceClass:
* @realize: called after common dimm is realized so that the dimm based
* devices get the chance to do specified operations.
- * @get_vmstate_memory_region: returns #MemoryRegion which indicates the
- * memory of @dimm should be kept during live migration. Will not fail
- * after the device was realized.
*/
-typedef struct PCDIMMDeviceClass {
+struct PCDIMMDeviceClass {
/* private */
DeviceClass parent_class;
/* public */
void (*realize)(PCDIMMDevice *dimm, Error **errp);
- MemoryRegion *(*get_vmstate_memory_region)(PCDIMMDevice *dimm,
- Error **errp);
-} PCDIMMDeviceClass;
+ void (*unrealize)(PCDIMMDevice *dimm);
+};
void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
const uint64_t *legacy_align, Error **errp);
-void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine, Error **errp);
+void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine);
void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine);
#endif
diff --git a/include/hw/mem/sparse-mem.h b/include/hw/mem/sparse-mem.h
new file mode 100644
index 0000000000..f9863b154b
--- /dev/null
+++ b/include/hw/mem/sparse-mem.h
@@ -0,0 +1,19 @@
+/*
+ * A sparse memory device. Useful for fuzzing
+ *
+ * Copyright Red Hat Inc., 2021
+ *
+ * Authors:
+ * Alexander Bulekov <alxndr@bu.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SPARSE_MEM_H
+#define SPARSE_MEM_H
+#define TYPE_SPARSE_MEM "sparse-mem"
+
+MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length);
+
+#endif
diff --git a/include/hw/mips/bios.h b/include/hw/mips/bios.h
deleted file mode 100644
index c03007999a..0000000000
--- a/include/hw/mips/bios.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef HW_MIPS_BIOS_H
-#define HW_MIPS_BIOS_H
-
-#include "qemu/units.h"
-#include "cpu.h"
-
-#define BIOS_SIZE (4 * MiB)
-#ifdef TARGET_WORDS_BIGENDIAN
-#define BIOS_FILENAME "mips_bios.bin"
-#else
-#define BIOS_FILENAME "mipsel_bios.bin"
-#endif
-
-#endif
diff --git a/include/hw/mips/bootloader.h b/include/hw/mips/bootloader.h
new file mode 100644
index 0000000000..c32f6c2835
--- /dev/null
+++ b/include/hw/mips/bootloader.h
@@ -0,0 +1,26 @@
+/*
+ * Utility for QEMU MIPS to generate its simple bootloader
+ *
+ * Copyright (C) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MIPS_BOOTLOADER_H
+#define HW_MIPS_BOOTLOADER_H
+
+#include "exec/cpu-defs.h"
+
+void bl_gen_jump_to(void **ptr, target_ulong jump_addr);
+void bl_gen_jump_kernel(void **ptr,
+ bool set_sp, target_ulong sp,
+ bool set_a0, target_ulong a0,
+ bool set_a1, target_ulong a1,
+ bool set_a2, target_ulong a2,
+ bool set_a3, target_ulong a3,
+ target_ulong kernel_addr);
+void bl_gen_write_ulong(void **ptr, target_ulong addr, target_ulong val);
+void bl_gen_write_u32(void **ptr, target_ulong addr, uint32_t val);
+void bl_gen_write_u64(void **ptr, target_ulong addr, uint64_t val);
+
+#endif
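A hedged example of how a MIPS board might drive bl_gen_jump_kernel() while emitting its boot stub (bios, boot_offset and kernel_entry are assumptions supplied by the board code):

    void *p = memory_region_get_ram_ptr(bios) + boot_offset;

    /* Set $sp and $a0, leave $a1-$a3 untouched, then jump to the kernel */
    bl_gen_jump_kernel(&p,
                       true, 0x80100000 /* sp */,
                       true, 2 /* a0 */,
                       false, 0, false, 0, false, 0,
                       kernel_entry);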
diff --git a/include/hw/mips/cps.h b/include/hw/mips/cps.h
index a941c55f27..04d636246a 100644
--- a/include/hw/mips/cps.h
+++ b/include/hw/mips/cps.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,16 +21,18 @@
#define MIPS_CPS_H
#include "hw/sysbus.h"
+#include "hw/clock.h"
#include "hw/misc/mips_cmgcr.h"
#include "hw/intc/mips_gic.h"
#include "hw/misc/mips_cpc.h"
#include "hw/misc/mips_itu.h"
#include "target/mips/cpu.h"
+#include "qom/object.h"
#define TYPE_MIPS_CPS "mips-cps"
-#define MIPS_CPS(obj) OBJECT_CHECK(MIPSCPSState, (obj), TYPE_MIPS_CPS)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSCPSState, MIPS_CPS)
-typedef struct MIPSCPSState {
+struct MIPSCPSState {
SysBusDevice parent_obj;
uint32_t num_vp;
@@ -42,7 +44,8 @@ typedef struct MIPSCPSState {
MIPSGICState gic;
MIPSCPCState cpc;
MIPSITUState itu;
-} MIPSCPSState;
+ Clock *clock;
+};
qemu_irq get_cps_irq(MIPSCPSState *cps, int pin_number);
diff --git a/include/hw/mips/cpudevs.h b/include/hw/mips/cpudevs.h
deleted file mode 100644
index 291f59281a..0000000000
--- a/include/hw/mips/cpudevs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef HW_MIPS_CPUDEVS_H
-#define HW_MIPS_CPUDEVS_H
-
-#include "target/mips/cpu-qom.h"
-
-/* Definitions for MIPS CPU internal devices. */
-
-/* addr.c */
-uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr);
-uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr);
-uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr);
-bool mips_um_ksegs_enabled(void);
-void mips_um_ksegs_enable(void);
-
-/* mips_int.c */
-void cpu_mips_irq_init_cpu(MIPSCPU *cpu);
-
-/* mips_timer.c */
-void cpu_mips_clock_init(MIPSCPU *cpu);
-
-#endif
diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h
index 0af4c3d5d7..101799f7d3 100644
--- a/include/hw/mips/mips.h
+++ b/include/hw/mips/mips.h
@@ -2,14 +2,13 @@
#define HW_MIPS_H
/* Definitions for mips board emulation. */
+#include "qemu/units.h"
+
/* Kernels can be configured with 64KB pages */
-#define INITRD_PAGE_MASK (~((1 << 16) - 1))
+#define INITRD_PAGE_SIZE (64 * KiB)
#include "exec/memory.h"
-/* gt64xxx.c */
-PCIBus *gt64120_register(qemu_irq *pic);
-
/* bonito.c */
PCIBus *bonito_init(qemu_irq *pic);
diff --git a/include/hw/misc/a9scu.h b/include/hw/misc/a9scu.h
index efb0c305c2..c3759fb8c8 100644
--- a/include/hw/misc/a9scu.h
+++ b/include/hw/misc/a9scu.h
@@ -11,10 +11,11 @@
#define HW_MISC_A9SCU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
/* A9MP private memory region. */
-typedef struct A9SCUState {
+struct A9SCUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -23,9 +24,9 @@ typedef struct A9SCUState {
uint32_t control;
uint32_t status;
uint32_t num_cpu;
-} A9SCUState;
+};
#define TYPE_A9_SCU "a9-scu"
-#define A9_SCU(obj) OBJECT_CHECK(A9SCUState, (obj), TYPE_A9_SCU)
+OBJECT_DECLARE_SIMPLE_TYPE(A9SCUState, A9_SCU)
#endif
diff --git a/include/hw/misc/allwinner-a10-ccm.h b/include/hw/misc/allwinner-a10-ccm.h
new file mode 100644
index 0000000000..7f22532efa
--- /dev/null
+++ b/include/hw/misc/allwinner-a10-ccm.h
@@ -0,0 +1,67 @@
+/*
+ * Allwinner A10 Clock Control Module emulation
+ *
+ * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from Allwinner H3 CCU,
+ * by Niek Linnenbank.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_A10_CCM_H
+#define HW_MISC_ALLWINNER_A10_CCM_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by CCM device */
+#define AW_A10_CCM_IOSIZE (0x400)
+
+/** Total number of known registers */
+#define AW_A10_CCM_REGS_NUM (AW_A10_CCM_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_A10_CCM "allwinner-a10-ccm"
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10ClockCtlState, AW_A10_CCM)
+
+/** @} */
+
+/**
+ * Allwinner A10 CCM object instance state.
+ */
+struct AwA10ClockCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_A10_CCM_REGS_NUM];
+};
+
+#endif /* HW_MISC_ALLWINNER_A10_CCM_H */
diff --git a/include/hw/misc/allwinner-a10-dramc.h b/include/hw/misc/allwinner-a10-dramc.h
new file mode 100644
index 0000000000..b61fbecbe7
--- /dev/null
+++ b/include/hw/misc/allwinner-a10-dramc.h
@@ -0,0 +1,68 @@
+/*
+ * Allwinner A10 DRAM Controller emulation
+ *
+ * Copyright (C) 2022 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from Allwinner H3 DRAMC,
+ * by Niek Linnenbank.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_A10_DRAMC_H
+#define HW_MISC_ALLWINNER_A10_DRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by DRAMC device */
+#define AW_A10_DRAMC_IOSIZE (0x1000)
+
+/** Total number of known registers */
+#define AW_A10_DRAMC_REGS_NUM (AW_A10_DRAMC_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_A10_DRAMC "allwinner-a10-dramc"
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10DramControllerState, AW_A10_DRAMC)
+
+/** @} */
+
+/**
+ * Allwinner A10 DRAMC object instance state.
+ */
+struct AwA10DramControllerState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_A10_DRAMC_REGS_NUM];
+};
+
+#endif /* HW_MISC_ALLWINNER_A10_DRAMC_H */
diff --git a/include/hw/misc/allwinner-cpucfg.h b/include/hw/misc/allwinner-cpucfg.h
index 2c3693a8be..a717b47299 100644
--- a/include/hw/misc/allwinner-cpucfg.h
+++ b/include/hw/misc/allwinner-cpucfg.h
@@ -29,15 +29,14 @@
*/
#define TYPE_AW_CPUCFG "allwinner-cpucfg"
-#define AW_CPUCFG(obj) \
- OBJECT_CHECK(AwCpuCfgState, (obj), TYPE_AW_CPUCFG)
+OBJECT_DECLARE_SIMPLE_TYPE(AwCpuCfgState, AW_CPUCFG)
/** @} */
/**
* Allwinner CPU Configuration Module instance state
*/
-typedef struct AwCpuCfgState {
+struct AwCpuCfgState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -47,6 +46,6 @@ typedef struct AwCpuCfgState {
uint32_t super_standby;
uint32_t entry_addr;
-} AwCpuCfgState;
+};
#endif /* HW_MISC_ALLWINNER_CPUCFG_H */
diff --git a/include/hw/misc/allwinner-h3-ccu.h b/include/hw/misc/allwinner-h3-ccu.h
index eec59649f3..a04875bfca 100644
--- a/include/hw/misc/allwinner-h3-ccu.h
+++ b/include/hw/misc/allwinner-h3-ccu.h
@@ -42,15 +42,14 @@
*/
#define TYPE_AW_H3_CCU "allwinner-h3-ccu"
-#define AW_H3_CCU(obj) \
- OBJECT_CHECK(AwH3ClockCtlState, (obj), TYPE_AW_H3_CCU)
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3ClockCtlState, AW_H3_CCU)
/** @} */
/**
* Allwinner H3 CCU object instance state.
*/
-typedef struct AwH3ClockCtlState {
+struct AwH3ClockCtlState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -61,6 +60,6 @@ typedef struct AwH3ClockCtlState {
/** Array of hardware registers */
uint32_t regs[AW_H3_CCU_REGS_NUM];
-} AwH3ClockCtlState;
+};
#endif /* HW_MISC_ALLWINNER_H3_CCU_H */
diff --git a/include/hw/misc/allwinner-h3-dramc.h b/include/hw/misc/allwinner-h3-dramc.h
index bacdf236b7..0b6c877ef7 100644
--- a/include/hw/misc/allwinner-h3-dramc.h
+++ b/include/hw/misc/allwinner-h3-dramc.h
@@ -58,15 +58,14 @@
*/
#define TYPE_AW_H3_DRAMC "allwinner-h3-dramc"
-#define AW_H3_DRAMC(obj) \
- OBJECT_CHECK(AwH3DramCtlState, (obj), TYPE_AW_H3_DRAMC)
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3DramCtlState, AW_H3_DRAMC)
/** @} */
/**
* Allwinner H3 SDRAM Controller object instance state.
*/
-typedef struct AwH3DramCtlState {
+struct AwH3DramCtlState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -101,6 +100,6 @@ typedef struct AwH3DramCtlState {
/** @} */
-} AwH3DramCtlState;
+};
#endif /* HW_MISC_ALLWINNER_H3_DRAMC_H */
diff --git a/include/hw/misc/allwinner-h3-sysctrl.h b/include/hw/misc/allwinner-h3-sysctrl.h
index af4119e026..ec1c220535 100644
--- a/include/hw/misc/allwinner-h3-sysctrl.h
+++ b/include/hw/misc/allwinner-h3-sysctrl.h
@@ -43,15 +43,14 @@
*/
#define TYPE_AW_H3_SYSCTRL "allwinner-h3-sysctrl"
-#define AW_H3_SYSCTRL(obj) \
- OBJECT_CHECK(AwH3SysCtrlState, (obj), TYPE_AW_H3_SYSCTRL)
+OBJECT_DECLARE_SIMPLE_TYPE(AwH3SysCtrlState, AW_H3_SYSCTRL)
/** @} */
/**
* Allwinner H3 System Control object instance state
*/
-typedef struct AwH3SysCtrlState {
+struct AwH3SysCtrlState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -62,6 +61,6 @@ typedef struct AwH3SysCtrlState {
/** Array of hardware registers */
uint32_t regs[AW_H3_SYSCTRL_REGS_NUM];
-} AwH3SysCtrlState;
+};
#endif /* HW_MISC_ALLWINNER_H3_SYSCTRL_H */
diff --git a/include/hw/misc/allwinner-r40-ccu.h b/include/hw/misc/allwinner-r40-ccu.h
new file mode 100644
index 0000000000..ceb74eff92
--- /dev/null
+++ b/include/hw/misc/allwinner-r40-ccu.h
@@ -0,0 +1,65 @@
+/*
+ * Allwinner R40 Clock Control Unit emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_R40_CCU_H
+#define HW_MISC_ALLWINNER_R40_CCU_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+
+/**
+ * @name Constants
+ * @{
+ */
+
+/** Size of register I/O address space used by CCU device */
+#define AW_R40_CCU_IOSIZE (0x400)
+
+/** Total number of known registers */
+#define AW_R40_CCU_REGS_NUM (AW_R40_CCU_IOSIZE / sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * @name Object model
+ * @{
+ */
+
+#define TYPE_AW_R40_CCU "allwinner-r40-ccu"
+OBJECT_DECLARE_SIMPLE_TYPE(AwR40ClockCtlState, AW_R40_CCU)
+
+/** @} */
+
+/**
+ * Allwinner R40 CCU object instance state.
+ */
+struct AwR40ClockCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /** Array of hardware registers */
+ uint32_t regs[AW_R40_CCU_REGS_NUM];
+
+};
+
+#endif /* HW_MISC_ALLWINNER_R40_CCU_H */
diff --git a/include/hw/misc/allwinner-r40-dramc.h b/include/hw/misc/allwinner-r40-dramc.h
new file mode 100644
index 0000000000..6a1a3a7893
--- /dev/null
+++ b/include/hw/misc/allwinner-r40-dramc.h
@@ -0,0 +1,108 @@
+/*
+ * Allwinner R40 SDRAM Controller emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_R40_DRAMC_H
+#define HW_MISC_ALLWINNER_R40_DRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "exec/hwaddr.h"
+
+/**
+ * Constants
+ * @{
+ */
+
+/** Highest register address used by DRAMCOM module */
+#define AW_R40_DRAMCOM_REGS_MAXADDR (0x804)
+
+/** Total number of known DRAMCOM registers */
+#define AW_R40_DRAMCOM_REGS_NUM (AW_R40_DRAMCOM_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** Highest register address used by DRAMCTL module */
+#define AW_R40_DRAMCTL_REGS_MAXADDR (0x88c)
+
+/** Total number of known DRAMCTL registers */
+#define AW_R40_DRAMCTL_REGS_NUM (AW_R40_DRAMCTL_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** Highest register address used by DRAMPHY module */
+#define AW_R40_DRAMPHY_REGS_MAXADDR (0x4)
+
+/** Total number of known DRAMPHY registers */
+#define AW_R40_DRAMPHY_REGS_NUM (AW_R40_DRAMPHY_REGS_MAXADDR / \
+ sizeof(uint32_t))
+
+/** @} */
+
+/**
+ * Object model
+ * @{
+ */
+
+#define TYPE_AW_R40_DRAMC "allwinner-r40-dramc"
+OBJECT_DECLARE_SIMPLE_TYPE(AwR40DramCtlState, AW_R40_DRAMC)
+
+/** @} */
+
+/**
+ * Allwinner R40 SDRAM Controller object instance state.
+ */
+struct AwR40DramCtlState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Physical base address for start of RAM */
+ hwaddr ram_addr;
+
+ /** Total RAM size in megabytes */
+ uint32_t ram_size;
+
+ uint8_t set_row_bits;
+ uint8_t set_bank_bits;
+ uint8_t set_col_bits;
+
+ /**
+ * @name Memory Regions
+ * @{
+ */
+ MemoryRegion dramcom_iomem; /**< DRAMCOM module I/O registers */
+ MemoryRegion dramctl_iomem; /**< DRAMCTL module I/O registers */
+ MemoryRegion dramphy_iomem; /**< DRAMPHY module I/O registers */
+ MemoryRegion dram_high; /**< The high 1 GiB of DRAM for dual-rank detection */
+ MemoryRegion detect_cells; /**< DRAM memory cells for auto detect */
+
+ /** @} */
+
+ /**
+ * @name Hardware Registers
+ * @{
+ */
+
+ uint32_t dramcom[AW_R40_DRAMCOM_REGS_NUM]; /**< DRAMCOM registers */
+ uint32_t dramctl[AW_R40_DRAMCTL_REGS_NUM]; /**< DRAMCTL registers */
+ uint32_t dramphy[AW_R40_DRAMPHY_REGS_NUM]; /**< DRAMPHY registers */
+
+ /** @} */
+
+};
+
+#endif /* HW_MISC_ALLWINNER_R40_DRAMC_H */
diff --git a/include/hw/misc/allwinner-sid.h b/include/hw/misc/allwinner-sid.h
index 4c1fa4762b..3bfa887a96 100644
--- a/include/hw/misc/allwinner-sid.h
+++ b/include/hw/misc/allwinner-sid.h
@@ -30,15 +30,14 @@
*/
#define TYPE_AW_SID "allwinner-sid"
-#define AW_SID(obj) \
- OBJECT_CHECK(AwSidState, (obj), TYPE_AW_SID)
+OBJECT_DECLARE_SIMPLE_TYPE(AwSidState, AW_SID)
/** @} */
/**
* Allwinner Security ID object instance state
*/
-typedef struct AwSidState {
+struct AwSidState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -55,6 +54,6 @@ typedef struct AwSidState {
/** Stores the emulated device identifier */
QemuUUID identifier;
-} AwSidState;
+};
#endif /* HW_MISC_ALLWINNER_SID_H */
diff --git a/include/hw/misc/allwinner-sramc.h b/include/hw/misc/allwinner-sramc.h
new file mode 100644
index 0000000000..66b01b8d04
--- /dev/null
+++ b/include/hw/misc/allwinner-sramc.h
@@ -0,0 +1,69 @@
+/*
+ * Allwinner SRAM controller emulation
+ *
+ * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MISC_ALLWINNER_SRAMC_H
+#define HW_MISC_ALLWINNER_SRAMC_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "qemu/uuid.h"
+
+/**
+ * Object model
+ * @{
+ */
+#define TYPE_AW_SRAMC "allwinner-sramc"
+#define TYPE_AW_SRAMC_SUN8I_R40 TYPE_AW_SRAMC "-sun8i-r40"
+OBJECT_DECLARE_TYPE(AwSRAMCState, AwSRAMCClass, AW_SRAMC)
+
+/** @} */
+
+/**
+ * Allwinner SRAMC object instance state
+ */
+struct AwSRAMCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ /** Maps I/O registers in physical memory */
+ MemoryRegion iomem;
+
+ /* registers */
+ uint32_t sram_ctl1;
+ uint32_t sram_ver;
+ uint32_t sram_soft_entry_reg0;
+};
+
+/**
+ * Allwinner SRAM Controller class-level struct.
+ *
+ * This struct is filled in by the device-specific code for each sunxi
+ * SoC, so that the generic code can use it to support all devices.
+ */
+struct AwSRAMCClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+
+ uint32_t sram_version_code;
+};
+
+#endif /* HW_MISC_ALLWINNER_SRAMC_H */
diff --git a/include/hw/misc/arm11scu.h b/include/hw/misc/arm11scu.h
index 5ad0f3d339..e5c0282aec 100644
--- a/include/hw/misc/arm11scu.h
+++ b/include/hw/misc/arm11scu.h
@@ -12,11 +12,12 @@
#define HW_MISC_ARM11SCU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ARM11_SCU "arm11-scu"
-#define ARM11_SCU(obj) OBJECT_CHECK(ARM11SCUState, (obj), TYPE_ARM11_SCU)
+OBJECT_DECLARE_SIMPLE_TYPE(ARM11SCUState, ARM11_SCU)
-typedef struct ARM11SCUState {
+struct ARM11SCUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -24,6 +25,6 @@ typedef struct ARM11SCUState {
uint32_t control;
uint32_t num_cpu;
MemoryRegion iomem;
-} ARM11SCUState;
+};
#endif
diff --git a/include/hw/misc/arm_integrator_debug.h b/include/hw/misc/arm_integrator_debug.h
index 0077dacb44..798b082164 100644
--- a/include/hw/misc/arm_integrator_debug.h
+++ b/include/hw/misc/arm_integrator_debug.h
@@ -3,7 +3,7 @@
*
* Browse the data sheet:
*
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0159b/Babbfijf.html
+ * https://developer.arm.com/documentation/dui0159/b/peripherals-and-interfaces/debug-leds-and-dip-switch-interface
*
* Copyright (c) 2013 Alex Bennée <alex@bennee.com>
*
diff --git a/include/hw/misc/armsse-cpu-pwrctrl.h b/include/hw/misc/armsse-cpu-pwrctrl.h
new file mode 100644
index 0000000000..51d45ede7d
--- /dev/null
+++ b/include/hw/misc/armsse-cpu-pwrctrl.h
@@ -0,0 +1,40 @@
+/*
+ * ARM SSE CPU PWRCTRL register block
+ *
+ * Copyright (c) 2021 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "CPU<N>_PWRCTRL block" which is part of the
+ * Arm Corstone SSE-300 Example Subsystem and documented in
+ * https://developer.arm.com/documentation/101773/0000
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ */
+
+#ifndef HW_MISC_ARMSSE_CPU_PWRCTRL_H
+#define HW_MISC_ARMSSE_CPU_PWRCTRL_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_ARMSSE_CPU_PWRCTRL "armsse-cpu-pwrctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSECPUPwrCtrl, ARMSSE_CPU_PWRCTRL)
+
+struct ARMSSECPUPwrCtrl {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ uint32_t cpupwrcfg;
+};
+
+#endif
diff --git a/include/hw/misc/armsse-cpuid.h b/include/hw/misc/armsse-cpuid.h
index 0ef33fcaba..9c0926322c 100644
--- a/include/hw/misc/armsse-cpuid.h
+++ b/include/hw/misc/armsse-cpuid.h
@@ -12,7 +12,7 @@
/*
* This is a model of the "CPU_IDENTITY" register block which is part of the
* Arm SSE-200 and documented in
- * http://infocenter.arm.com/help/topic/com.arm.doc.101104_0100_00_en/corelink_sse200_subsystem_for_embedded_technical_reference_manual_101104_0100_00_en.pdf
+ * https://developer.arm.com/documentation/101104/latest/
*
* QEMU interface:
* + QOM property "CPUID": the value to use for the CPUID register
@@ -23,11 +23,12 @@
#define HW_MISC_ARMSSE_CPUID_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ARMSSE_CPUID "armsse-cpuid"
-#define ARMSSE_CPUID(obj) OBJECT_CHECK(ARMSSECPUID, (obj), TYPE_ARMSSE_CPUID)
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSECPUID, ARMSSE_CPUID)
-typedef struct ARMSSECPUID {
+struct ARMSSECPUID {
/*< private >*/
SysBusDevice parent_obj;
@@ -36,6 +37,6 @@ typedef struct ARMSSECPUID {
/* Properties */
uint32_t cpuid;
-} ARMSSECPUID;
+};
#endif
diff --git a/include/hw/misc/armsse-mhu.h b/include/hw/misc/armsse-mhu.h
index cf5d8a73e6..41925ded89 100644
--- a/include/hw/misc/armsse-mhu.h
+++ b/include/hw/misc/armsse-mhu.h
@@ -12,7 +12,7 @@
/*
* This is a model of the Message Handling Unit (MHU) which is part of the
* Arm SSE-200 and documented in
- * http://infocenter.arm.com/help/topic/com.arm.doc.101104_0100_00_en/corelink_sse200_subsystem_for_embedded_technical_reference_manual_101104_0100_00_en.pdf
+ * https://developer.arm.com/documentation/101104/latest/
*
* QEMU interface:
* + sysbus MMIO region 0: the system information register bank
@@ -24,11 +24,12 @@
#define HW_MISC_ARMSSE_MHU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ARMSSE_MHU "armsse-mhu"
-#define ARMSSE_MHU(obj) OBJECT_CHECK(ARMSSEMHU, (obj), TYPE_ARMSSE_MHU)
+OBJECT_DECLARE_SIMPLE_TYPE(ARMSSEMHU, ARMSSE_MHU)
-typedef struct ARMSSEMHU {
+struct ARMSSEMHU {
/*< private >*/
SysBusDevice parent_obj;
@@ -39,6 +40,6 @@ typedef struct ARMSSEMHU {
uint32_t cpu0intr;
uint32_t cpu1intr;
-} ARMSSEMHU;
+};
#endif
diff --git a/include/hw/misc/armv7m_ras.h b/include/hw/misc/armv7m_ras.h
new file mode 100644
index 0000000000..ba6daccf3f
--- /dev/null
+++ b/include/hw/misc/armv7m_ras.h
@@ -0,0 +1,37 @@
+/*
+ * Arm M-profile RAS (Reliability, Availability and Serviceability) block
+ *
+ * Copyright (c) 2021 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the RAS register block of an M-profile CPU
+ * (the registers starting at 0xE0005000 with ERRFRn).
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ *
+ * The QEMU implementation currently provides "minimal RAS" only.
+ */
+
+#ifndef HW_MISC_ARMV7M_RAS_H
+#define HW_MISC_ARMV7M_RAS_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ARMV7M_RAS "armv7m-ras"
+OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MRAS, ARMV7M_RAS)
+
+struct ARMv7MRAS {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+};
+
+#endif
diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
new file mode 100644
index 0000000000..ecb1b67de8
--- /dev/null
+++ b/include/hw/misc/aspeed_hace.h
@@ -0,0 +1,50 @@
+/*
+ * ASPEED Hash and Crypto Engine
+ *
+ * Copyright (C) 2021 IBM Corp.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ASPEED_HACE_H
+#define ASPEED_HACE_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_HACE "aspeed.hace"
+#define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400"
+#define TYPE_ASPEED_AST2500_HACE TYPE_ASPEED_HACE "-ast2500"
+#define TYPE_ASPEED_AST2600_HACE TYPE_ASPEED_HACE "-ast2600"
+#define TYPE_ASPEED_AST1030_HACE TYPE_ASPEED_HACE "-ast1030"
+
+OBJECT_DECLARE_TYPE(AspeedHACEState, AspeedHACEClass, ASPEED_HACE)
+
+#define ASPEED_HACE_NR_REGS (0x64 >> 2)
+#define ASPEED_HACE_MAX_SG 256 /* max number of entries */
+
+struct AspeedHACEState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ struct iovec iov_cache[ASPEED_HACE_MAX_SG];
+ uint32_t regs[ASPEED_HACE_NR_REGS];
+ uint32_t total_req_len;
+ uint32_t iov_count;
+
+ MemoryRegion *dram_mr;
+ AddressSpace dram_as;
+};
+
+
+struct AspeedHACEClass {
+ SysBusDeviceClass parent_class;
+
+ uint32_t src_mask;
+ uint32_t dest_mask;
+ uint32_t key_mask;
+ uint32_t hash_mask;
+};
+
+#endif /* ASPEED_HACE_H */
diff --git a/include/hw/misc/aspeed_i3c.h b/include/hw/misc/aspeed_i3c.h
new file mode 100644
index 0000000000..39679dfa1a
--- /dev/null
+++ b/include/hw/misc/aspeed_i3c.h
@@ -0,0 +1,48 @@
+/*
+ * ASPEED I3C Controller
+ *
+ * Copyright (C) 2021 ASPEED Technology Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef ASPEED_I3C_H
+#define ASPEED_I3C_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_I3C "aspeed.i3c"
+#define TYPE_ASPEED_I3C_DEVICE "aspeed.i3c.device"
+OBJECT_DECLARE_TYPE(AspeedI3CState, AspeedI3CClass, ASPEED_I3C)
+
+#define ASPEED_I3C_NR_REGS (0x70 >> 2)
+#define ASPEED_I3C_DEVICE_NR_REGS (0x300 >> 2)
+#define ASPEED_I3C_NR_DEVICES 6
+
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedI3CDevice, ASPEED_I3C_DEVICE)
+typedef struct AspeedI3CDevice {
+ /* <private> */
+ SysBusDevice parent;
+
+ /* <public> */
+ MemoryRegion mr;
+ qemu_irq irq;
+
+ uint8_t id;
+ uint32_t regs[ASPEED_I3C_DEVICE_NR_REGS];
+} AspeedI3CDevice;
+
+typedef struct AspeedI3CState {
+ /* <private> */
+ SysBusDevice parent;
+
+ /* <public> */
+ MemoryRegion iomem;
+ MemoryRegion iomem_container;
+ qemu_irq irq;
+
+ uint32_t regs[ASPEED_I3C_NR_REGS];
+ AspeedI3CDevice devices[ASPEED_I3C_NR_DEVICES];
+} AspeedI3CState;
+#endif /* ASPEED_I3C_H */
diff --git a/include/hw/misc/aspeed_lpc.h b/include/hw/misc/aspeed_lpc.h
new file mode 100644
index 0000000000..fa398959af
--- /dev/null
+++ b/include/hw/misc/aspeed_lpc.h
@@ -0,0 +1,45 @@
+/*
+ * ASPEED LPC Controller
+ *
+ * Copyright (C) 2017-2018 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef ASPEED_LPC_H
+#define ASPEED_LPC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_LPC "aspeed.lpc"
+#define ASPEED_LPC(obj) OBJECT_CHECK(AspeedLPCState, (obj), TYPE_ASPEED_LPC)
+
+#define ASPEED_LPC_NR_REGS (0x260 >> 2)
+
+enum aspeed_lpc_subdevice {
+ aspeed_lpc_kcs_1 = 0,
+ aspeed_lpc_kcs_2,
+ aspeed_lpc_kcs_3,
+ aspeed_lpc_kcs_4,
+ aspeed_lpc_ibt,
+};
+
+#define ASPEED_LPC_NR_SUBDEVS 5
+
+typedef struct AspeedLPCState {
+ /* <private> */
+ SysBusDevice parent;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ qemu_irq subdevice_irqs[ASPEED_LPC_NR_SUBDEVS];
+ uint32_t subdevice_irqs_pending;
+
+ uint32_t regs[ASPEED_LPC_NR_REGS];
+ uint32_t hicr7;
+} AspeedLPCState;
+
+#endif /* ASPEED_LPC_H */
diff --git a/include/hw/misc/aspeed_peci.h b/include/hw/misc/aspeed_peci.h
new file mode 100644
index 0000000000..8382707d9f
--- /dev/null
+++ b/include/hw/misc/aspeed_peci.h
@@ -0,0 +1,29 @@
+/*
+ * Aspeed PECI Controller
+ *
+ * Copyright (c) Meta Platforms, Inc. and affiliates. (http://www.meta.com)
+ *
+ * This code is licensed under the GPL version 2 or later. See the COPYING
+ * file in the top-level directory.
+ */
+
+#ifndef ASPEED_PECI_H
+#define ASPEED_PECI_H
+
+#include "hw/sysbus.h"
+
+#define ASPEED_PECI_NR_REGS ((0xFC + 4) >> 2)
+#define TYPE_ASPEED_PECI "aspeed.peci"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedPECIState, ASPEED_PECI);
+
+struct AspeedPECIState {
+ /* <private> */
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ uint32_t regs[ASPEED_PECI_NR_REGS];
+};
+
+#endif
diff --git a/include/hw/misc/aspeed_sbc.h b/include/hw/misc/aspeed_sbc.h
new file mode 100644
index 0000000000..405e6782b9
--- /dev/null
+++ b/include/hw/misc/aspeed_sbc.h
@@ -0,0 +1,45 @@
+/*
+ * ASPEED Secure Boot Controller
+ *
+ * Copyright (C) 2021-2022 IBM Corp.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ASPEED_SBC_H
+#define ASPEED_SBC_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_SBC "aspeed.sbc"
+#define TYPE_ASPEED_AST2600_SBC TYPE_ASPEED_SBC "-ast2600"
+OBJECT_DECLARE_TYPE(AspeedSBCState, AspeedSBCClass, ASPEED_SBC)
+
+#define ASPEED_SBC_NR_REGS (0x93c >> 2)
+
+#define QSR_AES BIT(27)
+#define QSR_RSA1024 (0x0 << 12)
+#define QSR_RSA2048 (0x1 << 12)
+#define QSR_RSA3072 (0x2 << 12)
+#define QSR_RSA4096 (0x3 << 12)
+#define QSR_SHA224 (0x0 << 10)
+#define QSR_SHA256 (0x1 << 10)
+#define QSR_SHA384 (0x2 << 10)
+#define QSR_SHA512 (0x3 << 10)
+
+struct AspeedSBCState {
+ SysBusDevice parent;
+
+ bool emmc_abr;
+ uint32_t signing_settings;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[ASPEED_SBC_NR_REGS];
+};
+
+struct AspeedSBCClass {
+ SysBusDeviceClass parent_class;
+};
+
+#endif /* ASPEED_SBC_H */
diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h
index a6739bb846..7cb6018dbc 100644
--- a/include/hw/misc/aspeed_scu.h
+++ b/include/hw/misc/aspeed_scu.h
@@ -12,17 +12,19 @@
#define ASPEED_SCU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_SCU "aspeed.scu"
-#define ASPEED_SCU(obj) OBJECT_CHECK(AspeedSCUState, (obj), TYPE_ASPEED_SCU)
+OBJECT_DECLARE_TYPE(AspeedSCUState, AspeedSCUClass, ASPEED_SCU)
#define TYPE_ASPEED_2400_SCU TYPE_ASPEED_SCU "-ast2400"
#define TYPE_ASPEED_2500_SCU TYPE_ASPEED_SCU "-ast2500"
#define TYPE_ASPEED_2600_SCU TYPE_ASPEED_SCU "-ast2600"
+#define TYPE_ASPEED_1030_SCU TYPE_ASPEED_SCU "-ast1030"
#define ASPEED_SCU_NR_REGS (0x1A8 >> 2)
#define ASPEED_AST2600_SCU_NR_REGS (0xE20 >> 2)
-typedef struct AspeedSCUState {
+struct AspeedSCUState {
/*< private >*/
SysBusDevice parent_obj;
@@ -34,7 +36,7 @@ typedef struct AspeedSCUState {
uint32_t hw_strap1;
uint32_t hw_strap2;
uint32_t hw_prot_key;
-} AspeedSCUState;
+};
#define AST2400_A0_SILICON_REV 0x02000303U
#define AST2400_A1_SILICON_REV 0x02010303U
@@ -42,25 +44,27 @@ typedef struct AspeedSCUState {
#define AST2500_A1_SILICON_REV 0x04010303U
#define AST2600_A0_SILICON_REV 0x05000303U
#define AST2600_A1_SILICON_REV 0x05010303U
+#define AST2600_A2_SILICON_REV 0x05020303U
+#define AST2600_A3_SILICON_REV 0x05030303U
+#define AST1030_A0_SILICON_REV 0x80000000U
+#define AST1030_A1_SILICON_REV 0x80010000U
#define ASPEED_IS_AST2500(si_rev) ((((si_rev) >> 24) & 0xff) == 0x04)
-extern bool is_supported_silicon_rev(uint32_t silicon_rev);
+bool is_supported_silicon_rev(uint32_t silicon_rev);
-#define ASPEED_SCU_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedSCUClass, (klass), TYPE_ASPEED_SCU)
-#define ASPEED_SCU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedSCUClass, (obj), TYPE_ASPEED_SCU)
-typedef struct AspeedSCUClass {
+struct AspeedSCUClass {
SysBusDeviceClass parent_class;
const uint32_t *resets;
uint32_t (*calc_hpll)(AspeedSCUState *s, uint32_t hpll_reg);
+ uint32_t (*get_apb)(AspeedSCUState *s);
uint32_t apb_divider;
uint32_t nr_regs;
+ bool clkin_25Mhz;
const MemoryRegionOps *ops;
-} AspeedSCUClass;
+};
#define ASPEED_SCU_PROT_KEY 0x1688A8A8
@@ -286,6 +290,7 @@ uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s);
#define SCU_AST2500_HW_STRAP_ESPI_FLASH_ENABLE (0x1 << 26)
#define SCU_AST2500_HW_STRAP_ESPI_ENABLE (0x1 << 25)
#define SCU_AST2500_HW_STRAP_DDR4_ENABLE (0x1 << 24)
+#define SCU_AST2500_HW_STRAP_25HZ_CLOCK_MODE (0x1 << 23)
#define SCU_AST2500_HW_STRAP_ACPI_ENABLE (0x1 << 19)
#define SCU_AST2500_HW_STRAP_USBCKI_FREQ (0x1 << 18)
@@ -316,4 +321,44 @@ uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s);
SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \
SCU_AST2500_HW_STRAP_RESERVED1)
+/*
+ * SCU200 H-PLL Parameter Register (for Aspeed AST2600 SOC)
+ *
+ * 28:26 H-PLL Parameters
+ * 25 Enable H-PLL reset
+ * 24 Enable H-PLL bypass mode
+ * 23 Turn off H-PLL
+ * 22:19 H-PLL Post Divider (P)
+ * 18:13 H-PLL Numerator (M)
+ * 12:0 H-PLL Denominator (N)
+ *
+ * (Output frequency) = CLKIN(25MHz) * [(M+1) / (N+1)] / (P+1)
+ *
+ * The default frequency is 1200MHz when CLKIN = 25MHz
+ */
+#define SCU_AST2600_H_PLL_BYPASS_EN (0x1 << 24)
+#define SCU_AST2600_H_PLL_OFF (0x1 << 23)
+
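To make the formula above concrete, here is a minimal sketch of decoding SCU200 into an H-PLL output frequency. The helper name, the 25MHz CLKIN constant and the plain shift/mask extraction are illustrative assumptions, not part of this header.

    /* Sketch only: decode SCU200 per the bit layout above and apply the formula. */
    static uint64_t ast2600_hpll_freq(uint32_t scu200)
    {
        const uint64_t clkin = 25000000;       /* assumed 25MHz CLKIN */
        uint32_t p = (scu200 >> 19) & 0xf;     /* bits 22:19, post divider */
        uint32_t m = (scu200 >> 13) & 0x3f;    /* bits 18:13, numerator    */
        uint32_t n = scu200 & 0x1fff;          /* bits 12:0,  denominator  */

        if (scu200 & SCU_AST2600_H_PLL_BYPASS_EN) {
            return clkin;                      /* bypass: CLKIN passes straight through */
        }
        return clkin * (m + 1) / (n + 1) / (p + 1);
    }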
+/*
+ * SCU310 Clock Selection Register Set 4 (for Aspeed AST1030 SOC)
+ *
+ * 31 I3C Clock Source selection
+ * 30:28 I3C clock divider selection
+ * 26:24 MAC AHB clock divider selection
+ * 22:20 RGMII 125MHz clock divider ratio
+ * 19:16 RGMII 50MHz clock divider ratio
+ * 15 LHCLK clock generation/output enable control
+ * 14:12 LHCLK divider selection
+ * 11:8 APB Bus PCLK divider selection
+ * 7 Select PECI clock source
+ * 6 Select UART debug port clock source
+ * 5 Select UART6 clock source
+ * 4 Select UART5 clock source
+ * 3 Select UART4 clock source
+ * 2 Select UART3 clock source
+ * 1 Select UART2 clock source
+ * 0 Select UART1 clock source
+ */
+#define SCU_AST1030_CLK_GET_PCLK_DIV(x) (((x) >> 8) & 0xf)
+
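A small hedged sketch of how the PCLK divider field might be consumed; treating the field value n as a divide-by-(n + 1) of an HCLK input is an assumption for illustration only.

    /* Sketch only: derive the APB PCLK from an assumed HCLK source. */
    static uint32_t ast1030_pclk_freq(uint32_t scu310, uint32_t hclk_freq)
    {
        uint32_t div = SCU_AST1030_CLK_GET_PCLK_DIV(scu310);  /* bits 11:8 */

        return hclk_freq / (div + 1);  /* assumed divide-by-(n + 1) encoding */
    }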
#endif /* ASPEED_SCU_H */
diff --git a/include/hw/misc/aspeed_sdmc.h b/include/hw/misc/aspeed_sdmc.h
index cea1e67fe3..ec2d59a14f 100644
--- a/include/hw/misc/aspeed_sdmc.h
+++ b/include/hw/misc/aspeed_sdmc.h
@@ -10,16 +10,28 @@
#define ASPEED_SDMC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_SDMC "aspeed.sdmc"
-#define ASPEED_SDMC(obj) OBJECT_CHECK(AspeedSDMCState, (obj), TYPE_ASPEED_SDMC)
+OBJECT_DECLARE_TYPE(AspeedSDMCState, AspeedSDMCClass, ASPEED_SDMC)
#define TYPE_ASPEED_2400_SDMC TYPE_ASPEED_SDMC "-ast2400"
#define TYPE_ASPEED_2500_SDMC TYPE_ASPEED_SDMC "-ast2500"
#define TYPE_ASPEED_2600_SDMC TYPE_ASPEED_SDMC "-ast2600"
-#define ASPEED_SDMC_NR_REGS (0x174 >> 2)
+/*
+ * SDMC has 174 documented registers. In addition the u-boot device tree
+ * describes the following regions:
+ * - PHY status regs at offset 0x400, length 0x200
+ * - PHY setting regs at offset 0x100, length 0x300
+ *
+ * There are two sets of MRS (Mode Registers) configuration in the AST2600
+ * memory system: one is in the SDRAM MC (memory controller), which is used
+ * at run time, and the other is in the DDR-PHY IP, which is used during
+ * DDR-PHY training.
+ */
+#define ASPEED_SDMC_NR_REGS (0x500 >> 2)
-typedef struct AspeedSDMCState {
+struct AspeedSDMCState {
/*< private >*/
SysBusDevice parent_obj;
@@ -29,20 +41,16 @@ typedef struct AspeedSDMCState {
uint32_t regs[ASPEED_SDMC_NR_REGS];
uint64_t ram_size;
uint64_t max_ram_size;
-} AspeedSDMCState;
+};
-#define ASPEED_SDMC_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedSDMCClass, (klass), TYPE_ASPEED_SDMC)
-#define ASPEED_SDMC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedSDMCClass, (obj), TYPE_ASPEED_SDMC)
-typedef struct AspeedSDMCClass {
+struct AspeedSDMCClass {
SysBusDeviceClass parent_class;
uint64_t max_ram_size;
const uint64_t *valid_ram_sizes;
uint32_t (*compute_conf)(AspeedSDMCState *s, uint32_t data);
void (*write)(AspeedSDMCState *s, uint32_t reg, uint32_t data);
-} AspeedSDMCClass;
+};
#endif /* ASPEED_SDMC_H */
diff --git a/include/hw/misc/aspeed_xdma.h b/include/hw/misc/aspeed_xdma.h
index 00b45d931f..b1478fd1c6 100644
--- a/include/hw/misc/aspeed_xdma.h
+++ b/include/hw/misc/aspeed_xdma.h
@@ -3,21 +3,25 @@
* Eddie James <eajames@linux.ibm.com>
*
* Copyright (C) 2019 IBM Corp.
- * SPDX-License-Identifer: GPL-2.0-or-later
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef ASPEED_XDMA_H
#define ASPEED_XDMA_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_XDMA "aspeed.xdma"
-#define ASPEED_XDMA(obj) OBJECT_CHECK(AspeedXDMAState, (obj), TYPE_ASPEED_XDMA)
+#define TYPE_ASPEED_2400_XDMA TYPE_ASPEED_XDMA "-ast2400"
+#define TYPE_ASPEED_2500_XDMA TYPE_ASPEED_XDMA "-ast2500"
+#define TYPE_ASPEED_2600_XDMA TYPE_ASPEED_XDMA "-ast2600"
+OBJECT_DECLARE_TYPE(AspeedXDMAState, AspeedXDMAClass, ASPEED_XDMA)
#define ASPEED_XDMA_NUM_REGS (ASPEED_XDMA_REG_SIZE / sizeof(uint32_t))
#define ASPEED_XDMA_REG_SIZE 0x7C
-typedef struct AspeedXDMAState {
+struct AspeedXDMAState {
SysBusDevice parent;
MemoryRegion iomem;
@@ -25,6 +29,18 @@ typedef struct AspeedXDMAState {
char bmc_cmdq_readp_set;
uint32_t regs[ASPEED_XDMA_NUM_REGS];
-} AspeedXDMAState;
+};
+
+struct AspeedXDMAClass {
+ SysBusDeviceClass parent_class;
+
+ uint8_t cmdq_endp;
+ uint8_t cmdq_wrp;
+ uint8_t cmdq_rdp;
+ uint8_t intr_ctrl;
+ uint32_t intr_ctrl_mask;
+ uint8_t intr_status;
+ uint32_t intr_complete;
+};
#endif /* ASPEED_XDMA_H */
diff --git a/include/hw/misc/auxbus.h b/include/hw/misc/auxbus.h
index 15a8973517..03cacdee42 100644
--- a/include/hw/misc/auxbus.h
+++ b/include/hw/misc/auxbus.h
@@ -27,12 +27,14 @@
#include "exec/memory.h"
#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct AUXBus AUXBus;
typedef struct AUXSlave AUXSlave;
typedef enum AUXCommand AUXCommand;
typedef enum AUXReply AUXReply;
-typedef struct AUXTOI2CState AUXTOI2CState;
+
+#define TYPE_AUXTOI2C "aux-to-i2c-bridge"
+OBJECT_DECLARE_SIMPLE_TYPE(AUXTOI2CState, AUXTOI2C)
enum AUXCommand {
WRITE_I2C = 0,
@@ -53,7 +55,7 @@ enum AUXReply {
};
#define TYPE_AUX_BUS "aux-bus"
-#define AUX_BUS(obj) OBJECT_CHECK(AUXBus, (obj), TYPE_AUX_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(AUXBus, AUX_BUS)
struct AUXBus {
/* < private > */
@@ -72,8 +74,7 @@ struct AUXBus {
};
#define TYPE_AUX_SLAVE "aux-slave"
-#define AUX_SLAVE(obj) \
- OBJECT_CHECK(AUXSlave, (obj), TYPE_AUX_SLAVE)
+OBJECT_DECLARE_SIMPLE_TYPE(AUXSlave, AUX_SLAVE)
struct AUXSlave {
/* < private > */
@@ -105,7 +106,7 @@ void aux_bus_realize(AUXBus *bus);
*
* Returns the reply of the request.
*
- * @bus Ths bus where the request happen.
+ * @bus The bus where the request happens.
* @cmd The command requested.
* @address The 20-bit address of the slave.
* @len The length of the read or write.
diff --git a/include/hw/misc/avr_power.h b/include/hw/misc/avr_power.h
index e08e44f629..388e421aa7 100644
--- a/include/hw/misc/avr_power.h
+++ b/include/hw/misc/avr_power.h
@@ -26,13 +26,13 @@
#define HW_MISC_AVR_POWER_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
+#include "qom/object.h"
#define TYPE_AVR_MASK "avr-power"
-#define AVR_MASK(obj) OBJECT_CHECK(AVRMaskState, (obj), TYPE_AVR_MASK)
+OBJECT_DECLARE_SIMPLE_TYPE(AVRMaskState, AVR_MASK)
-typedef struct {
+struct AVRMaskState {
/* <private> */
SysBusDevice parent_obj;
@@ -41,6 +41,6 @@ typedef struct {
uint8_t val;
qemu_irq irq[8];
-} AVRMaskState;
+};
#endif /* HW_MISC_AVR_POWER_H */
diff --git a/include/hw/misc/bcm2835_cprman.h b/include/hw/misc/bcm2835_cprman.h
new file mode 100644
index 0000000000..0d38036728
--- /dev/null
+++ b/include/hw/misc/bcm2835_cprman.h
@@ -0,0 +1,210 @@
+/*
+ * BCM2835 CPRMAN clock manager
+ *
+ * Copyright (c) 2020 Luc Michel <luc@lmichel.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_BCM2835_CPRMAN_H
+#define HW_MISC_BCM2835_CPRMAN_H
+
+#include "hw/sysbus.h"
+#include "hw/qdev-clock.h"
+
+#define TYPE_BCM2835_CPRMAN "bcm2835-cprman"
+
+typedef struct BCM2835CprmanState BCM2835CprmanState;
+
+DECLARE_INSTANCE_CHECKER(BCM2835CprmanState, CPRMAN,
+ TYPE_BCM2835_CPRMAN)
+
+#define CPRMAN_NUM_REGS (0x2000 / sizeof(uint32_t))
+
+typedef enum CprmanPll {
+ CPRMAN_PLLA = 0,
+ CPRMAN_PLLC,
+ CPRMAN_PLLD,
+ CPRMAN_PLLH,
+ CPRMAN_PLLB,
+
+ CPRMAN_NUM_PLL
+} CprmanPll;
+
+typedef enum CprmanPllChannel {
+ CPRMAN_PLLA_CHANNEL_DSI0 = 0,
+ CPRMAN_PLLA_CHANNEL_CORE,
+ CPRMAN_PLLA_CHANNEL_PER,
+ CPRMAN_PLLA_CHANNEL_CCP2,
+
+ CPRMAN_PLLC_CHANNEL_CORE2,
+ CPRMAN_PLLC_CHANNEL_CORE1,
+ CPRMAN_PLLC_CHANNEL_PER,
+ CPRMAN_PLLC_CHANNEL_CORE0,
+
+ CPRMAN_PLLD_CHANNEL_DSI0,
+ CPRMAN_PLLD_CHANNEL_CORE,
+ CPRMAN_PLLD_CHANNEL_PER,
+ CPRMAN_PLLD_CHANNEL_DSI1,
+
+ CPRMAN_PLLH_CHANNEL_AUX,
+ CPRMAN_PLLH_CHANNEL_RCAL,
+ CPRMAN_PLLH_CHANNEL_PIX,
+
+ CPRMAN_PLLB_CHANNEL_ARM,
+
+ CPRMAN_NUM_PLL_CHANNEL,
+
+ /* Special values used when connecting clock sources to clocks */
+ CPRMAN_CLOCK_SRC_NORMAL = -1,
+ CPRMAN_CLOCK_SRC_FORCE_GROUND = -2,
+ CPRMAN_CLOCK_SRC_DSI0HSCK = -3,
+} CprmanPllChannel;
+
+typedef enum CprmanClockMux {
+ CPRMAN_CLOCK_GNRIC,
+ CPRMAN_CLOCK_VPU,
+ CPRMAN_CLOCK_SYS,
+ CPRMAN_CLOCK_PERIA,
+ CPRMAN_CLOCK_PERII,
+ CPRMAN_CLOCK_H264,
+ CPRMAN_CLOCK_ISP,
+ CPRMAN_CLOCK_V3D,
+ CPRMAN_CLOCK_CAM0,
+ CPRMAN_CLOCK_CAM1,
+ CPRMAN_CLOCK_CCP2,
+ CPRMAN_CLOCK_DSI0E,
+ CPRMAN_CLOCK_DSI0P,
+ CPRMAN_CLOCK_DPI,
+ CPRMAN_CLOCK_GP0,
+ CPRMAN_CLOCK_GP1,
+ CPRMAN_CLOCK_GP2,
+ CPRMAN_CLOCK_HSM,
+ CPRMAN_CLOCK_OTP,
+ CPRMAN_CLOCK_PCM,
+ CPRMAN_CLOCK_PWM,
+ CPRMAN_CLOCK_SLIM,
+ CPRMAN_CLOCK_SMI,
+ CPRMAN_CLOCK_TEC,
+ CPRMAN_CLOCK_TD0,
+ CPRMAN_CLOCK_TD1,
+ CPRMAN_CLOCK_TSENS,
+ CPRMAN_CLOCK_TIMER,
+ CPRMAN_CLOCK_UART,
+ CPRMAN_CLOCK_VEC,
+ CPRMAN_CLOCK_PULSE,
+ CPRMAN_CLOCK_SDC,
+ CPRMAN_CLOCK_ARM,
+ CPRMAN_CLOCK_AVEO,
+ CPRMAN_CLOCK_EMMC,
+ CPRMAN_CLOCK_EMMC2,
+
+ CPRMAN_NUM_CLOCK_MUX
+} CprmanClockMux;
+
+typedef enum CprmanClockMuxSource {
+ CPRMAN_CLOCK_SRC_GND = 0,
+ CPRMAN_CLOCK_SRC_XOSC,
+ CPRMAN_CLOCK_SRC_TD0,
+ CPRMAN_CLOCK_SRC_TD1,
+ CPRMAN_CLOCK_SRC_PLLA,
+ CPRMAN_CLOCK_SRC_PLLC,
+ CPRMAN_CLOCK_SRC_PLLD,
+ CPRMAN_CLOCK_SRC_PLLH,
+ CPRMAN_CLOCK_SRC_PLLC_CORE1,
+ CPRMAN_CLOCK_SRC_PLLC_CORE2,
+
+ CPRMAN_NUM_CLOCK_MUX_SRC
+} CprmanClockMuxSource;
+
+typedef struct CprmanPllState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanPll id;
+
+ uint32_t *reg_cm;
+ uint32_t *reg_a2w_ctrl;
+ uint32_t *reg_a2w_ana; /* ANA[0] .. ANA[3] */
+ uint32_t prediv_mask; /* prediv bit in ana[1] */
+ uint32_t *reg_a2w_frac;
+
+ Clock *xosc_in;
+ Clock *out;
+} CprmanPllState;
+
+typedef struct CprmanPllChannelState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanPllChannel id;
+ CprmanPll parent;
+
+ uint32_t *reg_cm;
+ uint32_t hold_mask;
+ uint32_t load_mask;
+ uint32_t *reg_a2w_ctrl;
+ int fixed_divider;
+
+ Clock *pll_in;
+ Clock *out;
+} CprmanPllChannelState;
+
+typedef struct CprmanClockMuxState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanClockMux id;
+
+ uint32_t *reg_ctl;
+ uint32_t *reg_div;
+ int int_bits;
+ int frac_bits;
+
+ Clock *srcs[CPRMAN_NUM_CLOCK_MUX_SRC];
+ Clock *out;
+
+ /*
+ * Used by clock srcs update callback to retrieve both the clock and the
+ * source number.
+ */
+ struct CprmanClockMuxState *backref[CPRMAN_NUM_CLOCK_MUX_SRC];
+} CprmanClockMuxState;
+
+typedef struct CprmanDsi0HsckMuxState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CprmanClockMux id;
+
+ uint32_t *reg_cm;
+
+ Clock *plla_in;
+ Clock *plld_in;
+ Clock *out;
+} CprmanDsi0HsckMuxState;
+
+struct BCM2835CprmanState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ CprmanPllState plls[CPRMAN_NUM_PLL];
+ CprmanPllChannelState channels[CPRMAN_NUM_PLL_CHANNEL];
+ CprmanClockMuxState clock_muxes[CPRMAN_NUM_CLOCK_MUX];
+ CprmanDsi0HsckMuxState dsi0hsck_mux;
+
+ uint32_t regs[CPRMAN_NUM_REGS];
+ uint32_t xosc_freq;
+
+ Clock *xosc;
+ Clock *gnd;
+};
+
+#endif
diff --git a/include/hw/misc/bcm2835_cprman_internals.h b/include/hw/misc/bcm2835_cprman_internals.h
new file mode 100644
index 0000000000..7617aff96f
--- /dev/null
+++ b/include/hw/misc/bcm2835_cprman_internals.h
@@ -0,0 +1,1019 @@
+/*
+ * BCM2835 CPRMAN clock manager
+ *
+ * Copyright (c) 2020 Luc Michel <luc@lmichel.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_BCM2835_CPRMAN_INTERNALS_H
+#define HW_MISC_BCM2835_CPRMAN_INTERNALS_H
+
+#include "hw/registerfields.h"
+#include "hw/misc/bcm2835_cprman.h"
+
+#define TYPE_CPRMAN_PLL "bcm2835-cprman-pll"
+#define TYPE_CPRMAN_PLL_CHANNEL "bcm2835-cprman-pll-channel"
+#define TYPE_CPRMAN_CLOCK_MUX "bcm2835-cprman-clock-mux"
+#define TYPE_CPRMAN_DSI0HSCK_MUX "bcm2835-cprman-dsi0hsck-mux"
+
+DECLARE_INSTANCE_CHECKER(CprmanPllState, CPRMAN_PLL,
+ TYPE_CPRMAN_PLL)
+DECLARE_INSTANCE_CHECKER(CprmanPllChannelState, CPRMAN_PLL_CHANNEL,
+ TYPE_CPRMAN_PLL_CHANNEL)
+DECLARE_INSTANCE_CHECKER(CprmanClockMuxState, CPRMAN_CLOCK_MUX,
+ TYPE_CPRMAN_CLOCK_MUX)
+DECLARE_INSTANCE_CHECKER(CprmanDsi0HsckMuxState, CPRMAN_DSI0HSCK_MUX,
+ TYPE_CPRMAN_DSI0HSCK_MUX)
+
+/* Register map */
+
+/* PLLs */
+REG32(CM_PLLA, 0x104)
+ FIELD(CM_PLLA, LOADDSI0, 0, 1)
+ FIELD(CM_PLLA, HOLDDSI0, 1, 1)
+ FIELD(CM_PLLA, LOADCCP2, 2, 1)
+ FIELD(CM_PLLA, HOLDCCP2, 3, 1)
+ FIELD(CM_PLLA, LOADCORE, 4, 1)
+ FIELD(CM_PLLA, HOLDCORE, 5, 1)
+ FIELD(CM_PLLA, LOADPER, 6, 1)
+ FIELD(CM_PLLA, HOLDPER, 7, 1)
+ FIELD(CM_PLLx, ANARST, 8, 1)
+REG32(CM_PLLC, 0x108)
+ FIELD(CM_PLLC, LOADCORE0, 0, 1)
+ FIELD(CM_PLLC, HOLDCORE0, 1, 1)
+ FIELD(CM_PLLC, LOADCORE1, 2, 1)
+ FIELD(CM_PLLC, HOLDCORE1, 3, 1)
+ FIELD(CM_PLLC, LOADCORE2, 4, 1)
+ FIELD(CM_PLLC, HOLDCORE2, 5, 1)
+ FIELD(CM_PLLC, LOADPER, 6, 1)
+ FIELD(CM_PLLC, HOLDPER, 7, 1)
+REG32(CM_PLLD, 0x10c)
+ FIELD(CM_PLLD, LOADDSI0, 0, 1)
+ FIELD(CM_PLLD, HOLDDSI0, 1, 1)
+ FIELD(CM_PLLD, LOADDSI1, 2, 1)
+ FIELD(CM_PLLD, HOLDDSI1, 3, 1)
+ FIELD(CM_PLLD, LOADCORE, 4, 1)
+ FIELD(CM_PLLD, HOLDCORE, 5, 1)
+ FIELD(CM_PLLD, LOADPER, 6, 1)
+ FIELD(CM_PLLD, HOLDPER, 7, 1)
+REG32(CM_PLLH, 0x110)
+ FIELD(CM_PLLH, LOADPIX, 0, 1)
+ FIELD(CM_PLLH, LOADAUX, 1, 1)
+ FIELD(CM_PLLH, LOADRCAL, 2, 1)
+REG32(CM_PLLB, 0x170)
+ FIELD(CM_PLLB, LOADARM, 0, 1)
+ FIELD(CM_PLLB, HOLDARM, 1, 1)
+
+REG32(A2W_PLLA_CTRL, 0x1100)
+ FIELD(A2W_PLLx_CTRL, NDIV, 0, 10)
+ FIELD(A2W_PLLx_CTRL, PDIV, 12, 3)
+ FIELD(A2W_PLLx_CTRL, PWRDN, 16, 1)
+ FIELD(A2W_PLLx_CTRL, PRST_DISABLE, 17, 1)
+REG32(A2W_PLLC_CTRL, 0x1120)
+REG32(A2W_PLLD_CTRL, 0x1140)
+REG32(A2W_PLLH_CTRL, 0x1160)
+REG32(A2W_PLLB_CTRL, 0x11e0)
+
+REG32(A2W_PLLA_ANA0, 0x1010)
+REG32(A2W_PLLA_ANA1, 0x1014)
+ FIELD(A2W_PLLx_ANA1, FB_PREDIV, 14, 1)
+REG32(A2W_PLLA_ANA2, 0x1018)
+REG32(A2W_PLLA_ANA3, 0x101c)
+
+REG32(A2W_PLLC_ANA0, 0x1030)
+REG32(A2W_PLLC_ANA1, 0x1034)
+REG32(A2W_PLLC_ANA2, 0x1038)
+REG32(A2W_PLLC_ANA3, 0x103c)
+
+REG32(A2W_PLLD_ANA0, 0x1050)
+REG32(A2W_PLLD_ANA1, 0x1054)
+REG32(A2W_PLLD_ANA2, 0x1058)
+REG32(A2W_PLLD_ANA3, 0x105c)
+
+REG32(A2W_PLLH_ANA0, 0x1070)
+REG32(A2W_PLLH_ANA1, 0x1074)
+ FIELD(A2W_PLLH_ANA1, FB_PREDIV, 11, 1)
+REG32(A2W_PLLH_ANA2, 0x1078)
+REG32(A2W_PLLH_ANA3, 0x107c)
+
+REG32(A2W_PLLB_ANA0, 0x10f0)
+REG32(A2W_PLLB_ANA1, 0x10f4)
+REG32(A2W_PLLB_ANA2, 0x10f8)
+REG32(A2W_PLLB_ANA3, 0x10fc)
+
+REG32(A2W_PLLA_FRAC, 0x1200)
+ FIELD(A2W_PLLx_FRAC, FRAC, 0, 20)
+REG32(A2W_PLLC_FRAC, 0x1220)
+REG32(A2W_PLLD_FRAC, 0x1240)
+REG32(A2W_PLLH_FRAC, 0x1260)
+REG32(A2W_PLLB_FRAC, 0x12e0)
+
+/* PLL channels */
+REG32(A2W_PLLA_DSI0, 0x1300)
+ FIELD(A2W_PLLx_CHANNELy, DIV, 0, 8)
+ FIELD(A2W_PLLx_CHANNELy, DISABLE, 8, 1)
+REG32(A2W_PLLA_CORE, 0x1400)
+REG32(A2W_PLLA_PER, 0x1500)
+REG32(A2W_PLLA_CCP2, 0x1600)
+
+REG32(A2W_PLLC_CORE2, 0x1320)
+REG32(A2W_PLLC_CORE1, 0x1420)
+REG32(A2W_PLLC_PER, 0x1520)
+REG32(A2W_PLLC_CORE0, 0x1620)
+
+REG32(A2W_PLLD_DSI0, 0x1340)
+REG32(A2W_PLLD_CORE, 0x1440)
+REG32(A2W_PLLD_PER, 0x1540)
+REG32(A2W_PLLD_DSI1, 0x1640)
+
+REG32(A2W_PLLH_AUX, 0x1360)
+REG32(A2W_PLLH_RCAL, 0x1460)
+REG32(A2W_PLLH_PIX, 0x1560)
+REG32(A2W_PLLH_STS, 0x1660)
+
+REG32(A2W_PLLB_ARM, 0x13e0)
+
+/* Clock muxes */
+REG32(CM_GNRICCTL, 0x000)
+ FIELD(CM_CLOCKx_CTL, SRC, 0, 4)
+ FIELD(CM_CLOCKx_CTL, ENABLE, 4, 1)
+ FIELD(CM_CLOCKx_CTL, KILL, 5, 1)
+ FIELD(CM_CLOCKx_CTL, GATE, 6, 1)
+ FIELD(CM_CLOCKx_CTL, BUSY, 7, 1)
+ FIELD(CM_CLOCKx_CTL, BUSYD, 8, 1)
+ FIELD(CM_CLOCKx_CTL, MASH, 9, 2)
+ FIELD(CM_CLOCKx_CTL, FLIP, 11, 1)
+REG32(CM_GNRICDIV, 0x004)
+ FIELD(CM_CLOCKx_DIV, FRAC, 0, 12)
+REG32(CM_VPUCTL, 0x008)
+REG32(CM_VPUDIV, 0x00c)
+REG32(CM_SYSCTL, 0x010)
+REG32(CM_SYSDIV, 0x014)
+REG32(CM_PERIACTL, 0x018)
+REG32(CM_PERIADIV, 0x01c)
+REG32(CM_PERIICTL, 0x020)
+REG32(CM_PERIIDIV, 0x024)
+REG32(CM_H264CTL, 0x028)
+REG32(CM_H264DIV, 0x02c)
+REG32(CM_ISPCTL, 0x030)
+REG32(CM_ISPDIV, 0x034)
+REG32(CM_V3DCTL, 0x038)
+REG32(CM_V3DDIV, 0x03c)
+REG32(CM_CAM0CTL, 0x040)
+REG32(CM_CAM0DIV, 0x044)
+REG32(CM_CAM1CTL, 0x048)
+REG32(CM_CAM1DIV, 0x04c)
+REG32(CM_CCP2CTL, 0x050)
+REG32(CM_CCP2DIV, 0x054)
+REG32(CM_DSI0ECTL, 0x058)
+REG32(CM_DSI0EDIV, 0x05c)
+REG32(CM_DSI0PCTL, 0x060)
+REG32(CM_DSI0PDIV, 0x064)
+REG32(CM_DPICTL, 0x068)
+REG32(CM_DPIDIV, 0x06c)
+REG32(CM_GP0CTL, 0x070)
+REG32(CM_GP0DIV, 0x074)
+REG32(CM_GP1CTL, 0x078)
+REG32(CM_GP1DIV, 0x07c)
+REG32(CM_GP2CTL, 0x080)
+REG32(CM_GP2DIV, 0x084)
+REG32(CM_HSMCTL, 0x088)
+REG32(CM_HSMDIV, 0x08c)
+REG32(CM_OTPCTL, 0x090)
+REG32(CM_OTPDIV, 0x094)
+REG32(CM_PCMCTL, 0x098)
+REG32(CM_PCMDIV, 0x09c)
+REG32(CM_PWMCTL, 0x0a0)
+REG32(CM_PWMDIV, 0x0a4)
+REG32(CM_SLIMCTL, 0x0a8)
+REG32(CM_SLIMDIV, 0x0ac)
+REG32(CM_SMICTL, 0x0b0)
+REG32(CM_SMIDIV, 0x0b4)
+REG32(CM_TCNTCTL, 0x0c0)
+REG32(CM_TCNTCNT, 0x0c4)
+REG32(CM_TECCTL, 0x0c8)
+REG32(CM_TECDIV, 0x0cc)
+REG32(CM_TD0CTL, 0x0d0)
+REG32(CM_TD0DIV, 0x0d4)
+REG32(CM_TD1CTL, 0x0d8)
+REG32(CM_TD1DIV, 0x0dc)
+REG32(CM_TSENSCTL, 0x0e0)
+REG32(CM_TSENSDIV, 0x0e4)
+REG32(CM_TIMERCTL, 0x0e8)
+REG32(CM_TIMERDIV, 0x0ec)
+REG32(CM_UARTCTL, 0x0f0)
+REG32(CM_UARTDIV, 0x0f4)
+REG32(CM_VECCTL, 0x0f8)
+REG32(CM_VECDIV, 0x0fc)
+REG32(CM_PULSECTL, 0x190)
+REG32(CM_PULSEDIV, 0x194)
+REG32(CM_SDCCTL, 0x1a8)
+REG32(CM_SDCDIV, 0x1ac)
+REG32(CM_ARMCTL, 0x1b0)
+REG32(CM_AVEOCTL, 0x1b8)
+REG32(CM_AVEODIV, 0x1bc)
+REG32(CM_EMMCCTL, 0x1c0)
+REG32(CM_EMMCDIV, 0x1c4)
+REG32(CM_EMMC2CTL, 0x1d0)
+REG32(CM_EMMC2DIV, 0x1d4)
+
+/* misc registers */
+REG32(CM_LOCK, 0x114)
+ FIELD(CM_LOCK, FLOCKH, 12, 1)
+ FIELD(CM_LOCK, FLOCKD, 11, 1)
+ FIELD(CM_LOCK, FLOCKC, 10, 1)
+ FIELD(CM_LOCK, FLOCKB, 9, 1)
+ FIELD(CM_LOCK, FLOCKA, 8, 1)
+
+REG32(CM_DSI0HSCK, 0x120)
+ FIELD(CM_DSI0HSCK, SELPLLD, 0, 1)
+
+/*
+ * This field is common to all registers. Each register write value must match
+ * the CPRMAN_PASSWORD magic value in its 8 MSBs.
+ */
+FIELD(CPRMAN, PASSWORD, 24, 8)
+#define CPRMAN_PASSWORD 0x5a
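As a sketch of how a write handler might gate on this field, using the FIELD_EX32 helper from "hw/registerfields.h" (included at the top of this header); whether the real model performs the check exactly this way is not shown here.

    /* Sketch only: accept a register write only if its top byte carries the magic. */
    static bool cprman_write_allowed(uint32_t value)
    {
        return FIELD_EX32(value, CPRMAN, PASSWORD) == CPRMAN_PASSWORD;
    }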
+
+/* PLL init info */
+typedef struct PLLInitInfo {
+ const char *name;
+ size_t cm_offset;
+ size_t a2w_ctrl_offset;
+ size_t a2w_ana_offset;
+ uint32_t prediv_mask; /* Prediv bit in ana[1] */
+ size_t a2w_frac_offset;
+} PLLInitInfo;
+
+#define FILL_PLL_INIT_INFO(pll_) \
+ .cm_offset = R_CM_ ## pll_, \
+ .a2w_ctrl_offset = R_A2W_ ## pll_ ## _CTRL, \
+ .a2w_ana_offset = R_A2W_ ## pll_ ## _ANA0, \
+ .a2w_frac_offset = R_A2W_ ## pll_ ## _FRAC
+
+static const PLLInitInfo PLL_INIT_INFO[] = {
+ [CPRMAN_PLLA] = {
+ .name = "plla",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLA),
+ },
+ [CPRMAN_PLLC] = {
+ .name = "pllc",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLC),
+ },
+ [CPRMAN_PLLD] = {
+ .name = "plld",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLD),
+ },
+ [CPRMAN_PLLH] = {
+ .name = "pllh",
+ .prediv_mask = R_A2W_PLLH_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLH),
+ },
+ [CPRMAN_PLLB] = {
+ .name = "pllb",
+ .prediv_mask = R_A2W_PLLx_ANA1_FB_PREDIV_MASK,
+ FILL_PLL_INIT_INFO(PLLB),
+ },
+};
+
+#undef FILL_PLL_CHANNEL_INIT_INFO
+
+static inline void set_pll_init_info(BCM2835CprmanState *s,
+ CprmanPllState *pll,
+ CprmanPll id)
+{
+ pll->id = id;
+ pll->reg_cm = &s->regs[PLL_INIT_INFO[id].cm_offset];
+ pll->reg_a2w_ctrl = &s->regs[PLL_INIT_INFO[id].a2w_ctrl_offset];
+ pll->reg_a2w_ana = &s->regs[PLL_INIT_INFO[id].a2w_ana_offset];
+ pll->prediv_mask = PLL_INIT_INFO[id].prediv_mask;
+ pll->reg_a2w_frac = &s->regs[PLL_INIT_INFO[id].a2w_frac_offset];
+}
+
+
+/* PLL channel init info */
+typedef struct PLLChannelInitInfo {
+ const char *name;
+ CprmanPll parent;
+ size_t cm_offset;
+ uint32_t cm_hold_mask;
+ uint32_t cm_load_mask;
+ size_t a2w_ctrl_offset;
+ unsigned int fixed_divider;
+} PLLChannelInitInfo;
+
+#define FILL_PLL_CHANNEL_INIT_INFO_common(pll_, channel_) \
+ .parent = CPRMAN_ ## pll_, \
+ .cm_offset = R_CM_ ## pll_, \
+ .cm_load_mask = R_CM_ ## pll_ ## _ ## LOAD ## channel_ ## _MASK, \
+ .a2w_ctrl_offset = R_A2W_ ## pll_ ## _ ## channel_
+
+#define FILL_PLL_CHANNEL_INIT_INFO(pll_, channel_) \
+ FILL_PLL_CHANNEL_INIT_INFO_common(pll_, channel_), \
+ .cm_hold_mask = R_CM_ ## pll_ ## _ ## HOLD ## channel_ ## _MASK, \
+ .fixed_divider = 1
+
+#define FILL_PLL_CHANNEL_INIT_INFO_nohold(pll_, channel_) \
+ FILL_PLL_CHANNEL_INIT_INFO_common(pll_, channel_), \
+ .cm_hold_mask = 0
+
+static PLLChannelInitInfo PLL_CHANNEL_INIT_INFO[] = {
+ [CPRMAN_PLLA_CHANNEL_DSI0] = {
+ .name = "plla-dsi0",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, DSI0),
+ },
+ [CPRMAN_PLLA_CHANNEL_CORE] = {
+ .name = "plla-core",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, CORE),
+ },
+ [CPRMAN_PLLA_CHANNEL_PER] = {
+ .name = "plla-per",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, PER),
+ },
+ [CPRMAN_PLLA_CHANNEL_CCP2] = {
+ .name = "plla-ccp2",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLA, CCP2),
+ },
+
+ [CPRMAN_PLLC_CHANNEL_CORE2] = {
+ .name = "pllc-core2",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, CORE2),
+ },
+ [CPRMAN_PLLC_CHANNEL_CORE1] = {
+ .name = "pllc-core1",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, CORE1),
+ },
+ [CPRMAN_PLLC_CHANNEL_PER] = {
+ .name = "pllc-per",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, PER),
+ },
+ [CPRMAN_PLLC_CHANNEL_CORE0] = {
+ .name = "pllc-core0",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLC, CORE0),
+ },
+
+ [CPRMAN_PLLD_CHANNEL_DSI0] = {
+ .name = "plld-dsi0",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, DSI0),
+ },
+ [CPRMAN_PLLD_CHANNEL_CORE] = {
+ .name = "plld-core",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, CORE),
+ },
+ [CPRMAN_PLLD_CHANNEL_PER] = {
+ .name = "plld-per",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, PER),
+ },
+ [CPRMAN_PLLD_CHANNEL_DSI1] = {
+ .name = "plld-dsi1",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLD, DSI1),
+ },
+
+ [CPRMAN_PLLH_CHANNEL_AUX] = {
+ .name = "pllh-aux",
+ .fixed_divider = 1,
+ FILL_PLL_CHANNEL_INIT_INFO_nohold(PLLH, AUX),
+ },
+ [CPRMAN_PLLH_CHANNEL_RCAL] = {
+ .name = "pllh-rcal",
+ .fixed_divider = 10,
+ FILL_PLL_CHANNEL_INIT_INFO_nohold(PLLH, RCAL),
+ },
+ [CPRMAN_PLLH_CHANNEL_PIX] = {
+ .name = "pllh-pix",
+ .fixed_divider = 10,
+ FILL_PLL_CHANNEL_INIT_INFO_nohold(PLLH, PIX),
+ },
+
+ [CPRMAN_PLLB_CHANNEL_ARM] = {
+ .name = "pllb-arm",
+ FILL_PLL_CHANNEL_INIT_INFO(PLLB, ARM),
+ },
+};
+
+#undef FILL_PLL_CHANNEL_INIT_INFO_nohold
+#undef FILL_PLL_CHANNEL_INIT_INFO
+#undef FILL_PLL_CHANNEL_INIT_INFO_common
+
+static inline void set_pll_channel_init_info(BCM2835CprmanState *s,
+ CprmanPllChannelState *channel,
+ CprmanPllChannel id)
+{
+ channel->id = id;
+ channel->parent = PLL_CHANNEL_INIT_INFO[id].parent;
+ channel->reg_cm = &s->regs[PLL_CHANNEL_INIT_INFO[id].cm_offset];
+ channel->hold_mask = PLL_CHANNEL_INIT_INFO[id].cm_hold_mask;
+ channel->load_mask = PLL_CHANNEL_INIT_INFO[id].cm_load_mask;
+ channel->reg_a2w_ctrl = &s->regs[PLL_CHANNEL_INIT_INFO[id].a2w_ctrl_offset];
+ channel->fixed_divider = PLL_CHANNEL_INIT_INFO[id].fixed_divider;
+}
+
+/* Clock mux init info */
+typedef struct ClockMuxInitInfo {
+ const char *name;
+ size_t cm_offset; /* cm_offset[0]->CM_CTL, cm_offset[1]->CM_DIV */
+ int int_bits;
+ int frac_bits;
+
+ CprmanPllChannel src_mapping[CPRMAN_NUM_CLOCK_MUX_SRC];
+} ClockMuxInitInfo;
+
+/*
+ * Each clock mux can have up to 10 sources. Sources 0 to 3 are always the
+ * same (ground, xosc, td0, td1). Sources 4 to 9 are mux specific, and are not
+ * always populated. The following macros catch all those cases.
+ */
+
+/* Unknown mapping. Connect everything to ground */
+#define SRC_MAPPING_INFO_unknown \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* gnd */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* xosc */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* test debug 0 */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* test debug 1 */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll a */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll c */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll d */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll h */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll c, core1 */ \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, /* pll c, core2 */ \
+ }
+
+/* Only the oscillator and the two test debug clocks */
+#define SRC_MAPPING_INFO_xosc \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+/* All the PLL "core" channels */
+#define SRC_MAPPING_INFO_core \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_PLLA_CHANNEL_CORE, \
+ CPRMAN_PLLC_CHANNEL_CORE0, \
+ CPRMAN_PLLD_CHANNEL_CORE, \
+ CPRMAN_PLLH_CHANNEL_AUX, \
+ CPRMAN_PLLC_CHANNEL_CORE1, \
+ CPRMAN_PLLC_CHANNEL_CORE2, \
+ }
+
+/* All the PLL "per" channels */
+#define SRC_MAPPING_INFO_periph \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_PLLA_CHANNEL_PER, \
+ CPRMAN_PLLC_CHANNEL_PER, \
+ CPRMAN_PLLD_CHANNEL_PER, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+/*
+ * The DSI0 channels. This one has an intermediate mux between the PLL channels
+ * and the clock input.
+ */
+#define SRC_MAPPING_INFO_dsi0 \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_DSI0HSCK, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+/* The DSI1 channel */
+#define SRC_MAPPING_INFO_dsi1 \
+ .src_mapping = { \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_CLOCK_SRC_NORMAL, \
+ CPRMAN_PLLD_CHANNEL_DSI1, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ CPRMAN_CLOCK_SRC_FORCE_GROUND, \
+ }
+
+#define FILL_CLOCK_MUX_SRC_MAPPING_INIT_INFO(kind_) \
+ SRC_MAPPING_INFO_ ## kind_
+
+#define FILL_CLOCK_MUX_INIT_INFO(clock_, kind_) \
+ .cm_offset = R_CM_ ## clock_ ## CTL, \
+ FILL_CLOCK_MUX_SRC_MAPPING_INIT_INFO(kind_)
+
+static ClockMuxInitInfo CLOCK_MUX_INIT_INFO[] = {
+ [CPRMAN_CLOCK_GNRIC] = {
+ .name = "gnric",
+ FILL_CLOCK_MUX_INIT_INFO(GNRIC, unknown),
+ },
+ [CPRMAN_CLOCK_VPU] = {
+ .name = "vpu",
+ .int_bits = 12,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(VPU, core),
+ },
+ [CPRMAN_CLOCK_SYS] = {
+ .name = "sys",
+ FILL_CLOCK_MUX_INIT_INFO(SYS, unknown),
+ },
+ [CPRMAN_CLOCK_PERIA] = {
+ .name = "peria",
+ FILL_CLOCK_MUX_INIT_INFO(PERIA, unknown),
+ },
+ [CPRMAN_CLOCK_PERII] = {
+ .name = "perii",
+ FILL_CLOCK_MUX_INIT_INFO(PERII, unknown),
+ },
+ [CPRMAN_CLOCK_H264] = {
+ .name = "h264",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(H264, core),
+ },
+ [CPRMAN_CLOCK_ISP] = {
+ .name = "isp",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(ISP, core),
+ },
+ [CPRMAN_CLOCK_V3D] = {
+ .name = "v3d",
+ FILL_CLOCK_MUX_INIT_INFO(V3D, core),
+ },
+ [CPRMAN_CLOCK_CAM0] = {
+ .name = "cam0",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(CAM0, periph),
+ },
+ [CPRMAN_CLOCK_CAM1] = {
+ .name = "cam1",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(CAM1, periph),
+ },
+ [CPRMAN_CLOCK_CCP2] = {
+ .name = "ccp2",
+ FILL_CLOCK_MUX_INIT_INFO(CCP2, unknown),
+ },
+ [CPRMAN_CLOCK_DSI0E] = {
+ .name = "dsi0e",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(DSI0E, dsi0),
+ },
+ [CPRMAN_CLOCK_DSI0P] = {
+ .name = "dsi0p",
+ .int_bits = 0,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(DSI0P, dsi0),
+ },
+ [CPRMAN_CLOCK_DPI] = {
+ .name = "dpi",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(DPI, periph),
+ },
+ [CPRMAN_CLOCK_GP0] = {
+ .name = "gp0",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(GP0, periph),
+ },
+ [CPRMAN_CLOCK_GP1] = {
+ .name = "gp1",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(GP1, periph),
+ },
+ [CPRMAN_CLOCK_GP2] = {
+ .name = "gp2",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(GP2, periph),
+ },
+ [CPRMAN_CLOCK_HSM] = {
+ .name = "hsm",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(HSM, periph),
+ },
+ [CPRMAN_CLOCK_OTP] = {
+ .name = "otp",
+ .int_bits = 4,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(OTP, xosc),
+ },
+ [CPRMAN_CLOCK_PCM] = {
+ .name = "pcm",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(PCM, periph),
+ },
+ [CPRMAN_CLOCK_PWM] = {
+ .name = "pwm",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(PWM, periph),
+ },
+ [CPRMAN_CLOCK_SLIM] = {
+ .name = "slim",
+ .int_bits = 12,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(SLIM, periph),
+ },
+ [CPRMAN_CLOCK_SMI] = {
+ .name = "smi",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(SMI, periph),
+ },
+ [CPRMAN_CLOCK_TEC] = {
+ .name = "tec",
+ .int_bits = 6,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(TEC, xosc),
+ },
+ [CPRMAN_CLOCK_TD0] = {
+ .name = "td0",
+ FILL_CLOCK_MUX_INIT_INFO(TD0, unknown),
+ },
+ [CPRMAN_CLOCK_TD1] = {
+ .name = "td1",
+ FILL_CLOCK_MUX_INIT_INFO(TD1, unknown),
+ },
+ [CPRMAN_CLOCK_TSENS] = {
+ .name = "tsens",
+ .int_bits = 5,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(TSENS, xosc),
+ },
+ [CPRMAN_CLOCK_TIMER] = {
+ .name = "timer",
+ .int_bits = 6,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(TIMER, xosc),
+ },
+ [CPRMAN_CLOCK_UART] = {
+ .name = "uart",
+ .int_bits = 10,
+ .frac_bits = 12,
+ FILL_CLOCK_MUX_INIT_INFO(UART, periph),
+ },
+ [CPRMAN_CLOCK_VEC] = {
+ .name = "vec",
+ .int_bits = 4,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(VEC, periph),
+ },
+ [CPRMAN_CLOCK_PULSE] = {
+ .name = "pulse",
+ FILL_CLOCK_MUX_INIT_INFO(PULSE, xosc),
+ },
+ [CPRMAN_CLOCK_SDC] = {
+ .name = "sdram",
+ .int_bits = 6,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(SDC, core),
+ },
+ [CPRMAN_CLOCK_ARM] = {
+ .name = "arm",
+ FILL_CLOCK_MUX_INIT_INFO(ARM, unknown),
+ },
+ [CPRMAN_CLOCK_AVEO] = {
+ .name = "aveo",
+ .int_bits = 4,
+ .frac_bits = 0,
+ FILL_CLOCK_MUX_INIT_INFO(AVEO, periph),
+ },
+ [CPRMAN_CLOCK_EMMC] = {
+ .name = "emmc",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(EMMC, periph),
+ },
+ [CPRMAN_CLOCK_EMMC2] = {
+ .name = "emmc2",
+ .int_bits = 4,
+ .frac_bits = 8,
+ FILL_CLOCK_MUX_INIT_INFO(EMMC2, unknown),
+ },
+};
+
+#undef FILL_CLOCK_MUX_INIT_INFO
+#undef FILL_CLOCK_MUX_SRC_MAPPING_INIT_INFO
+#undef SRC_MAPPING_INFO_dsi1
+#undef SRC_MAPPING_INFO_dsi0
+#undef SRC_MAPPING_INFO_periph
+#undef SRC_MAPPING_INFO_core
+#undef SRC_MAPPING_INFO_xosc
+#undef SRC_MAPPING_INFO_unknown
+
+static inline void set_clock_mux_init_info(BCM2835CprmanState *s,
+ CprmanClockMuxState *mux,
+ CprmanClockMux id)
+{
+ mux->id = id;
+ mux->reg_ctl = &s->regs[CLOCK_MUX_INIT_INFO[id].cm_offset];
+ mux->reg_div = &s->regs[CLOCK_MUX_INIT_INFO[id].cm_offset + 1];
+ mux->int_bits = CLOCK_MUX_INIT_INFO[id].int_bits;
+ mux->frac_bits = CLOCK_MUX_INIT_INFO[id].frac_bits;
+}
+
+
+/*
+ * Object reset info
+ * Those values have been dumped from a Raspberry Pi 3 Model B v1.2 using the
+ * clk debugfs interface in Linux.
+ */
+typedef struct PLLResetInfo {
+ uint32_t cm;
+ uint32_t a2w_ctrl;
+ uint32_t a2w_ana[4];
+ uint32_t a2w_frac;
+} PLLResetInfo;
+
+static const PLLResetInfo PLL_RESET_INFO[] = {
+ [CPRMAN_PLLA] = {
+ .cm = 0x0000008a,
+ .a2w_ctrl = 0x0002103a,
+ .a2w_frac = 0x00098000,
+ .a2w_ana = { 0x00000000, 0x00144000, 0x00000000, 0x00000100 }
+ },
+
+ [CPRMAN_PLLC] = {
+ .cm = 0x00000228,
+ .a2w_ctrl = 0x0002103e,
+ .a2w_frac = 0x00080000,
+ .a2w_ana = { 0x00000000, 0x00144000, 0x00000000, 0x00000100 }
+ },
+
+ [CPRMAN_PLLD] = {
+ .cm = 0x0000020a,
+ .a2w_ctrl = 0x00021034,
+ .a2w_frac = 0x00015556,
+ .a2w_ana = { 0x00000000, 0x00144000, 0x00000000, 0x00000100 }
+ },
+
+ [CPRMAN_PLLH] = {
+ .cm = 0x00000000,
+ .a2w_ctrl = 0x0002102d,
+ .a2w_frac = 0x00000000,
+ .a2w_ana = { 0x00900000, 0x0000000c, 0x00000000, 0x00000000 }
+ },
+
+ [CPRMAN_PLLB] = {
+ /* unknown */
+ .cm = 0x00000000,
+ .a2w_ctrl = 0x00000000,
+ .a2w_frac = 0x00000000,
+ .a2w_ana = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }
+ }
+};
+
+typedef struct PLLChannelResetInfo {
+ /*
+ * Even though a PLL channel has a CM register, it shares it with its
+ * parent PLL. The parent already takes care of the reset value.
+ */
+ uint32_t a2w_ctrl;
+} PLLChannelResetInfo;
+
+static const PLLChannelResetInfo PLL_CHANNEL_RESET_INFO[] = {
+ [CPRMAN_PLLA_CHANNEL_DSI0] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLA_CHANNEL_CORE] = { .a2w_ctrl = 0x00000003 },
+ [CPRMAN_PLLA_CHANNEL_PER] = { .a2w_ctrl = 0x00000000 }, /* unknown */
+ [CPRMAN_PLLA_CHANNEL_CCP2] = { .a2w_ctrl = 0x00000100 },
+
+ [CPRMAN_PLLC_CHANNEL_CORE2] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLC_CHANNEL_CORE1] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLC_CHANNEL_PER] = { .a2w_ctrl = 0x00000002 },
+ [CPRMAN_PLLC_CHANNEL_CORE0] = { .a2w_ctrl = 0x00000002 },
+
+ [CPRMAN_PLLD_CHANNEL_DSI0] = { .a2w_ctrl = 0x00000100 },
+ [CPRMAN_PLLD_CHANNEL_CORE] = { .a2w_ctrl = 0x00000004 },
+ [CPRMAN_PLLD_CHANNEL_PER] = { .a2w_ctrl = 0x00000004 },
+ [CPRMAN_PLLD_CHANNEL_DSI1] = { .a2w_ctrl = 0x00000100 },
+
+ [CPRMAN_PLLH_CHANNEL_AUX] = { .a2w_ctrl = 0x00000004 },
+ [CPRMAN_PLLH_CHANNEL_RCAL] = { .a2w_ctrl = 0x00000000 },
+ [CPRMAN_PLLH_CHANNEL_PIX] = { .a2w_ctrl = 0x00000000 },
+
+ [CPRMAN_PLLB_CHANNEL_ARM] = { .a2w_ctrl = 0x00000000 }, /* unknown */
+};
+
+typedef struct ClockMuxResetInfo {
+ uint32_t cm_ctl;
+ uint32_t cm_div;
+} ClockMuxResetInfo;
+
+static const ClockMuxResetInfo CLOCK_MUX_RESET_INFO[] = {
+ [CPRMAN_CLOCK_GNRIC] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_VPU] = {
+ .cm_ctl = 0x00000245,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_SYS] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_PERIA] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_PERII] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_H264] = {
+ .cm_ctl = 0x00000244,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_ISP] = {
+ .cm_ctl = 0x00000244,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_V3D] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_CAM0] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_CAM1] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_CCP2] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_DSI0E] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_DSI0P] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_DPI] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_GP0] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_GP1] = {
+ .cm_ctl = 0x00000096,
+ .cm_div = 0x00014000,
+ },
+
+ [CPRMAN_CLOCK_GP2] = {
+ .cm_ctl = 0x00000291,
+ .cm_div = 0x00249f00,
+ },
+
+ [CPRMAN_CLOCK_HSM] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_OTP] = {
+ .cm_ctl = 0x00000091,
+ .cm_div = 0x00004000,
+ },
+
+ [CPRMAN_CLOCK_PCM] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_PWM] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_SLIM] = {
+ .cm_ctl = 0x00000200,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_SMI] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_TEC] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_TD0] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_TD1] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_TSENS] = {
+ .cm_ctl = 0x00000091,
+ .cm_div = 0x0000a000,
+ },
+
+ [CPRMAN_CLOCK_TIMER] = {
+ .cm_ctl = 0x00000291,
+ .cm_div = 0x00013333,
+ },
+
+ [CPRMAN_CLOCK_UART] = {
+ .cm_ctl = 0x00000296,
+ .cm_div = 0x0000a6ab,
+ },
+
+ [CPRMAN_CLOCK_VEC] = {
+ .cm_ctl = 0x00000097,
+ .cm_div = 0x00002000,
+ },
+
+ [CPRMAN_CLOCK_PULSE] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_SDC] = {
+ .cm_ctl = 0x00004006,
+ .cm_div = 0x00003000,
+ },
+
+ [CPRMAN_CLOCK_ARM] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+
+ [CPRMAN_CLOCK_AVEO] = {
+ .cm_ctl = 0x00000000,
+ .cm_div = 0x00000000,
+ },
+
+ [CPRMAN_CLOCK_EMMC] = {
+ .cm_ctl = 0x00000295,
+ .cm_div = 0x00006000,
+ },
+
+ [CPRMAN_CLOCK_EMMC2] = {
+ .cm_ctl = 0, /* unknown */
+ .cm_div = 0
+ },
+};
+
+#endif
diff --git a/include/hw/misc/bcm2835_mbox.h b/include/hw/misc/bcm2835_mbox.h
index 57f95cc35e..ade27af25d 100644
--- a/include/hw/misc/bcm2835_mbox.h
+++ b/include/hw/misc/bcm2835_mbox.h
@@ -10,10 +10,10 @@
#include "bcm2835_mbox_defs.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_MBOX "bcm2835-mbox"
-#define BCM2835_MBOX(obj) \
- OBJECT_CHECK(BCM2835MboxState, (obj), TYPE_BCM2835_MBOX)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835MboxState, BCM2835_MBOX)
typedef struct {
uint32_t reg[MBOX_SIZE];
@@ -22,7 +22,7 @@ typedef struct {
uint32_t config;
} BCM2835Mbox;
-typedef struct {
+struct BCM2835MboxState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -34,6 +34,6 @@ typedef struct {
bool mbox_irq_disabled;
bool available[MBOX_CHAN_COUNT];
BCM2835Mbox mbox[2];
-} BCM2835MboxState;
+};
#endif
diff --git a/include/hw/misc/bcm2835_mphi.h b/include/hw/misc/bcm2835_mphi.h
index e084314d0f..751363f496 100644
--- a/include/hw/misc/bcm2835_mphi.h
+++ b/include/hw/misc/bcm2835_mphi.h
@@ -19,6 +19,7 @@
#include "hw/irq.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define MPHI_MMIO_SIZE 0x1000
@@ -38,7 +39,6 @@ struct BCM2835MphiState {
#define TYPE_BCM2835_MPHI "bcm2835-mphi"
-#define BCM2835_MPHI(obj) \
- OBJECT_CHECK(BCM2835MphiState, (obj), TYPE_BCM2835_MPHI)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835MphiState, BCM2835_MPHI)
#endif
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
new file mode 100644
index 0000000000..303b9a6f68
--- /dev/null
+++ b/include/hw/misc/bcm2835_powermgt.h
@@ -0,0 +1,29 @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2835_POWERMGT_H
+#define BCM2835_POWERMGT_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
+
+struct BCM2835PowerMgtState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ uint32_t rstc;
+ uint32_t rsts;
+ uint32_t wdog;
+};
+
+#endif
diff --git a/include/hw/misc/bcm2835_property.h b/include/hw/misc/bcm2835_property.h
index b321f22499..ba8896610c 100644
--- a/include/hw/misc/bcm2835_property.h
+++ b/include/hw/misc/bcm2835_property.h
@@ -11,12 +11,12 @@
#include "hw/sysbus.h"
#include "net/net.h"
#include "hw/display/bcm2835_fb.h"
+#include "qom/object.h"
#define TYPE_BCM2835_PROPERTY "bcm2835-property"
-#define BCM2835_PROPERTY(obj) \
- OBJECT_CHECK(BCM2835PropertyState, (obj), TYPE_BCM2835_PROPERTY)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PropertyState, BCM2835_PROPERTY)
-typedef struct {
+struct BCM2835PropertyState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -30,7 +30,8 @@ typedef struct {
MACAddr macaddr;
uint32_t board_rev;
uint32_t addr;
+ char *command_line;
bool pending;
-} BCM2835PropertyState;
+};
#endif
diff --git a/include/hw/misc/bcm2835_rng.h b/include/hw/misc/bcm2835_rng.h
index 41a531bce7..7c1fb3ef40 100644
--- a/include/hw/misc/bcm2835_rng.h
+++ b/include/hw/misc/bcm2835_rng.h
@@ -11,17 +11,17 @@
#define BCM2835_RNG_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_RNG "bcm2835-rng"
-#define BCM2835_RNG(obj) \
- OBJECT_CHECK(BCM2835RngState, (obj), TYPE_BCM2835_RNG)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835RngState, BCM2835_RNG)
-typedef struct {
+struct BCM2835RngState {
SysBusDevice busdev;
MemoryRegion iomem;
uint32_t rng_ctrl;
uint32_t rng_status;
-} BCM2835RngState;
+};
#endif
diff --git a/include/hw/misc/bcm2835_thermal.h b/include/hw/misc/bcm2835_thermal.h
index c3651b27ec..f90f9e487c 100644
--- a/include/hw/misc/bcm2835_thermal.h
+++ b/include/hw/misc/bcm2835_thermal.h
@@ -10,18 +10,18 @@
#define HW_MISC_BCM2835_THERMAL_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_BCM2835_THERMAL "bcm2835-thermal"
-#define BCM2835_THERMAL(obj) \
- OBJECT_CHECK(Bcm2835ThermalState, (obj), TYPE_BCM2835_THERMAL)
+OBJECT_DECLARE_SIMPLE_TYPE(Bcm2835ThermalState, BCM2835_THERMAL)
-typedef struct {
+struct Bcm2835ThermalState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
uint32_t ctl;
-} Bcm2835ThermalState;
+};
#endif
diff --git a/include/hw/misc/djmemc.h b/include/hw/misc/djmemc.h
new file mode 100644
index 0000000000..82d4e4a2fe
--- /dev/null
+++ b/include/hw/misc/djmemc.h
@@ -0,0 +1,30 @@
+/*
+ * djMEMC, macintosh memory and interrupt controller
+ * (Quadra 610/650/800 & Centris 610/650)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_DJMEMC_H
+#define HW_MISC_DJMEMC_H
+
+#include "hw/sysbus.h"
+
+#define DJMEMC_SIZE 0x2000
+#define DJMEMC_NUM_REGS (0x38 / sizeof(uint32_t))
+
+#define DJMEMC_MAXBANKS 10
+
+struct DJMEMCState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mem_regs;
+
+ /* Memory controller */
+ uint32_t regs[DJMEMC_NUM_REGS];
+};
+
+#define TYPE_DJMEMC "djMEMC"
+OBJECT_DECLARE_SIMPLE_TYPE(DJMEMCState, DJMEMC);
+
+#endif
diff --git a/include/hw/misc/grlib_ahb_apb_pnp.h b/include/hw/misc/grlib_ahb_apb_pnp.h
index a0f6dcfda7..bab0b5f47f 100644
--- a/include/hw/misc/grlib_ahb_apb_pnp.h
+++ b/include/hw/misc/grlib_ahb_apb_pnp.h
@@ -23,16 +23,13 @@
#ifndef GRLIB_AHB_APB_PNP_H
#define GRLIB_AHB_APB_PNP_H
+#include "qom/object.h"
-#define TYPE_GRLIB_AHB_PNP "grlib,ahbpnp"
-#define GRLIB_AHB_PNP(obj) \
- OBJECT_CHECK(AHBPnp, (obj), TYPE_GRLIB_AHB_PNP)
-typedef struct AHBPnp AHBPnp;
+#define TYPE_GRLIB_AHB_PNP "grlib-ahbpnp"
+OBJECT_DECLARE_SIMPLE_TYPE(AHBPnp, GRLIB_AHB_PNP)
-#define TYPE_GRLIB_APB_PNP "grlib,apbpnp"
-#define GRLIB_APB_PNP(obj) \
- OBJECT_CHECK(APBPnp, (obj), TYPE_GRLIB_APB_PNP)
-typedef struct APBPnp APBPnp;
+#define TYPE_GRLIB_APB_PNP "grlib-apbpnp"
+OBJECT_DECLARE_SIMPLE_TYPE(APBPnp, GRLIB_APB_PNP)
void grlib_ahb_pnp_add_entry(AHBPnp *dev, uint32_t address, uint32_t mask,
uint8_t vendor, uint16_t device, int slave,
diff --git a/include/hw/misc/imx25_ccm.h b/include/hw/misc/imx25_ccm.h
index 296321c612..c3b89018c6 100644
--- a/include/hw/misc/imx25_ccm.h
+++ b/include/hw/misc/imx25_ccm.h
@@ -12,6 +12,7 @@
#define IMX25_CCM_H
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
#define IMX25_CCM_MPCTL_REG 0
#define IMX25_CCM_UPCTL_REG 1
@@ -63,9 +64,9 @@
CCTL_##name##_SHIFT)
#define TYPE_IMX25_CCM "imx25.ccm"
-#define IMX25_CCM(obj) OBJECT_CHECK(IMX25CCMState, (obj), TYPE_IMX25_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX25CCMState, IMX25_CCM)
-typedef struct IMX25CCMState {
+struct IMX25CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -74,6 +75,6 @@ typedef struct IMX25CCMState {
uint32_t reg[IMX25_CCM_MAX_REG];
-} IMX25CCMState;
+};
#endif /* IMX25_CCM_H */
diff --git a/include/hw/misc/imx31_ccm.h b/include/hw/misc/imx31_ccm.h
index c376fad14c..18e08ee84f 100644
--- a/include/hw/misc/imx31_ccm.h
+++ b/include/hw/misc/imx31_ccm.h
@@ -12,6 +12,7 @@
#define IMX31_CCM_H
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
#define IMX31_CCM_CCMR_REG 0
#define IMX31_CCM_PDR0_REG 1
@@ -72,9 +73,9 @@
PDR0_##name##_PODF_SHIFT)
#define TYPE_IMX31_CCM "imx31.ccm"
-#define IMX31_CCM(obj) OBJECT_CHECK(IMX31CCMState, (obj), TYPE_IMX31_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX31CCMState, IMX31_CCM)
-typedef struct IMX31CCMState {
+struct IMX31CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -83,6 +84,6 @@ typedef struct IMX31CCMState {
uint32_t reg[IMX31_CCM_MAX_REG];
-} IMX31CCMState;
+};
#endif /* IMX31_CCM_H */
diff --git a/include/hw/misc/imx6_ccm.h b/include/hw/misc/imx6_ccm.h
index 80505809b4..ccf46d7353 100644
--- a/include/hw/misc/imx6_ccm.h
+++ b/include/hw/misc/imx6_ccm.h
@@ -13,6 +13,7 @@
#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
#define CCM_CCR 0
#define CCM_CCDR 1
@@ -178,9 +179,9 @@
#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
#define TYPE_IMX6_CCM "imx6.ccm"
-#define IMX6_CCM(obj) OBJECT_CHECK(IMX6CCMState, (obj), TYPE_IMX6_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX6CCMState, IMX6_CCM)
-typedef struct IMX6CCMState {
+struct IMX6CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -192,6 +193,6 @@ typedef struct IMX6CCMState {
uint32_t ccm[CCM_MAX];
uint32_t analog[CCM_ANALOG_MAX];
-} IMX6CCMState;
+};
#endif /* IMX6_CCM_H */
diff --git a/include/hw/misc/imx6_src.h b/include/hw/misc/imx6_src.h
index eb3640732e..f380da3810 100644
--- a/include/hw/misc/imx6_src.h
+++ b/include/hw/misc/imx6_src.h
@@ -13,6 +13,7 @@
#include "hw/sysbus.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
#define SRC_SCR 0
#define SRC_SBMR1 1
@@ -57,9 +58,9 @@
#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
#define TYPE_IMX6_SRC "imx6.src"
-#define IMX6_SRC(obj) OBJECT_CHECK(IMX6SRCState, (obj), TYPE_IMX6_SRC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX6SRCState, IMX6_SRC)
-typedef struct IMX6SRCState {
+struct IMX6SRCState {
/* <private> */
SysBusDevice parent_obj;
@@ -68,6 +69,6 @@ typedef struct IMX6SRCState {
uint32_t regs[SRC_MAX];
-} IMX6SRCState;
+};
#endif /* IMX6_SRC_H */
diff --git a/include/hw/misc/imx6ul_ccm.h b/include/hw/misc/imx6ul_ccm.h
index 377ddca244..edb5f784d5 100644
--- a/include/hw/misc/imx6ul_ccm.h
+++ b/include/hw/misc/imx6ul_ccm.h
@@ -12,6 +12,7 @@
#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
#define CCM_CCR 0
#define CCM_CCDR 1
@@ -207,9 +208,9 @@
#define CCM_ANALOG_PLL_LOCK (1 << 31);
#define TYPE_IMX6UL_CCM "imx6ul.ccm"
-#define IMX6UL_CCM(obj) OBJECT_CHECK(IMX6ULCCMState, (obj), TYPE_IMX6UL_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX6ULCCMState, IMX6UL_CCM)
-typedef struct IMX6ULCCMState {
+struct IMX6ULCCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -221,6 +222,6 @@ typedef struct IMX6ULCCMState {
uint32_t ccm[CCM_MAX];
uint32_t analog[CCM_ANALOG_MAX];
-} IMX6ULCCMState;
+};
#endif /* IMX6UL_CCM_H */
diff --git a/include/hw/misc/imx7_ccm.h b/include/hw/misc/imx7_ccm.h
index 9538f37d98..dcaebfb4ef 100644
--- a/include/hw/misc/imx7_ccm.h
+++ b/include/hw/misc/imx7_ccm.h
@@ -14,6 +14,7 @@
#include "hw/misc/imx_ccm.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
enum IMX7AnalogRegisters {
ANALOG_PLL_ARM,
@@ -104,9 +105,9 @@ enum IMX7PMURegisters {
};
#define TYPE_IMX7_CCM "imx7.ccm"
-#define IMX7_CCM(obj) OBJECT_CHECK(IMX7CCMState, (obj), TYPE_IMX7_CCM)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7CCMState, IMX7_CCM)
-typedef struct IMX7CCMState {
+struct IMX7CCMState {
/* <private> */
IMXCCMState parent_obj;
@@ -114,13 +115,13 @@ typedef struct IMX7CCMState {
MemoryRegion iomem;
uint32_t ccm[CCM_MAX];
-} IMX7CCMState;
+};
#define TYPE_IMX7_ANALOG "imx7.analog"
-#define IMX7_ANALOG(obj) OBJECT_CHECK(IMX7AnalogState, (obj), TYPE_IMX7_ANALOG)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7AnalogState, IMX7_ANALOG)
-typedef struct IMX7AnalogState {
+struct IMX7AnalogState {
/* <private> */
IMXCCMState parent_obj;
@@ -134,6 +135,6 @@ typedef struct IMX7AnalogState {
uint32_t analog[ANALOG_MAX];
uint32_t pmu[PMU_MAX];
-} IMX7AnalogState;
+};
#endif /* IMX7_CCM_H */
diff --git a/include/hw/misc/imx7_gpr.h b/include/hw/misc/imx7_gpr.h
index e19373d274..df364bd8f0 100644
--- a/include/hw/misc/imx7_gpr.h
+++ b/include/hw/misc/imx7_gpr.h
@@ -14,15 +14,16 @@
#include "qemu/bitops.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX7_GPR "imx7.gpr"
-#define IMX7_GPR(obj) OBJECT_CHECK(IMX7GPRState, (obj), TYPE_IMX7_GPR)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7GPRState, IMX7_GPR)
-typedef struct IMX7GPRState {
+struct IMX7GPRState {
/* <private> */
SysBusDevice parent_obj;
MemoryRegion mmio;
-} IMX7GPRState;
+};
#endif /* IMX7_GPR_H */
diff --git a/include/hw/misc/imx7_snvs.h b/include/hw/misc/imx7_snvs.h
index 255f8f26f9..1272076086 100644
--- a/include/hw/misc/imx7_snvs.h
+++ b/include/hw/misc/imx7_snvs.h
@@ -14,22 +14,28 @@
#include "qemu/bitops.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
enum IMX7SNVSRegisters {
SNVS_LPCR = 0x38,
SNVS_LPCR_TOP = BIT(6),
- SNVS_LPCR_DP_EN = BIT(5)
+ SNVS_LPCR_DP_EN = BIT(5),
+ SNVS_LPSRTCMR = 0x050, /* Secure Real Time Counter MSB Register */
+ SNVS_LPSRTCLR = 0x054, /* Secure Real Time Counter LSB Register */
};
#define TYPE_IMX7_SNVS "imx7.snvs"
-#define IMX7_SNVS(obj) OBJECT_CHECK(IMX7SNVSState, (obj), TYPE_IMX7_SNVS)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7SNVSState, IMX7_SNVS)
-typedef struct IMX7SNVSState {
+struct IMX7SNVSState {
/* <private> */
SysBusDevice parent_obj;
MemoryRegion mmio;
-} IMX7SNVSState;
+
+ uint64_t tick_offset;
+ uint64_t lpcr;
+};
#endif /* IMX7_SNVS_H */
diff --git a/include/hw/misc/imx7_src.h b/include/hw/misc/imx7_src.h
new file mode 100644
index 0000000000..b4b97dcb1c
--- /dev/null
+++ b/include/hw/misc/imx7_src.h
@@ -0,0 +1,66 @@
+/*
+ * IMX7 System Reset Controller
+ *
+ * Copyright (C) 2023 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef IMX7_SRC_H
+#define IMX7_SRC_H
+
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "qom/object.h"
+
+#define SRC_SCR 0
+#define SRC_A7RCR0 1
+#define SRC_A7RCR1 2
+#define SRC_M4RCR 3
+#define SRC_ERCR 5
+#define SRC_HSICPHY_RCR 7
+#define SRC_USBOPHY1_RCR 8
+#define SRC_USBOPHY2_RCR 9
+#define SRC_MPIPHY_RCR 10
+#define SRC_PCIEPHY_RCR 11
+#define SRC_SBMR1 22
+#define SRC_SRSR 23
+#define SRC_SISR 26
+#define SRC_SIMR 27
+#define SRC_SBMR2 28
+#define SRC_GPR1 29
+#define SRC_GPR2 30
+#define SRC_GPR3 31
+#define SRC_GPR4 32
+#define SRC_GPR5 33
+#define SRC_GPR6 34
+#define SRC_GPR7 35
+#define SRC_GPR8 36
+#define SRC_GPR9 37
+#define SRC_GPR10 38
+#define SRC_MAX 39
+
+/* SRC_A7RCR1 */
+#define R_CORE1_ENABLE_SHIFT 1
+#define R_CORE1_ENABLE_LENGTH 1
+/* SRC_A7RCR0 */
+#define R_CORE1_RST_SHIFT 5
+#define R_CORE1_RST_LENGTH 1
+#define R_CORE0_RST_SHIFT 4
+#define R_CORE0_RST_LENGTH 1
+
+#define TYPE_IMX7_SRC "imx7.src"
+OBJECT_DECLARE_SIMPLE_TYPE(IMX7SRCState, IMX7_SRC)
+
+struct IMX7SRCState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+
+ uint32_t regs[SRC_MAX];
+};
+
+#endif /* IMX7_SRC_H */
diff --git a/include/hw/misc/imx_ccm.h b/include/hw/misc/imx_ccm.h
index 33cbc09952..7e5678e972 100644
--- a/include/hw/misc/imx_ccm.h
+++ b/include/hw/misc/imx_ccm.h
@@ -12,6 +12,7 @@
#define IMX_CCM_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define CKIL_FREQ 32768 /* nominal 32khz clock */
@@ -27,20 +28,15 @@
#define PLL_MFN(x) (((x) & 0x3ff) << 0)
#define TYPE_IMX_CCM "imx.ccm"
-#define IMX_CCM(obj) \
- OBJECT_CHECK(IMXCCMState, (obj), TYPE_IMX_CCM)
-#define IMX_CCM_CLASS(klass) \
- OBJECT_CLASS_CHECK(IMXCCMClass, (klass), TYPE_IMX_CCM)
-#define IMX_GET_CLASS(obj) \
- OBJECT_GET_CLASS(IMXCCMClass, (obj), TYPE_IMX_CCM)
+OBJECT_DECLARE_TYPE(IMXCCMState, IMXCCMClass, IMX_CCM)
-typedef struct IMXCCMState {
+struct IMXCCMState {
/* <private> */
SysBusDevice parent_obj;
/* <public> */
-} IMXCCMState;
+};
typedef enum {
CLK_NONE,
@@ -52,13 +48,13 @@ typedef enum {
CLK_HIGH,
} IMXClk;
-typedef struct IMXCCMClass {
+struct IMXCCMClass {
/* <private> */
SysBusDeviceClass parent_class;
/* <public> */
uint32_t (*get_clock_frequency)(IMXCCMState *s, IMXClk clk);
-} IMXCCMClass;
+};
uint32_t imx_ccm_calc_pll(uint32_t pllreg, uint32_t base_freq);
diff --git a/include/hw/misc/imx_rngc.h b/include/hw/misc/imx_rngc.h
index f0d2b44d4f..34ad699225 100644
--- a/include/hw/misc/imx_rngc.h
+++ b/include/hw/misc/imx_rngc.h
@@ -11,11 +11,12 @@
#define IMX_RNGC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IMX_RNGC "imx.rngc"
-#define IMX_RNGC(obj) OBJECT_CHECK(IMXRNGCState, (obj), TYPE_IMX_RNGC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXRNGCState, IMX_RNGC)
-typedef struct IMXRNGCState {
+struct IMXRNGCState {
/*< private >*/
SysBusDevice parent_obj;
@@ -30,6 +31,6 @@ typedef struct IMXRNGCState {
QEMUBH *self_test_bh;
QEMUBH *seed_bh;
qemu_irq irq;
-} IMXRNGCState;
+};
#endif /* IMX_RNGC_H */
diff --git a/include/hw/misc/iosb.h b/include/hw/misc/iosb.h
new file mode 100644
index 0000000000..377f8ca7e2
--- /dev/null
+++ b/include/hw/misc/iosb.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU IOSB emulation
+ *
+ * Copyright (c) 2019 Laurent Vivier
+ * Copyright (c) 2022 Mark Cave-Ayland
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MEM_IOSB_H
+#define HW_MEM_IOSB_H
+
+#define IOSB_REGS 7
+
+struct IOSBState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mem_regs;
+ uint32_t regs[IOSB_REGS];
+};
+
+#define TYPE_IOSB "IOSB"
+OBJECT_DECLARE_SIMPLE_TYPE(IOSBState, IOSB);
+
+#endif
diff --git a/include/hw/misc/iotkit-secctl.h b/include/hw/misc/iotkit-secctl.h
index bcb0437be5..79a3628320 100644
--- a/include/hw/misc/iotkit-secctl.h
+++ b/include/hw/misc/iotkit-secctl.h
@@ -11,7 +11,7 @@
/* This is a model of the security controller which is part of the
* Arm IoT Kit and documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
*
* QEMU interface:
* + sysbus MMIO region 0 is the "secure privilege control block" registers
@@ -56,9 +56,10 @@
#define IOTKIT_SECCTL_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IOTKIT_SECCTL "iotkit-secctl"
-#define IOTKIT_SECCTL(obj) OBJECT_CHECK(IoTKitSecCtl, (obj), TYPE_IOTKIT_SECCTL)
+OBJECT_DECLARE_SIMPLE_TYPE(IoTKitSecCtl, IOTKIT_SECCTL)
#define IOTS_APB_PPC0_NUM_PORTS 3
#define IOTS_APB_PPC1_NUM_PORTS 1
@@ -70,7 +71,6 @@
#define IOTS_NUM_MPC 4
#define IOTS_NUM_EXP_MSC 16
-typedef struct IoTKitSecCtl IoTKitSecCtl;
/* State and IRQ lines relating to a PPC. For the
* PPCs in the IoTKit not all the IRQ lines are used.
@@ -120,6 +120,8 @@ struct IoTKitSecCtl {
IoTKitSecCtlPPC apb[IOTS_NUM_APB_PPC];
IoTKitSecCtlPPC apbexp[IOTS_NUM_APB_EXP_PPC];
IoTKitSecCtlPPC ahbexp[IOTS_NUM_APB_EXP_PPC];
+
+ uint32_t sse_version;
};
#endif
diff --git a/include/hw/misc/iotkit-sysctl.h b/include/hw/misc/iotkit-sysctl.h
index 601c8ecc0d..481e27f4db 100644
--- a/include/hw/misc/iotkit-sysctl.h
+++ b/include/hw/misc/iotkit-sysctl.h
@@ -12,14 +12,13 @@
/*
* This is a model of the "system control element" which is part of the
* Arm IoTKit and documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
* Specifically, it implements the "system information block" and
* "system control register" blocks.
*
* QEMU interface:
- * + QOM property "SYS_VERSION": value of the SYS_VERSION register of the
- * system information block of the SSE
- * (used to identify whether to provide SSE-200-only registers)
+ * + QOM property "sse-version": indicates which SSE version this is part of
+ * (used to identify whether to provide SSE-200-only registers, etc)
* + sysbus MMIO region 0: the system information register bank
* + sysbus MMIO region 1: the system control register bank
*/
@@ -28,12 +27,12 @@
#define HW_MISC_IOTKIT_SYSCTL_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IOTKIT_SYSCTL "iotkit-sysctl"
-#define IOTKIT_SYSCTL(obj) OBJECT_CHECK(IoTKitSysCtl, (obj), \
- TYPE_IOTKIT_SYSCTL)
+OBJECT_DECLARE_SIMPLE_TYPE(IoTKitSysCtl, IOTKIT_SYSCTL)
-typedef struct IoTKitSysCtl {
+struct IoTKitSysCtl {
/*< private >*/
SysBusDevice parent_obj;
@@ -54,19 +53,21 @@ typedef struct IoTKitSysCtl {
uint32_t initsvtor1;
uint32_t nmi_enable;
uint32_t ewctrl;
+ uint32_t pwrctrl;
uint32_t pdcm_pd_sys_sense;
uint32_t pdcm_pd_sram0_sense;
uint32_t pdcm_pd_sram1_sense;
uint32_t pdcm_pd_sram2_sense;
uint32_t pdcm_pd_sram3_sense;
+ uint32_t pdcm_pd_cpu0_sense;
+ uint32_t pdcm_pd_vmr0_sense;
+ uint32_t pdcm_pd_vmr1_sense;
/* Properties */
- uint32_t sys_version;
+ uint32_t sse_version;
uint32_t cpuwait_rst;
uint32_t initsvtor0_rst;
uint32_t initsvtor1_rst;
-
- bool is_sse200;
-} IoTKitSysCtl;
+};
#endif
diff --git a/include/hw/misc/iotkit-sysinfo.h b/include/hw/misc/iotkit-sysinfo.h
index d84eb203b9..91c23f90d2 100644
--- a/include/hw/misc/iotkit-sysinfo.h
+++ b/include/hw/misc/iotkit-sysinfo.h
@@ -12,7 +12,7 @@
/*
* This is a model of the "system information block" which is part of the
* Arm IoTKit and documented in
- * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ecm0601256/index.html
+ * https://developer.arm.com/documentation/ecm0601256/latest
* QEMU interface:
* + QOM property "SYS_VERSION": value to use for SYS_VERSION register
* + QOM property "SYS_CONFIG": value to use for SYS_CONFIG register
@@ -23,12 +23,12 @@
#define HW_MISC_IOTKIT_SYSINFO_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_IOTKIT_SYSINFO "iotkit-sysinfo"
-#define IOTKIT_SYSINFO(obj) OBJECT_CHECK(IoTKitSysInfo, (obj), \
- TYPE_IOTKIT_SYSINFO)
+OBJECT_DECLARE_SIMPLE_TYPE(IoTKitSysInfo, IOTKIT_SYSINFO)
-typedef struct IoTKitSysInfo {
+struct IoTKitSysInfo {
/*< private >*/
SysBusDevice parent_obj;
@@ -38,6 +38,8 @@ typedef struct IoTKitSysInfo {
/* Properties */
uint32_t sys_version;
uint32_t sys_config;
-} IoTKitSysInfo;
+ uint32_t sse_version;
+ uint32_t iidr;
+};
#endif
diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h
new file mode 100644
index 0000000000..f01c0f680a
--- /dev/null
+++ b/include/hw/misc/lasi.h
@@ -0,0 +1,79 @@
+/*
+ * HP-PARISC Lasi chipset emulation.
+ *
+ * (C) 2019 by Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * Documentation available at:
+ * https://parisc.wiki.kernel.org/images-parisc/7/79/Lasi_ers.pdf
+ */
+
+#ifndef LASI_H
+#define LASI_H
+
+#include "exec/address-spaces.h"
+#include "hw/pci/pci_host.h"
+#include "hw/boards.h"
+
+#define TYPE_LASI_CHIP "lasi-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(LasiState, LASI_CHIP)
+
+#define LASI_IRR 0x00 /* RO */
+#define LASI_IMR 0x04
+#define LASI_IPR 0x08
+#define LASI_ICR 0x0c
+#define LASI_IAR 0x10
+
+#define LASI_LPT 0x02000
+#define LASI_AUDIO 0x04000
+#define LASI_UART 0x05000
+#define LASI_LAN 0x07000
+#define LASI_RTC 0x09000
+#define LASI_FDC 0x0A000
+
+#define LASI_PCR 0x0C000 /* LASI Power Control register */
+#define LASI_ERRLOG 0x0C004 /* LASI Error Logging register */
+#define LASI_VER 0x0C008 /* LASI Version Control register */
+#define LASI_IORESET 0x0C00C /* LASI I/O Reset register */
+#define LASI_AMR 0x0C010 /* LASI Arbitration Mask register */
+#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */
+#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */
+
+#define LASI_BIT(x) (1ul << (x))
+#define LASI_IRQ_BITS (LASI_BIT(5) | LASI_BIT(7) | LASI_BIT(8) | LASI_BIT(9) \
+ | LASI_BIT(13) | LASI_BIT(14) | LASI_BIT(16) | LASI_BIT(17) \
+ | LASI_BIT(18) | LASI_BIT(19) | LASI_BIT(20) | LASI_BIT(21) \
+ | LASI_BIT(26))
+
+#define ICR_BUS_ERROR_BIT LASI_BIT(8) /* bit 8 in ICR */
+#define ICR_TOC_BIT LASI_BIT(1) /* bit 1 in ICR */
+
+#define LASI_IRQS 27
+
+#define LASI_IRQ_HPA 14
+#define LASI_IRQ_UART_HPA 5
+#define LASI_IRQ_LPT_HPA 7
+#define LASI_IRQ_LAN_HPA 8
+#define LASI_IRQ_SCSI_HPA 9
+#define LASI_IRQ_AUDIO_HPA 13
+#define LASI_IRQ_PS2KBD_HPA 26
+#define LASI_IRQ_PS2MOU_HPA 26
+
+struct LasiState {
+ PCIHostState parent_obj;
+
+ uint32_t irr;
+ uint32_t imr;
+ uint32_t ipr;
+ uint32_t icr;
+ uint32_t iar;
+
+ uint32_t errlog;
+ uint32_t amr;
+ uint32_t rtc_ref;
+
+ MemoryRegion this_mem;
+};
+
+#endif
diff --git a/include/hw/misc/led.h b/include/hw/misc/led.h
new file mode 100644
index 0000000000..29c0879570
--- /dev/null
+++ b/include/hw/misc/led.h
@@ -0,0 +1,98 @@
+/*
+ * QEMU single LED device
+ *
+ * Copyright (C) 2020 Philippe Mathieu-Daudé <f4bug@amsat.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MISC_LED_H
+#define HW_MISC_LED_H
+
+#include "qom/object.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_LED "led"
+
+/**
+ * LEDColor: Color of a LED
+ *
+ * This set is restricted to physically available LED colors.
+ *
+ * LED colors from 'Table 1. Product performance of LUXEON Rebel Color
+ * Line' of the 'DS68 LUXEON Rebel Color Line' datasheet available at:
+ * https://www.lumileds.com/products/color-leds/luxeon-rebel-color/
+ */
+typedef enum { /* Coarse wavelength range */
+ LED_COLOR_VIOLET, /* 425 nm */
+ LED_COLOR_BLUE, /* 475 nm */
+ LED_COLOR_CYAN, /* 500 nm */
+ LED_COLOR_GREEN, /* 535 nm */
+ LED_COLOR_YELLOW, /* 567 nm */
+ LED_COLOR_AMBER, /* 590 nm */
+ LED_COLOR_ORANGE, /* 615 nm */
+ LED_COLOR_RED, /* 630 nm */
+} LEDColor;
+
+struct LEDState {
+ /* Private */
+ DeviceState parent_obj;
+ /* Public */
+
+ uint8_t intensity_percent;
+ qemu_irq irq;
+
+ /* Properties */
+ char *description;
+ char *color;
+ /*
+ * Determines whether a GPIO is using a positive (active-high)
+ * logic (when used with GPIO, the intensity at reset is related
+ * to the GPIO polarity).
+ */
+ bool gpio_active_high;
+};
+typedef struct LEDState LEDState;
+DECLARE_INSTANCE_CHECKER(LEDState, LED, TYPE_LED)
+
+/**
+ * led_set_intensity: Set the intensity of a LED device
+ * @s: the LED object
+ * @intensity_percent: intensity as percentage in range 0 to 100.
+ */
+void led_set_intensity(LEDState *s, unsigned intensity_percent);
+
+/**
+ * led_get_intensity:
+ * @s: the LED object
+ *
+ * Returns: The LED intensity as percentage in range 0 to 100.
+ */
+unsigned led_get_intensity(LEDState *s);
+
+/**
+ * led_set_state: Set the state of a LED device
+ * @s: the LED object
+ * @is_emitting: boolean indicating whether the LED is emitting
+ *
+ * This utility is meant for LED connected to GPIO.
+ */
+void led_set_state(LEDState *s, bool is_emitting);
+
+/**
+ * led_create_simple: Create and realize a LED device
+ * @parentobj: the parent object
+ * @gpio_polarity: GPIO polarity
+ * @color: color of the LED
+ * @description: description of the LED (optional)
+ *
+ * Create the device state structure, initialize it, and
+ * drop the reference to it (the device is realized).
+ *
+ * Returns: The newly allocated and instantiated LED object.
+ */
+LEDState *led_create_simple(Object *parentobj,
+ GpioPolarity gpio_polarity,
+ LEDColor color,
+ const char *description);
+
+#endif /* HW_MISC_LED_H */
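As a rough illustration of the API declared in this new header (not part of the patch itself), board code might create and wire a single LED along these lines; the machine object, the GPIO source device and its line number are assumptions:

    /* Sketch only: a green "power" LED lit when its input GPIO is driven high.
     * The LED's anonymous input GPIO line 0 is assumed here. */
    LEDState *led = led_create_simple(OBJECT(machine),
                                      GPIO_POLARITY_ACTIVE_HIGH,
                                      LED_COLOR_GREEN,
                                      "power");
    qdev_connect_gpio_out(gpio_dev, 0, qdev_get_gpio_in(DEVICE(led), 0));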
diff --git a/include/hw/misc/mac_via.h b/include/hw/misc/mac_via.h
index 0be05d649b..63cdcf7c69 100644
--- a/include/hw/misc/mac_via.h
+++ b/include/hw/misc/mac_via.h
@@ -12,87 +12,43 @@
#include "exec/memory.h"
#include "hw/sysbus.h"
#include "hw/misc/mos6522.h"
+#include "hw/input/adb.h"
+#include "qom/object.h"
-/* VIA 1 */
-#define VIA1_IRQ_ONE_SECOND_BIT 0
-#define VIA1_IRQ_VBLANK_BIT 1
-#define VIA1_IRQ_ADB_READY_BIT 2
-#define VIA1_IRQ_ADB_DATA_BIT 3
-#define VIA1_IRQ_ADB_CLOCK_BIT 4
+#define VIA_SIZE 0x2000
-#define VIA1_IRQ_NB 8
+/* VIA 1 */
+#define VIA1_IRQ_ONE_SECOND_BIT CA2_INT_BIT
+#define VIA1_IRQ_60HZ_BIT CA1_INT_BIT
+#define VIA1_IRQ_ADB_READY_BIT SR_INT_BIT
+#define VIA1_IRQ_ADB_DATA_BIT CB2_INT_BIT
+#define VIA1_IRQ_ADB_CLOCK_BIT CB1_INT_BIT
-#define VIA1_IRQ_ONE_SECOND (1 << VIA1_IRQ_ONE_SECOND_BIT)
-#define VIA1_IRQ_VBLANK (1 << VIA1_IRQ_VBLANK_BIT)
-#define VIA1_IRQ_ADB_READY (1 << VIA1_IRQ_ADB_READY_BIT)
-#define VIA1_IRQ_ADB_DATA (1 << VIA1_IRQ_ADB_DATA_BIT)
-#define VIA1_IRQ_ADB_CLOCK (1 << VIA1_IRQ_ADB_CLOCK_BIT)
+#define VIA1_IRQ_ONE_SECOND BIT(VIA1_IRQ_ONE_SECOND_BIT)
+#define VIA1_IRQ_60HZ BIT(VIA1_IRQ_60HZ_BIT)
+#define VIA1_IRQ_ADB_READY BIT(VIA1_IRQ_ADB_READY_BIT)
+#define VIA1_IRQ_ADB_DATA BIT(VIA1_IRQ_ADB_DATA_BIT)
+#define VIA1_IRQ_ADB_CLOCK BIT(VIA1_IRQ_ADB_CLOCK_BIT)
#define TYPE_MOS6522_Q800_VIA1 "mos6522-q800-via1"
-#define MOS6522_Q800_VIA1(obj) OBJECT_CHECK(MOS6522Q800VIA1State, (obj), \
- TYPE_MOS6522_Q800_VIA1)
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522Q800VIA1State, MOS6522_Q800_VIA1)
-typedef struct MOS6522Q800VIA1State {
+struct MOS6522Q800VIA1State {
/*< private >*/
MOS6522State parent_obj;
- qemu_irq irqs[VIA1_IRQ_NB];
- uint8_t last_b;
- uint8_t PRAM[256];
-
- /* external timers */
- QEMUTimer *one_second_timer;
- int64_t next_second;
- QEMUTimer *VBL_timer;
- int64_t next_VBL;
-} MOS6522Q800VIA1State;
-
-
-/* VIA 2 */
-#define VIA2_IRQ_SCSI_DATA_BIT 0
-#define VIA2_IRQ_SLOT_BIT 1
-#define VIA2_IRQ_UNUSED_BIT 2
-#define VIA2_IRQ_SCSI_BIT 3
-#define VIA2_IRQ_ASC_BIT 4
+ MemoryRegion via_mem;
-#define VIA2_IRQ_NB 8
-
-#define VIA2_IRQ_SCSI_DATA (1 << VIA2_IRQ_SCSI_DATA_BIT)
-#define VIA2_IRQ_SLOT (1 << VIA2_IRQ_SLOT_BIT)
-#define VIA2_IRQ_UNUSED (1 << VIA2_IRQ_SCSI_BIT)
-#define VIA2_IRQ_SCSI (1 << VIA2_IRQ_UNUSED_BIT)
-#define VIA2_IRQ_ASC (1 << VIA2_IRQ_ASC_BIT)
-
-#define TYPE_MOS6522_Q800_VIA2 "mos6522-q800-via2"
-#define MOS6522_Q800_VIA2(obj) OBJECT_CHECK(MOS6522Q800VIA2State, (obj), \
- TYPE_MOS6522_Q800_VIA2)
-
-typedef struct MOS6522Q800VIA2State {
- /*< private >*/
- MOS6522State parent_obj;
-} MOS6522Q800VIA2State;
-
-
-#define TYPE_MAC_VIA "mac_via"
-#define MAC_VIA(obj) OBJECT_CHECK(MacVIAState, (obj), TYPE_MAC_VIA)
-
-typedef struct MacVIAState {
- SysBusDevice busdev;
+ qemu_irq auxmode_irq;
+ uint8_t last_b;
+ /* RTC */
+ uint8_t PRAM[256];
+ BlockBackend *blk;
VMChangeStateEntry *vmstate;
- /* MMIO */
- MemoryRegion mmio;
- MemoryRegion via1mem;
- MemoryRegion via2mem;
-
- /* VIAs */
- MOS6522Q800VIA1State mos6522_via1;
- MOS6522Q800VIA2State mos6522_via2;
-
- /* RTC */
uint32_t tick_offset;
uint8_t data_out;
@@ -102,7 +58,6 @@ typedef struct MacVIAState {
uint8_t cmd;
int wprotect;
int alt;
- BlockBackend *blk;
/* ADB */
ADBBusState adb_bus;
@@ -113,6 +68,48 @@ typedef struct MacVIAState {
uint8_t adb_data_in[128];
uint8_t adb_data_out[16];
uint8_t adb_autopoll_cmd;
-} MacVIAState;
+
+ /* external timers */
+ QEMUTimer *one_second_timer;
+ int64_t next_second;
+ QEMUTimer *sixty_hz_timer;
+ int64_t next_sixty_hz;
+
+ /* SETUPTIMEK hack */
+ int timer_hack_state;
+};
+
+
+/* VIA 2 */
+#define VIA2_IRQ_SCSI_DATA_BIT CA2_INT_BIT
+#define VIA2_IRQ_NUBUS_BIT CA1_INT_BIT
+#define VIA2_IRQ_SCSI_BIT CB2_INT_BIT
+#define VIA2_IRQ_ASC_BIT CB1_INT_BIT
+
+#define VIA2_IRQ_SCSI_DATA BIT(VIA2_IRQ_SCSI_DATA_BIT)
+#define VIA2_IRQ_NUBUS BIT(VIA2_IRQ_NUBUS_BIT)
+#define VIA2_IRQ_UNUSED BIT(VIA2_IRQ_SCSI_BIT)
+#define VIA2_IRQ_SCSI BIT(VIA2_IRQ_UNUSED_BIT)
+#define VIA2_IRQ_ASC BIT(VIA2_IRQ_ASC_BIT)
+
+#define VIA2_NUBUS_IRQ_NB 7
+
+#define VIA2_NUBUS_IRQ_9 0
+#define VIA2_NUBUS_IRQ_A 1
+#define VIA2_NUBUS_IRQ_B 2
+#define VIA2_NUBUS_IRQ_C 3
+#define VIA2_NUBUS_IRQ_D 4
+#define VIA2_NUBUS_IRQ_E 5
+#define VIA2_NUBUS_IRQ_INTVIDEO 6
+
+#define TYPE_MOS6522_Q800_VIA2 "mos6522-q800-via2"
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522Q800VIA2State, MOS6522_Q800_VIA2)
+
+struct MOS6522Q800VIA2State {
+ /*< private >*/
+ MOS6522State parent_obj;
+
+ MemoryRegion via_mem;
+};
#endif
diff --git a/include/hw/misc/macio/cuda.h b/include/hw/misc/macio/cuda.h
index a8cf0be1ec..8a6678c749 100644
--- a/include/hw/misc/macio/cuda.h
+++ b/include/hw/misc/macio/cuda.h
@@ -26,7 +26,9 @@
#ifndef CUDA_H
#define CUDA_H
+#include "hw/input/adb.h"
#include "hw/misc/mos6522.h"
+#include "qom/object.h"
/* CUDA commands (2nd byte) */
#define CUDA_WARM_START 0x0
@@ -58,20 +60,19 @@
/* MOS6522 CUDA */
-typedef struct MOS6522CUDAState {
+struct MOS6522CUDAState {
/*< private >*/
MOS6522State parent_obj;
-} MOS6522CUDAState;
+};
#define TYPE_MOS6522_CUDA "mos6522-cuda"
-#define MOS6522_CUDA(obj) OBJECT_CHECK(MOS6522CUDAState, (obj), \
- TYPE_MOS6522_CUDA)
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522CUDAState, MOS6522_CUDA)
/* Cuda */
#define TYPE_CUDA "cuda"
-#define CUDA(obj) OBJECT_CHECK(CUDAState, (obj), TYPE_CUDA)
+OBJECT_DECLARE_SIMPLE_TYPE(CUDAState, CUDA)
-typedef struct CUDAState {
+struct CUDAState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -97,6 +98,6 @@ typedef struct CUDAState {
qemu_irq irq;
uint8_t data_in[128];
uint8_t data_out[16];
-} CUDAState;
+};
#endif /* CUDA_H */
diff --git a/include/hw/misc/macio/gpio.h b/include/hw/misc/macio/gpio.h
index 24a4364b39..7d2aa886c2 100644
--- a/include/hw/misc/macio/gpio.h
+++ b/include/hw/misc/macio/gpio.h
@@ -28,22 +28,21 @@
#include "hw/ppc/openpic.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_MACIO_GPIO "macio-gpio"
-#define MACIO_GPIO(obj) OBJECT_CHECK(MacIOGPIOState, (obj), TYPE_MACIO_GPIO)
+OBJECT_DECLARE_SIMPLE_TYPE(MacIOGPIOState, MACIO_GPIO)
-typedef struct MacIOGPIOState {
+struct MacIOGPIOState {
/*< private >*/
SysBusDevice parent;
/*< public >*/
- OpenPICState *pic;
-
MemoryRegion gpiomem;
qemu_irq gpio_extirqs[10];
uint8_t gpio_levels[8];
uint8_t gpio_regs[36]; /* XXX Check count */
-} MacIOGPIOState;
+};
void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state);
diff --git a/include/hw/misc/macio/macio.h b/include/hw/misc/macio/macio.h
index 87335a991c..2b54da6b31 100644
--- a/include/hw/misc/macio/macio.h
+++ b/include/hw/misc/macio/macio.h
@@ -27,30 +27,52 @@
#define MACIO_H
#include "hw/char/escc.h"
-#include "hw/pci/pci.h"
-#include "hw/ide/internal.h"
+#include "hw/pci/pci_device.h"
+#include "hw/ide/ide-bus.h"
#include "hw/intc/heathrow_pic.h"
#include "hw/misc/macio/cuda.h"
#include "hw/misc/macio/gpio.h"
#include "hw/misc/macio/pmu.h"
-#include "hw/ppc/mac.h"
+#include "hw/nvram/mac_nvram.h"
#include "hw/ppc/mac_dbdma.h"
#include "hw/ppc/openpic.h"
+#include "qom/object.h"
+
+/* Old World IRQs */
+#define OLDWORLD_CUDA_IRQ 0x12
+#define OLDWORLD_ESCCB_IRQ 0x10
+#define OLDWORLD_ESCCA_IRQ 0xf
+#define OLDWORLD_IDE0_IRQ 0xd
+#define OLDWORLD_IDE0_DMA_IRQ 0x2
+#define OLDWORLD_IDE1_IRQ 0xe
+#define OLDWORLD_IDE1_DMA_IRQ 0x3
+
+/* New World IRQs */
+#define NEWWORLD_CUDA_IRQ 0x19
+#define NEWWORLD_PMU_IRQ 0x19
+#define NEWWORLD_ESCCB_IRQ 0x24
+#define NEWWORLD_ESCCA_IRQ 0x25
+#define NEWWORLD_IDE0_IRQ 0xd
+#define NEWWORLD_IDE0_DMA_IRQ 0x2
+#define NEWWORLD_IDE1_IRQ 0xe
+#define NEWWORLD_IDE1_DMA_IRQ 0x3
+#define NEWWORLD_EXTING_GPIO1 0x2f
+#define NEWWORLD_EXTING_GPIO9 0x37
/* MacIO virtual bus */
#define TYPE_MACIO_BUS "macio-bus"
-#define MACIO_BUS(obj) OBJECT_CHECK(MacIOBusState, (obj), TYPE_MACIO_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(MacIOBusState, MACIO_BUS)
-typedef struct MacIOBusState {
+struct MacIOBusState {
/*< private >*/
BusState parent_obj;
-} MacIOBusState;
+};
/* MacIO IDE */
#define TYPE_MACIO_IDE "macio-ide"
-#define MACIO_IDE(obj) OBJECT_CHECK(MACIOIDEState, (obj), TYPE_MACIO_IDE)
+OBJECT_DECLARE_SIMPLE_TYPE(MACIOIDEState, MACIO_IDE)
-typedef struct MACIOIDEState {
+struct MACIOIDEState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -68,15 +90,15 @@ typedef struct MACIOIDEState {
bool dma_active;
uint32_t timing_reg;
uint32_t irq_reg;
-} MACIOIDEState;
+};
void macio_ide_init_drives(MACIOIDEState *ide, DriveInfo **hd_table);
void macio_ide_register_dma(MACIOIDEState *ide);
#define TYPE_MACIO "macio"
-#define MACIO(obj) OBJECT_CHECK(MacIOState, (obj), TYPE_MACIO)
+OBJECT_DECLARE_SIMPLE_TYPE(MacIOState, MACIO)
-typedef struct MacIOState {
+struct MacIOState {
/*< private >*/
PCIDevice parent;
/*< public >*/
@@ -88,37 +110,35 @@ typedef struct MacIOState {
DBDMAState dbdma;
ESCCState escc;
uint64_t frequency;
-} MacIOState;
+};
#define TYPE_OLDWORLD_MACIO "macio-oldworld"
-#define OLDWORLD_MACIO(obj) \
- OBJECT_CHECK(OldWorldMacIOState, (obj), TYPE_OLDWORLD_MACIO)
+OBJECT_DECLARE_SIMPLE_TYPE(OldWorldMacIOState, OLDWORLD_MACIO)
-typedef struct OldWorldMacIOState {
+struct OldWorldMacIOState {
/*< private >*/
MacIOState parent_obj;
/*< public >*/
- HeathrowState *pic;
+ HeathrowState pic;
MacIONVRAMState nvram;
MACIOIDEState ide[2];
-} OldWorldMacIOState;
+};
#define TYPE_NEWWORLD_MACIO "macio-newworld"
-#define NEWWORLD_MACIO(obj) \
- OBJECT_CHECK(NewWorldMacIOState, (obj), TYPE_NEWWORLD_MACIO)
+OBJECT_DECLARE_SIMPLE_TYPE(NewWorldMacIOState, NEWWORLD_MACIO)
-typedef struct NewWorldMacIOState {
+struct NewWorldMacIOState {
/*< private >*/
MacIOState parent_obj;
/*< public >*/
bool has_pmu;
bool has_adb;
- OpenPICState *pic;
+ OpenPICState pic;
MACIOIDEState ide[2];
MacIOGPIOState gpio;
-} NewWorldMacIOState;
+};
#endif /* MACIO_H */
diff --git a/include/hw/misc/macio/pmu.h b/include/hw/misc/macio/pmu.h
index 72f75612b6..ceb12082ae 100644
--- a/include/hw/misc/macio/pmu.h
+++ b/include/hw/misc/macio/pmu.h
@@ -10,8 +10,10 @@
#ifndef PMU_H
#define PMU_H
+#include "hw/input/adb.h"
#include "hw/misc/mos6522.h"
#include "hw/misc/macio/gpio.h"
+#include "qom/object.h"
/*
* PMU commands
@@ -74,7 +76,7 @@
#define PMU_INT_WAITING_CHARGER 0x01 /* ??? */
#define PMU_INT_AUTO_SRQ_POLL 0x02 /* ??? */
-/* Bits in the environement message (either obtained via PMU_GET_COVER,
+/* Bits in the environment message (either obtained via PMU_GET_COVER,
* or via PMU_INT_ENVIRONMENT on core99 */
#define PMU_ENV_LID_CLOSED 0x01 /* The lid is closed */
@@ -173,28 +175,25 @@ typedef enum {
} PMUCmdState;
/* MOS6522 PMU */
-typedef struct MOS6522PMUState {
+struct MOS6522PMUState {
/*< private >*/
MOS6522State parent_obj;
-} MOS6522PMUState;
+};
#define TYPE_MOS6522_PMU "mos6522-pmu"
-#define MOS6522_PMU(obj) OBJECT_CHECK(MOS6522PMUState, (obj), \
- TYPE_MOS6522_PMU)
+OBJECT_DECLARE_SIMPLE_TYPE(MOS6522PMUState, MOS6522_PMU)
/**
* PMUState:
* @last_b: last value of B register
*/
-typedef struct PMUState {
+struct PMUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mem;
uint64_t frequency;
- qemu_irq via_irq;
- bool via_irq_state;
/* PMU state */
MOS6522PMUState mos6522_pmu;
@@ -228,9 +227,9 @@ typedef struct PMUState {
/* GPIO */
MacIOGPIOState *gpio;
-} PMUState;
+};
#define TYPE_VIA_PMU "via-pmu"
-#define VIA_PMU(obj) OBJECT_CHECK(PMUState, (obj), TYPE_VIA_PMU)
+OBJECT_DECLARE_SIMPLE_TYPE(PMUState, VIA_PMU)
#endif /* PMU_H */
diff --git a/include/hw/misc/mchp_pfsoc_dmc.h b/include/hw/misc/mchp_pfsoc_dmc.h
new file mode 100644
index 0000000000..3bc1581e0f
--- /dev/null
+++ b/include/hw/misc/mchp_pfsoc_dmc.h
@@ -0,0 +1,58 @@
+/*
+ * Microchip PolarFire SoC DDR Memory Controller module emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MCHP_PFSOC_DMC_H
+#define MCHP_PFSOC_DMC_H
+
+#include "hw/sysbus.h"
+
+/* DDR SGMII PHY module */
+
+#define MCHP_PFSOC_DDR_SGMII_PHY_REG_SIZE 0x1000
+
+typedef struct MchpPfSoCDdrSgmiiPhyState {
+ SysBusDevice parent;
+ MemoryRegion sgmii_phy;
+} MchpPfSoCDdrSgmiiPhyState;
+
+#define TYPE_MCHP_PFSOC_DDR_SGMII_PHY "mchp.pfsoc.ddr_sgmii_phy"
+
+#define MCHP_PFSOC_DDR_SGMII_PHY(obj) \
+ OBJECT_CHECK(MchpPfSoCDdrSgmiiPhyState, (obj), \
+ TYPE_MCHP_PFSOC_DDR_SGMII_PHY)
+
+/* DDR CFG module */
+
+#define MCHP_PFSOC_DDR_CFG_REG_SIZE 0x40000
+
+typedef struct MchpPfSoCDdrCfgState {
+ SysBusDevice parent;
+ MemoryRegion cfg;
+} MchpPfSoCDdrCfgState;
+
+#define TYPE_MCHP_PFSOC_DDR_CFG "mchp.pfsoc.ddr_cfg"
+
+#define MCHP_PFSOC_DDR_CFG(obj) \
+ OBJECT_CHECK(MchpPfSoCDdrCfgState, (obj), \
+ TYPE_MCHP_PFSOC_DDR_CFG)
+
+#endif /* MCHP_PFSOC_DMC_H */
diff --git a/include/hw/misc/mchp_pfsoc_ioscb.h b/include/hw/misc/mchp_pfsoc_ioscb.h
new file mode 100644
index 0000000000..3fd3e74966
--- /dev/null
+++ b/include/hw/misc/mchp_pfsoc_ioscb.h
@@ -0,0 +1,56 @@
+/*
+ * Microchip PolarFire SoC IOSCB module emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MCHP_PFSOC_IOSCB_H
+#define MCHP_PFSOC_IOSCB_H
+
+#include "hw/sysbus.h"
+
+typedef struct MchpPfSoCIoscbState {
+ SysBusDevice parent;
+ MemoryRegion container;
+ MemoryRegion lane01;
+ MemoryRegion lane23;
+ MemoryRegion ctrl;
+ MemoryRegion qspixip;
+ MemoryRegion mailbox;
+ MemoryRegion cfg;
+ MemoryRegion ccc;
+ MemoryRegion pll_mss;
+ MemoryRegion cfm_mss;
+ MemoryRegion pll_ddr;
+ MemoryRegion bc_ddr;
+ MemoryRegion io_calib_ddr;
+ MemoryRegion pll_sgmii;
+ MemoryRegion dll_sgmii;
+ MemoryRegion cfm_sgmii;
+ MemoryRegion bc_sgmii;
+ MemoryRegion io_calib_sgmii;
+ qemu_irq irq;
+} MchpPfSoCIoscbState;
+
+#define TYPE_MCHP_PFSOC_IOSCB "mchp.pfsoc.ioscb"
+
+#define MCHP_PFSOC_IOSCB(obj) \
+ OBJECT_CHECK(MchpPfSoCIoscbState, (obj), TYPE_MCHP_PFSOC_IOSCB)
+
+#endif /* MCHP_PFSOC_IOSCB_H */
diff --git a/include/hw/misc/mchp_pfsoc_sysreg.h b/include/hw/misc/mchp_pfsoc_sysreg.h
new file mode 100644
index 0000000000..c2232bd28d
--- /dev/null
+++ b/include/hw/misc/mchp_pfsoc_sysreg.h
@@ -0,0 +1,42 @@
+/*
+ * Microchip PolarFire SoC SYSREG module emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MCHP_PFSOC_SYSREG_H
+#define MCHP_PFSOC_SYSREG_H
+
+#include "hw/sysbus.h"
+
+#define MCHP_PFSOC_SYSREG_REG_SIZE 0x2000
+
+typedef struct MchpPfSoCSysregState {
+ SysBusDevice parent;
+ MemoryRegion sysreg;
+ qemu_irq irq;
+} MchpPfSoCSysregState;
+
+#define TYPE_MCHP_PFSOC_SYSREG "mchp.pfsoc.sysreg"
+
+#define MCHP_PFSOC_SYSREG(obj) \
+ OBJECT_CHECK(MchpPfSoCSysregState, (obj), \
+ TYPE_MCHP_PFSOC_SYSREG)
+
+#endif /* MCHP_PFSOC_SYSREG_H */
diff --git a/include/hw/misc/mips_cmgcr.h b/include/hw/misc/mips_cmgcr.h
index 3e6e223273..db4bf5f449 100644
--- a/include/hw/misc/mips_cmgcr.h
+++ b/include/hw/misc/mips_cmgcr.h
@@ -11,9 +11,10 @@
#define MIPS_CMGCR_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_MIPS_GCR "mips-gcr"
-#define MIPS_GCR(obj) OBJECT_CHECK(MIPSGCRState, (obj), TYPE_MIPS_GCR)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSGCRState, MIPS_GCR)
#define GCR_BASE_ADDR 0x1fbf8000ULL
#define GCR_ADDRSPACE_SZ 0x8000
@@ -70,12 +71,11 @@ struct MIPSGCRVPState {
uint64_t reset_base;
};
-typedef struct MIPSGCRState MIPSGCRState;
struct MIPSGCRState {
SysBusDevice parent_obj;
int32_t gcr_rev;
- int32_t num_vps;
+ uint32_t num_vps;
hwaddr gcr_base;
MemoryRegion iomem;
MemoryRegion *cpc_mr;
diff --git a/include/hw/misc/mips_cpc.h b/include/hw/misc/mips_cpc.h
index 3f670578b0..fcafbd5e00 100644
--- a/include/hw/misc/mips_cpc.h
+++ b/include/hw/misc/mips_cpc.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,6 +21,7 @@
#define MIPS_CPC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define CPC_ADDRSPACE_SZ 0x6000
@@ -34,9 +35,9 @@
#define CPC_VP_RUNNING_OFS 0x30
#define TYPE_MIPS_CPC "mips-cpc"
-#define MIPS_CPC(obj) OBJECT_CHECK(MIPSCPCState, (obj), TYPE_MIPS_CPC)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSCPCState, MIPS_CPC)
-typedef struct MIPSCPCState {
+struct MIPSCPCState {
SysBusDevice parent_obj;
uint32_t num_vp;
@@ -44,6 +45,6 @@ typedef struct MIPSCPCState {
MemoryRegion mr;
uint64_t vp_running; /* Indicates which VPs are in the run state */
-} MIPSCPCState;
+};
#endif /* MIPS_CPC_H */
diff --git a/include/hw/misc/mips_itu.h b/include/hw/misc/mips_itu.h
index c44e7672b6..27c9a1090d 100644
--- a/include/hw/misc/mips_itu.h
+++ b/include/hw/misc/mips_itu.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,9 +21,10 @@
#define MIPS_ITU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_MIPS_ITU "mips-itu"
-#define MIPS_ITU(obj) OBJECT_CHECK(MIPSITUState, (obj), TYPE_MIPS_ITU)
+OBJECT_DECLARE_SIMPLE_TYPE(MIPSITUState, MIPS_ITU)
#define ITC_CELL_DEPTH_SHIFT 2
#define ITC_CELL_DEPTH (1u << ITC_CELL_DEPTH_SHIFT)
@@ -51,13 +52,13 @@ typedef struct ITCStorageCell {
#define ITC_ADDRESSMAP_NUM 2
-typedef struct MIPSITUState {
+struct MIPSITUState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
- int32_t num_fifo;
- int32_t num_semaphores;
+ uint32_t num_fifo;
+ uint32_t num_semaphores;
/* ITC Storage */
ITCStorageCell *cell;
@@ -69,12 +70,7 @@ typedef struct MIPSITUState {
/* ITU Control Register */
uint64_t icr0;
-
- /* SAAR */
- bool saar_present;
- void *saar;
-
-} MIPSITUState;
+};
/* Get ITC Configuration Tag memory region. */
MemoryRegion *mips_itu_get_tag_region(MIPSITUState *itu);
diff --git a/include/hw/misc/mos6522.h b/include/hw/misc/mos6522.h
index 97384c6e02..fba45668ab 100644
--- a/include/hw/misc/mos6522.h
+++ b/include/hw/misc/mos6522.h
@@ -27,9 +27,11 @@
#ifndef MOS6522_H
#define MOS6522_H
-#include "exec/memory.h"
+#include "exec/hwaddr.h"
#include "hw/sysbus.h"
-#include "hw/input/adb.h"
+#include "qom/object.h"
+
+#define MOS6522_NUM_REGS 16
/* Bits in ACR */
#define SR_CTRL 0x1c /* Shift register control bits */
@@ -40,18 +42,43 @@
#define IER_SET 0x80 /* set bits in IER */
#define IER_CLR 0 /* clear bits in IER */
-#define CA2_INT 0x01
-#define CA1_INT 0x02
-#define SR_INT 0x04 /* Shift register full/empty */
-#define CB2_INT 0x08
-#define CB1_INT 0x10
-#define T2_INT 0x20 /* Timer 2 interrupt */
-#define T1_INT 0x40 /* Timer 1 interrupt */
+#define CA2_INT_BIT 0
+#define CA1_INT_BIT 1
+#define SR_INT_BIT 2 /* Shift register full/empty */
+#define CB2_INT_BIT 3
+#define CB1_INT_BIT 4
+#define T2_INT_BIT 5 /* Timer 2 interrupt */
+#define T1_INT_BIT 6 /* Timer 1 interrupt */
+
+#define CA2_INT BIT(CA2_INT_BIT)
+#define CA1_INT BIT(CA1_INT_BIT)
+#define SR_INT BIT(SR_INT_BIT)
+#define CB2_INT BIT(CB2_INT_BIT)
+#define CB1_INT BIT(CB1_INT_BIT)
+#define T2_INT BIT(T2_INT_BIT)
+#define T1_INT BIT(T1_INT_BIT)
+
+#define VIA_NUM_INTS 5
/* Bits in ACR */
#define T1MODE 0xc0 /* Timer 1 mode */
#define T1MODE_CONT 0x40 /* continuous interrupts */
+/* Bits in PCR */
+#define CB2_CTRL_MASK 0xe0
+#define CB2_CTRL_SHIFT 5
+#define CB1_CTRL_MASK 0x10
+#define CB1_CTRL_SHIFT 4
+#define CA2_CTRL_MASK 0x0e
+#define CA2_CTRL_SHIFT 1
+#define CA1_CTRL_MASK 0x1
+#define CA1_CTRL_SHIFT 0
+
+#define C2_POS 0x2
+#define C2_IND 0x1
+
+#define C1_POS 0x1
+
/* VIA registers */
#define VIA_REG_B 0x00
#define VIA_REG_A 0x01
@@ -99,7 +126,7 @@ typedef struct MOS6522Timer {
* @last_b: last value of B register
* @last_acr: last value of ACR register
*/
-typedef struct MOS6522State {
+struct MOS6522State {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -120,34 +147,31 @@ typedef struct MOS6522State {
uint64_t frequency;
qemu_irq irq;
-} MOS6522State;
+ uint8_t last_irq_levels;
+};
#define TYPE_MOS6522 "mos6522"
-#define MOS6522(obj) OBJECT_CHECK(MOS6522State, (obj), TYPE_MOS6522)
+OBJECT_DECLARE_TYPE(MOS6522State, MOS6522DeviceClass, MOS6522)
-typedef struct MOS6522DeviceClass {
+struct MOS6522DeviceClass {
DeviceClass parent_class;
- DeviceReset parent_reset;
- void (*set_sr_int)(MOS6522State *dev);
+ ResettablePhases parent_phases;
void (*portB_write)(MOS6522State *dev);
void (*portA_write)(MOS6522State *dev);
- void (*update_irq)(MOS6522State *dev);
/* These are used to influence the CUDA MacOS timebase calibration */
uint64_t (*get_timer1_counter_value)(MOS6522State *dev, MOS6522Timer *ti);
uint64_t (*get_timer2_counter_value)(MOS6522State *dev, MOS6522Timer *ti);
uint64_t (*get_timer1_load_time)(MOS6522State *dev, MOS6522Timer *ti);
uint64_t (*get_timer2_load_time)(MOS6522State *dev, MOS6522Timer *ti);
-} MOS6522DeviceClass;
+};
-#define MOS6522_DEVICE_CLASS(cls) \
- OBJECT_CLASS_CHECK(MOS6522DeviceClass, (cls), TYPE_MOS6522)
-#define MOS6522_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(MOS6522DeviceClass, (obj), TYPE_MOS6522)
extern const VMStateDescription vmstate_mos6522;
uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size);
void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size);
+void hmp_info_via(Monitor *mon, const QDict *qdict);
+
#endif /* MOS6522_H */
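A minimal sketch of how the new PCR field masks above could be used, assuming MOS6522State keeps the peripheral control register in a field named pcr (that field is not shown in this hunk):

    /* Decode the 3-bit CB2 control mode from the PCR. */
    uint8_t cb2_ctrl = (s->pcr & CB2_CTRL_MASK) >> CB2_CTRL_SHIFT;
    bool cb2_positive    = cb2_ctrl & C2_POS;   /* positive-edge variant selected */
    bool cb2_independent = cb2_ctrl & C2_IND;   /* independent interrupt mode */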
diff --git a/include/hw/misc/mps2-fpgaio.h b/include/hw/misc/mps2-fpgaio.h
index 69e265cd4b..7b8bd604de 100644
--- a/include/hw/misc/mps2-fpgaio.h
+++ b/include/hw/misc/mps2-fpgaio.h
@@ -12,7 +12,7 @@
/* This is a model of the FPGAIO register block in the AN505
* FPGA image for the MPS2 dev board; it is documented in the
* application note:
- * http://infocenter.arm.com/help/topic/com.arm.doc.dai0505b/index.html
+ * https://developer.arm.com/documentation/dai0505/latest/
*
* QEMU interface:
* + sysbus MMIO region 0: the register bank
@@ -22,20 +22,29 @@
#define MPS2_FPGAIO_H
#include "hw/sysbus.h"
+#include "hw/misc/led.h"
+#include "qom/object.h"
#define TYPE_MPS2_FPGAIO "mps2-fpgaio"
-#define MPS2_FPGAIO(obj) OBJECT_CHECK(MPS2FPGAIO, (obj), TYPE_MPS2_FPGAIO)
+OBJECT_DECLARE_SIMPLE_TYPE(MPS2FPGAIO, MPS2_FPGAIO)
-typedef struct {
+#define MPS2FPGAIO_MAX_LEDS 32
+
+struct MPS2FPGAIO {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
+ LEDState *led[MPS2FPGAIO_MAX_LEDS];
+ uint32_t num_leds;
+ bool has_switches;
+ bool has_dbgctrl;
uint32_t led0;
uint32_t prescale;
uint32_t misc;
+ uint32_t dbgctrl;
/* QEMU_CLOCK_VIRTUAL time at which counter and pscntr were last synced */
int64_t pscntr_sync_ticks;
@@ -48,6 +57,6 @@ typedef struct {
/* These hold the CLOCK_VIRTUAL ns tick when the CLK1HZ/CLK100HZ was zero */
int64_t clk1hz_tick_offset;
int64_t clk100hz_tick_offset;
-} MPS2FPGAIO;
+};
#endif
diff --git a/include/hw/misc/mps2-scc.h b/include/hw/misc/mps2-scc.h
index 7045473788..8ff188c06b 100644
--- a/include/hw/misc/mps2-scc.h
+++ b/include/hw/misc/mps2-scc.h
@@ -9,26 +9,49 @@
* (at your option) any later version.
*/
+/*
+ * This is a model of the Serial Communication Controller (SCC)
+ * block found in most MPS FPGA images.
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: the register bank
+ * + QOM property "scc-cfg4": value of the read-only CFG4 register
+ * + QOM property "scc-aid": value of the read-only SCC_AID register
+ * + QOM property "scc-id": value of the read-only SCC_ID register
+ * + QOM property "scc-cfg0": reset value of the CFG0 register
+ * + QOM property array "oscclk": reset values of the OSCCLK registers
+ * (which are accessed via the SYS_CFG channel provided by this device)
+ * + named GPIO output "remap": this tracks the value of CFG0 register
+ * bit 0. Boards where this bit controls memory remapping should
+ * connect this GPIO line to a function performing that mapping.
+ * Boards where bit 0 has no special function should leave the GPIO
+ * output disconnected.
+ */
#ifndef MPS2_SCC_H
#define MPS2_SCC_H
#include "hw/sysbus.h"
+#include "hw/misc/led.h"
+#include "qom/object.h"
#define TYPE_MPS2_SCC "mps2-scc"
-#define MPS2_SCC(obj) OBJECT_CHECK(MPS2SCC, (obj), TYPE_MPS2_SCC)
+OBJECT_DECLARE_SIMPLE_TYPE(MPS2SCC, MPS2_SCC)
-#define NUM_OSCCLK 3
-
-typedef struct {
+struct MPS2SCC {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
+ LEDState *led[8];
uint32_t cfg0;
uint32_t cfg1;
+ uint32_t cfg2;
uint32_t cfg4;
+ uint32_t cfg5;
+ uint32_t cfg6;
+ uint32_t cfg7;
uint32_t cfgdata_rtn;
uint32_t cfgdata_out;
uint32_t cfgctrl;
@@ -36,8 +59,12 @@ typedef struct {
uint32_t dll;
uint32_t aid;
uint32_t id;
- uint32_t oscclk[NUM_OSCCLK];
- uint32_t oscclk_reset[NUM_OSCCLK];
-} MPS2SCC;
+ uint32_t num_oscclk;
+ uint32_t *oscclk;
+ uint32_t *oscclk_reset;
+ uint32_t cfg0_reset;
+
+ qemu_irq remap;
+};
#endif
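For illustration only, a board model could set the SCC's read-only ID properties described in the comment block above roughly as follows; the mms->scc field, the register values and the MMIO address are placeholders, not taken from this patch:

    object_initialize_child(OBJECT(mms), "scc", &mms->scc, TYPE_MPS2_SCC);
    qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-cfg4", 0x2);        /* placeholder */
    qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-aid", 0x00200008);  /* placeholder */
    qdev_prop_set_uint32(DEVICE(&mms->scc), "scc-id", 0x41045050);   /* placeholder */
    sysbus_realize(SYS_BUS_DEVICE(&mms->scc), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(&mms->scc), 0, 0x40028000);       /* placeholder */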
diff --git a/include/hw/misc/msf2-sysreg.h b/include/hw/misc/msf2-sysreg.h
index 5993f67b4e..fc1890e710 100644
--- a/include/hw/misc/msf2-sysreg.h
+++ b/include/hw/misc/msf2-sysreg.h
@@ -26,6 +26,7 @@
#define HW_MSF2_SYSREG_H
#include "hw/sysbus.h"
+#include "qom/object.h"
enum {
ESRAM_CR = 0x00 / 4,
@@ -61,9 +62,9 @@ enum {
#define MSF2_SYSREG_MMIO_SIZE 0x300
#define TYPE_MSF2_SYSREG "msf2-sysreg"
-#define MSF2_SYSREG(obj) OBJECT_CHECK(MSF2SysregState, (obj), TYPE_MSF2_SYSREG)
+OBJECT_DECLARE_SIMPLE_TYPE(MSF2SysregState, MSF2_SYSREG)
-typedef struct MSF2SysregState {
+struct MSF2SysregState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -72,6 +73,6 @@ typedef struct MSF2SysregState {
uint8_t apb1div;
uint32_t regs[MSF2_SYSREG_MMIO_SIZE / 4];
-} MSF2SysregState;
+};
#endif /* HW_MSF2_SYSREG_H */
diff --git a/include/hw/misc/npcm7xx_clk.h b/include/hw/misc/npcm7xx_clk.h
new file mode 100644
index 0000000000..5ed4a4672b
--- /dev/null
+++ b/include/hw/misc/npcm7xx_clk.h
@@ -0,0 +1,180 @@
+/*
+ * Nuvoton NPCM7xx Clock Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_CLK_H
+#define NPCM7XX_CLK_H
+
+#include "exec/memory.h"
+#include "hw/clock.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_CLK_NR_REGS (0x70 / sizeof(uint32_t))
+
+#define NPCM7XX_WATCHDOG_RESET_GPIO_IN "npcm7xx-clk-watchdog-reset-gpio-in"
+
+/* Maximum amount of clock inputs in a SEL module. */
+#define NPCM7XX_CLK_SEL_MAX_INPUT 5
+
+/* PLLs in CLK module. */
+typedef enum NPCM7xxClockPLL {
+ NPCM7XX_CLOCK_PLL0,
+ NPCM7XX_CLOCK_PLL1,
+ NPCM7XX_CLOCK_PLL2,
+ NPCM7XX_CLOCK_PLLG,
+ NPCM7XX_CLOCK_NR_PLLS,
+} NPCM7xxClockPLL;
+
+/* SEL/MUX in CLK module. */
+typedef enum NPCM7xxClockSEL {
+ NPCM7XX_CLOCK_PIXCKSEL,
+ NPCM7XX_CLOCK_MCCKSEL,
+ NPCM7XX_CLOCK_CPUCKSEL,
+ NPCM7XX_CLOCK_CLKOUTSEL,
+ NPCM7XX_CLOCK_UARTCKSEL,
+ NPCM7XX_CLOCK_TIMCKSEL,
+ NPCM7XX_CLOCK_SDCKSEL,
+ NPCM7XX_CLOCK_GFXMSEL,
+ NPCM7XX_CLOCK_SUCKSEL,
+ NPCM7XX_CLOCK_NR_SELS,
+} NPCM7xxClockSEL;
+
+/* Dividers in CLK module. */
+typedef enum NPCM7xxClockDivider {
+ NPCM7XX_CLOCK_PLL1D2, /* PLL1/2 */
+ NPCM7XX_CLOCK_PLL2D2, /* PLL2/2 */
+ NPCM7XX_CLOCK_MC_DIVIDER,
+ NPCM7XX_CLOCK_AXI_DIVIDER,
+ NPCM7XX_CLOCK_AHB_DIVIDER,
+ NPCM7XX_CLOCK_AHB3_DIVIDER,
+ NPCM7XX_CLOCK_SPI0_DIVIDER,
+ NPCM7XX_CLOCK_SPIX_DIVIDER,
+ NPCM7XX_CLOCK_APB1_DIVIDER,
+ NPCM7XX_CLOCK_APB2_DIVIDER,
+ NPCM7XX_CLOCK_APB3_DIVIDER,
+ NPCM7XX_CLOCK_APB4_DIVIDER,
+ NPCM7XX_CLOCK_APB5_DIVIDER,
+ NPCM7XX_CLOCK_CLKOUT_DIVIDER,
+ NPCM7XX_CLOCK_UART_DIVIDER,
+ NPCM7XX_CLOCK_TIMER_DIVIDER,
+ NPCM7XX_CLOCK_ADC_DIVIDER,
+ NPCM7XX_CLOCK_MMC_DIVIDER,
+ NPCM7XX_CLOCK_SDHC_DIVIDER,
+ NPCM7XX_CLOCK_GFXM_DIVIDER, /* divide by 3 */
+ NPCM7XX_CLOCK_UTMI_DIVIDER,
+ NPCM7XX_CLOCK_NR_DIVIDERS,
+} NPCM7xxClockConverter;
+
+typedef struct NPCM7xxCLKState NPCM7xxCLKState;
+
+/**
+ * struct NPCM7xxClockPLLState - A PLL module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @clock_in: The input clock of this module.
+ * @clock_out: The output clock of this module.
+ * @reg: The control registers for this PLL module.
+ */
+typedef struct NPCM7xxClockPLLState {
+ DeviceState parent;
+
+ const char *name;
+ NPCM7xxCLKState *clk;
+ Clock *clock_in;
+ Clock *clock_out;
+
+ int reg;
+} NPCM7xxClockPLLState;
+
+/**
+ * struct NPCM7xxClockSELState - A SEL module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @input_size: The number of input clocks of this module.
+ * @clock_in: The input clocks of this module.
+ * @clock_out: The output clocks of this module.
+ * @offset: The offset of this module in the control register.
+ * @len: The length of this module in the control register.
+ */
+typedef struct NPCM7xxClockSELState {
+ DeviceState parent;
+
+ const char *name;
+ NPCM7xxCLKState *clk;
+ uint8_t input_size;
+ Clock *clock_in[NPCM7XX_CLK_SEL_MAX_INPUT];
+ Clock *clock_out;
+
+ int offset;
+ int len;
+} NPCM7xxClockSELState;
+
+/**
+ * struct NPCM7xxClockDividerState - A Divider module in CLK module.
+ * @name: The name of the module.
+ * @clk: The CLK module that owns this module.
+ * @clock_in: The input clock of this module.
+ * @clock_out: The output clock of this module.
+ * @divide: The function the divider uses to divide the input.
+ * @reg: The index of the control register that contains the divisor.
+ * @offset: The offset of the divisor in the control register.
+ * @len: The length of the divisor in the control register.
+ * @divisor: The divisor for a constant divisor
+ */
+typedef struct NPCM7xxClockDividerState {
+ DeviceState parent;
+
+ const char *name;
+ NPCM7xxCLKState *clk;
+ Clock *clock_in;
+ Clock *clock_out;
+
+ uint32_t (*divide)(struct NPCM7xxClockDividerState *s);
+ union {
+ struct {
+ int reg;
+ int offset;
+ int len;
+ };
+ int divisor;
+ };
+} NPCM7xxClockDividerState;
+
+struct NPCM7xxCLKState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ /* Clock converters */
+ NPCM7xxClockPLLState plls[NPCM7XX_CLOCK_NR_PLLS];
+ NPCM7xxClockSELState sels[NPCM7XX_CLOCK_NR_SELS];
+ NPCM7xxClockDividerState dividers[NPCM7XX_CLOCK_NR_DIVIDERS];
+
+ uint32_t regs[NPCM7XX_CLK_NR_REGS];
+
+ /* Time reference for SECCNT and CNTR25M, initialized by power on reset */
+ int64_t ref_ns;
+
+ /* The incoming reference clock. */
+ Clock *clkref;
+};
+
+#define TYPE_NPCM7XX_CLK "npcm7xx-clk"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxCLKState, NPCM7XX_CLK)
+
+#endif /* NPCM7XX_CLK_H */
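A sketch of a constant-divisor callback matching the NPCM7xxClockDividerState.divide signature declared above; this is illustrative only, the real callbacks live in the corresponding .c file:

    static uint32_t divide_by_constant(NPCM7xxClockDividerState *s)
    {
        /* Output frequency is the input frequency over the fixed divisor. */
        return clock_get_hz(s->clock_in) / s->divisor;
    }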
diff --git a/include/hw/misc/npcm7xx_gcr.h b/include/hw/misc/npcm7xx_gcr.h
new file mode 100644
index 0000000000..c0bbdda77e
--- /dev/null
+++ b/include/hw/misc/npcm7xx_gcr.h
@@ -0,0 +1,73 @@
+/*
+ * Nuvoton NPCM7xx System Global Control Registers.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_GCR_H
+#define NPCM7XX_GCR_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/*
+ * NPCM7XX PWRON STRAP bit fields
+ * 12: SPI0 powered by VSBV3 at 1.8V
+ * 11: System flash attached to BMC
+ * 10: BSP alternative pins.
+ * 9:8: Flash UART command route enabled.
+ * 7: Security enabled.
+ * 6: HI-Z state control.
+ * 5: ECC disabled.
+ * 4: Reserved
+ * 3: JTAG2 enabled.
+ * 2:0: CPU and DRAM clock frequency.
+ */
+#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12)
+#define NPCM7XX_PWRON_STRAP_SFAB BIT(11)
+#define NPCM7XX_PWRON_STRAP_BSPA BIT(10)
+#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8)
+#define FUP_NORM_UART2 3
+#define FUP_PROG_UART3 2
+#define FUP_PROG_UART2 1
+#define FUP_NORM_UART3 0
+#define NPCM7XX_PWRON_STRAP_SECEN BIT(7)
+#define NPCM7XX_PWRON_STRAP_HIZ BIT(6)
+#define NPCM7XX_PWRON_STRAP_ECC BIT(5)
+#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4)
+#define NPCM7XX_PWRON_STRAP_J2EN BIT(3)
+#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x)
+#define CKFRQ_SKIPINIT 0x000
+#define CKFRQ_DEFAULT 0x111
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_GCR_NR_REGS (0x148 / sizeof(uint32_t))
+
+struct NPCM7xxGCRState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint32_t regs[NPCM7XX_GCR_NR_REGS];
+
+ uint32_t reset_pwron;
+ uint32_t reset_mdlr;
+ uint32_t reset_intcr3;
+};
+
+#define TYPE_NPCM7XX_GCR "npcm7xx-gcr"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxGCRState, NPCM7XX_GCR)
+
+#endif /* NPCM7XX_GCR_H */
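The strap macros above compose a power-on configuration word; a sketch of building one (the particular strap combination is only an example):

    /* Example: SPI0 at 1.8V, system flash on the BMC, UART command routing in
     * "normal, UART2" mode, default CPU/DRAM clock frequency. */
    uint32_t pwron = NPCM7XX_PWRON_STRAP_SPI0F18 |
                     NPCM7XX_PWRON_STRAP_SFAB |
                     NPCM7XX_PWRON_STRAP_FUP(FUP_NORM_UART2) |
                     NPCM7XX_PWRON_STRAP_CKFRQ(CKFRQ_DEFAULT);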
diff --git a/include/hw/misc/npcm7xx_mft.h b/include/hw/misc/npcm7xx_mft.h
new file mode 100644
index 0000000000..d6384382ce
--- /dev/null
+++ b/include/hw/misc/npcm7xx_mft.h
@@ -0,0 +1,69 @@
+/*
+ * Nuvoton NPCM7xx MFT Module
+ *
+ * Copyright 2021 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_MFT_H
+#define NPCM7XX_MFT_H
+
+#include "exec/memory.h"
+#include "hw/clock.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+/* Max Fan input number. */
+#define NPCM7XX_MFT_MAX_FAN_INPUT 19
+
+/*
+ * Number of registers in one MFT module. Don't change this without increasing
+ * the version_id in vmstate.
+ */
+#define NPCM7XX_MFT_NR_REGS (0x20 / sizeof(uint16_t))
+
+/*
+ * The MFT can take up to 4 inputs: A0, B0, A1, B1. It can measure one A and one
+ * B simultaneously. NPCM7XX_MFT_INASEL and NPCM7XX_MFT_INBSEL are used to
+ * select which A or B input is used.
+ */
+#define NPCM7XX_MFT_FANIN_COUNT 4
+
+/**
+ * struct NPCM7xxMFTState - Multi Functional Tachometer device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @clock_in: The input clock for MFT from CLK module.
+ * @clock_{1,2}: The counter clocks for NPCM7XX_MFT_CNT{1,2}
+ * @irq: The IRQ for this MFT state.
+ * @regs: The MMIO registers.
+ * @max_rpm: The maximum rpm for fans. Order: A0, B0, A1, B1.
+ * @duty: The duty cycles for fans, relative to NPCM7XX_PWM_MAX_DUTY.
+ */
+struct NPCM7xxMFTState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ Clock *clock_in;
+ Clock *clock_1, *clock_2;
+ qemu_irq irq;
+ uint16_t regs[NPCM7XX_MFT_NR_REGS];
+
+ uint32_t max_rpm[NPCM7XX_MFT_FANIN_COUNT];
+ uint32_t duty[NPCM7XX_MFT_FANIN_COUNT];
+};
+
+#define TYPE_NPCM7XX_MFT "npcm7xx-mft"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxMFTState, NPCM7XX_MFT)
+
+#endif /* NPCM7XX_MFT_H */
diff --git a/include/hw/misc/npcm7xx_pwm.h b/include/hw/misc/npcm7xx_pwm.h
new file mode 100644
index 0000000000..bf953440ac
--- /dev/null
+++ b/include/hw/misc/npcm7xx_pwm.h
@@ -0,0 +1,106 @@
+/*
+ * Nuvoton NPCM7xx PWM Module
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_PWM_H
+#define NPCM7XX_PWM_H
+
+#include "hw/clock.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+
+/* Each PWM module holds 4 PWM channels. */
+#define NPCM7XX_PWM_PER_MODULE 4
+
+/*
+ * Number of registers in one pwm module. Don't change this without increasing
+ * the version_id in vmstate.
+ */
+#define NPCM7XX_PWM_NR_REGS (0x54 / sizeof(uint32_t))
+
+/*
+ * The maximum duty value. Each duty unit represents 1/NPCM7XX_PWM_MAX_DUTY
+ * cycles. For example, if NPCM7XX_PWM_MAX_DUTY=1,000,000 and a PWM has a duty
+ * value of 100,000 the duty cycle for that PWM is 10%.
+ */
+#define NPCM7XX_PWM_MAX_DUTY 1000000
+
+typedef struct NPCM7xxPWMState NPCM7xxPWMState;
+
+/**
+ * struct NPCM7xxPWM - The state of a single PWM channel.
+ * @module: The PWM module that contains this channel.
+ * @irq: GIC interrupt line to fire on expiration if enabled.
+ * @running: Whether this PWM channel is generating output.
+ * @inverted: Whether this PWM channel is inverted.
+ * @index: The index of this PWM channel.
+ * @cnr: The counter register.
+ * @cmr: The comparator register.
+ * @pdr: The data register.
+ * @pwdr: The watchdog register.
+ * @freq: The frequency of this PWM channel.
+ * @duty: The duty cycle of this PWM channel. One unit represents
+ * 1/NPCM7XX_PWM_MAX_DUTY cycles.
+ */
+typedef struct NPCM7xxPWM {
+ NPCM7xxPWMState *module;
+
+ qemu_irq irq;
+
+ bool running;
+ bool inverted;
+
+ uint8_t index;
+ uint32_t cnr;
+ uint32_t cmr;
+ uint32_t pdr;
+ uint32_t pwdr;
+
+ uint32_t freq;
+ uint32_t duty;
+} NPCM7xxPWM;
+
+/**
+ * struct NPCM7xxPWMState - Pulse Width Modulation device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @clock: The PWM clock.
+ * @pwm: The PWM channels owned by this module.
+ * @duty_gpio_out: The duty cycle of each PWM channel as an output GPIO.
+ * @ppr: The prescaler register.
+ * @csr: The clock selector register.
+ * @pcr: The control register.
+ * @pier: The interrupt enable register.
+ * @piir: The interrupt indication register.
+ */
+struct NPCM7xxPWMState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ Clock *clock;
+ NPCM7xxPWM pwm[NPCM7XX_PWM_PER_MODULE];
+ qemu_irq duty_gpio_out[NPCM7XX_PWM_PER_MODULE];
+
+ uint32_t ppr;
+ uint32_t csr;
+ uint32_t pcr;
+ uint32_t pier;
+ uint32_t piir;
+};
+
+#define TYPE_NPCM7XX_PWM "npcm7xx-pwm"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxPWMState, NPCM7XX_PWM)
+
+#endif /* NPCM7XX_PWM_H */
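To make the duty-cycle scaling above concrete, a short sketch of converting a raw duty value to a percentage:

    /* With NPCM7XX_PWM_MAX_DUTY == 1000000, a duty value of 100000 is 10%. */
    uint32_t duty = 100000;
    unsigned percent = (uint64_t)duty * 100 / NPCM7XX_PWM_MAX_DUTY;   /* -> 10 */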
diff --git a/include/hw/misc/npcm7xx_rng.h b/include/hw/misc/npcm7xx_rng.h
new file mode 100644
index 0000000000..650375dc2c
--- /dev/null
+++ b/include/hw/misc/npcm7xx_rng.h
@@ -0,0 +1,34 @@
+/*
+ * Nuvoton NPCM7xx Random Number Generator.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_RNG_H
+#define NPCM7XX_RNG_H
+
+#include "hw/sysbus.h"
+
+struct NPCM7xxRNGState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint8_t rngcs;
+ uint8_t rngd;
+ uint8_t rngmode;
+};
+
+#define TYPE_NPCM7XX_RNG "npcm7xx-rng"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxRNGState, NPCM7XX_RNG)
+
+#endif /* NPCM7XX_RNG_H */
diff --git a/include/hw/misc/nrf51_rng.h b/include/hw/misc/nrf51_rng.h
index b0133bf665..9aff9a76f8 100644
--- a/include/hw/misc/nrf51_rng.h
+++ b/include/hw/misc/nrf51_rng.h
@@ -36,8 +36,9 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define TYPE_NRF51_RNG "nrf51_soc.rng"
-#define NRF51_RNG(obj) OBJECT_CHECK(NRF51RNGState, (obj), TYPE_NRF51_RNG)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51RNGState, NRF51_RNG)
#define NRF51_RNG_SIZE 0x1000
@@ -54,7 +55,7 @@
#define NRF51_RNG_REG_CONFIG_DECEN 0
#define NRF51_RNG_REG_VALUE 0x508
-typedef struct {
+struct NRF51RNGState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -78,7 +79,7 @@ typedef struct {
uint32_t interrupt_enabled;
uint32_t filter_enabled;
-} NRF51RNGState;
+};
#endif /* NRF51_RNG_H */
diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h
index ae0c8188ce..fab94165d0 100644
--- a/include/hw/misc/pvpanic.h
+++ b/include/hw/misc/pvpanic.h
@@ -15,19 +15,23 @@
#ifndef HW_MISC_PVPANIC_H
#define HW_MISC_PVPANIC_H
+#include "exec/memory.h"
#include "qom/object.h"
-#define TYPE_PVPANIC "pvpanic"
+#define TYPE_PVPANIC_ISA_DEVICE "pvpanic"
+#define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci"
#define PVPANIC_IOPORT_PROP "ioport"
-static inline uint16_t pvpanic_port(void)
-{
- Object *o = object_resolve_path_type("", TYPE_PVPANIC, NULL);
- if (!o) {
- return 0;
- }
- return object_property_get_uint(o, PVPANIC_IOPORT_PROP, NULL);
-}
+/*
+ * PVPanicState for any device type
+ */
+typedef struct PVPanicState PVPanicState;
+struct PVPanicState {
+ MemoryRegion mr;
+ uint8_t events;
+};
+
+void pvpanic_setup_io(PVPanicState *s, DeviceState *dev, unsigned size);
#endif
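As a hedged sketch of the new interface (not the actual ISA/PCI frontend code added elsewhere in this series), a device embedding PVPanicState would call pvpanic_setup_io() from its realize hook and then expose the resulting memory region; the surrounding type, cast macro and field names below are assumptions:

    static void my_pvpanic_realize(DeviceState *dev, Error **errp)
    {
        MyPVPanicDevice *s = MY_PVPANIC(dev);   /* hypothetical frontend type */

        pvpanic_setup_io(&s->pvpanic, dev, 1);  /* 1-byte region backed by s->pvpanic.mr */
        isa_register_ioport(ISA_DEVICE(dev), &s->pvpanic.mr, 0x505);
    }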
diff --git a/include/hw/misc/sifive_e_aon.h b/include/hw/misc/sifive_e_aon.h
new file mode 100644
index 0000000000..2ae1c4139c
--- /dev/null
+++ b/include/hw/misc/sifive_e_aon.h
@@ -0,0 +1,60 @@
+/*
+ * SiFive HiFive1 AON (Always On Domain) interface.
+ *
+ * Copyright (c) 2022 SiFive, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_AON_H
+#define HW_SIFIVE_AON_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_SIFIVE_E_AON "riscv.sifive.e.aon"
+OBJECT_DECLARE_SIMPLE_TYPE(SiFiveEAONState, SIFIVE_E_AON)
+
+#define SIFIVE_E_AON_WDOGKEY (0x51F15E)
+#define SIFIVE_E_AON_WDOGFEED (0xD09F00D)
+#define SIFIVE_E_LFCLK_DEFAULT_FREQ (32768)
+
+enum {
+ SIFIVE_E_AON_WDT = 0x0,
+ SIFIVE_E_AON_RTC = 0x40,
+ SIFIVE_E_AON_LFROSC = 0x70,
+ SIFIVE_E_AON_BACKUP = 0x80,
+ SIFIVE_E_AON_PMU = 0x100,
+ SIFIVE_E_AON_MAX = 0x150
+};
+
+struct SiFiveEAONState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion mmio;
+
+ /*< watchdog timer >*/
+ QEMUTimer *wdog_timer;
+ qemu_irq wdog_irq;
+ uint64_t wdog_restart_time;
+ uint64_t wdogclk_freq;
+
+ uint32_t wdogcfg;
+ uint16_t wdogcmp0;
+ uint32_t wdogcount;
+ uint8_t wdogunlock;
+};
+
+#endif
diff --git a/include/hw/riscv/sifive_e_prci.h b/include/hw/misc/sifive_e_prci.h
index 698b0b451c..6aa949e910 100644
--- a/include/hw/riscv/sifive_e_prci.h
+++ b/include/hw/misc/sifive_e_prci.h
@@ -19,6 +19,8 @@
#ifndef HW_SIFIVE_E_PRCI_H
#define HW_SIFIVE_E_PRCI_H
+#include "hw/sysbus.h"
+
enum {
SIFIVE_E_PRCI_HFROSCCFG = 0x0,
SIFIVE_E_PRCI_HFXOSCCFG = 0x4,
@@ -51,10 +53,11 @@ enum {
#define TYPE_SIFIVE_E_PRCI "riscv.sifive.e.prci"
-#define SIFIVE_E_PRCI(obj) \
- OBJECT_CHECK(SiFiveEPRCIState, (obj), TYPE_SIFIVE_E_PRCI)
+typedef struct SiFiveEPRCIState SiFiveEPRCIState;
+DECLARE_INSTANCE_CHECKER(SiFiveEPRCIState, SIFIVE_E_PRCI,
+ TYPE_SIFIVE_E_PRCI)
-typedef struct SiFiveEPRCIState {
+struct SiFiveEPRCIState {
/*< private >*/
SysBusDevice parent_obj;
@@ -64,7 +67,7 @@ typedef struct SiFiveEPRCIState {
uint32_t hfxosccfg;
uint32_t pllcfg;
uint32_t plloutdiv;
-} SiFiveEPRCIState;
+};
DeviceState *sifive_e_prci_create(hwaddr addr);
diff --git a/include/hw/riscv/sifive_test.h b/include/hw/misc/sifive_test.h
index 1ec416ac1b..88a38d00c5 100644
--- a/include/hw/riscv/sifive_test.h
+++ b/include/hw/misc/sifive_test.h
@@ -20,19 +20,21 @@
#define HW_SIFIVE_TEST_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_SIFIVE_TEST "riscv.sifive.test"
-#define SIFIVE_TEST(obj) \
- OBJECT_CHECK(SiFiveTestState, (obj), TYPE_SIFIVE_TEST)
+typedef struct SiFiveTestState SiFiveTestState;
+DECLARE_INSTANCE_CHECKER(SiFiveTestState, SIFIVE_TEST,
+ TYPE_SIFIVE_TEST)
-typedef struct SiFiveTestState {
+struct SiFiveTestState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mmio;
-} SiFiveTestState;
+};
enum {
FINISHER_FAIL = 0x3333,
diff --git a/include/hw/riscv/sifive_u_otp.h b/include/hw/misc/sifive_u_otp.h
index 639297564a..170d2148f2 100644
--- a/include/hw/riscv/sifive_u_otp.h
+++ b/include/hw/misc/sifive_u_otp.h
@@ -19,6 +19,8 @@
#ifndef HW_SIFIVE_U_OTP_H
#define HW_SIFIVE_U_OTP_H
+#include "hw/sysbus.h"
+
#define SIFIVE_U_OTP_PA 0x00
#define SIFIVE_U_OTP_PAIO 0x04
#define SIFIVE_U_OTP_PAS 0x08
@@ -35,6 +37,8 @@
#define SIFIVE_U_OTP_PTRIM 0x34
#define SIFIVE_U_OTP_PWE 0x38
+#define SIFIVE_U_OTP_PWE_EN (1 << 0)
+
#define SIFIVE_U_OTP_PCE_EN (1 << 0)
#define SIFIVE_U_OTP_PDSTB_EN (1 << 0)
@@ -43,16 +47,18 @@
#define SIFIVE_U_OTP_PA_MASK 0xfff
#define SIFIVE_U_OTP_NUM_FUSES 0x1000
+#define SIFIVE_U_OTP_FUSE_WORD 4
#define SIFIVE_U_OTP_SERIAL_ADDR 0xfc
#define SIFIVE_U_OTP_REG_SIZE 0x1000
#define TYPE_SIFIVE_U_OTP "riscv.sifive.u.otp"
-#define SIFIVE_U_OTP(obj) \
- OBJECT_CHECK(SiFiveUOTPState, (obj), TYPE_SIFIVE_U_OTP)
+typedef struct SiFiveUOTPState SiFiveUOTPState;
+DECLARE_INSTANCE_CHECKER(SiFiveUOTPState, SIFIVE_U_OTP,
+ TYPE_SIFIVE_U_OTP)
-typedef struct SiFiveUOTPState {
+struct SiFiveUOTPState {
/*< private >*/
SysBusDevice parent_obj;
@@ -73,8 +79,10 @@ typedef struct SiFiveUOTPState {
uint32_t ptrim;
uint32_t pwe;
uint32_t fuse[SIFIVE_U_OTP_NUM_FUSES];
+ uint32_t fuse_wo[SIFIVE_U_OTP_NUM_FUSES];
/* config */
uint32_t serial;
-} SiFiveUOTPState;
+ BlockBackend *blk;
+};
#endif /* HW_SIFIVE_U_OTP_H */
diff --git a/include/hw/riscv/sifive_u_prci.h b/include/hw/misc/sifive_u_prci.h
index 0a531fdadc..4d2491ad46 100644
--- a/include/hw/riscv/sifive_u_prci.h
+++ b/include/hw/misc/sifive_u_prci.h
@@ -19,6 +19,8 @@
#ifndef HW_SIFIVE_U_PRCI_H
#define HW_SIFIVE_U_PRCI_H
+#include "hw/sysbus.h"
+
#define SIFIVE_U_PRCI_HFXOSCCFG 0x00
#define SIFIVE_U_PRCI_COREPLLCFG0 0x04
#define SIFIVE_U_PRCI_DDRPLLCFG0 0x0C
@@ -58,10 +60,11 @@
#define TYPE_SIFIVE_U_PRCI "riscv.sifive.u.prci"
-#define SIFIVE_U_PRCI(obj) \
- OBJECT_CHECK(SiFiveUPRCIState, (obj), TYPE_SIFIVE_U_PRCI)
+typedef struct SiFiveUPRCIState SiFiveUPRCIState;
+DECLARE_INSTANCE_CHECKER(SiFiveUPRCIState, SIFIVE_U_PRCI,
+ TYPE_SIFIVE_U_PRCI)
-typedef struct SiFiveUPRCIState {
+struct SiFiveUPRCIState {
/*< private >*/
SysBusDevice parent_obj;
@@ -76,7 +79,7 @@ typedef struct SiFiveUPRCIState {
uint32_t coreclksel;
uint32_t devicesreset;
uint32_t clkmuxstatus;
-} SiFiveUPRCIState;
+};
/*
* Clock indexes for use by Device Tree data and the PRCI driver.
diff --git a/include/hw/misc/stm32f2xx_syscfg.h b/include/hw/misc/stm32f2xx_syscfg.h
index 84e06fdecf..8595a3b31b 100644
--- a/include/hw/misc/stm32f2xx_syscfg.h
+++ b/include/hw/misc/stm32f2xx_syscfg.h
@@ -26,6 +26,7 @@
#define HW_STM32F2XX_SYSCFG_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define SYSCFG_MEMRMP 0x00
#define SYSCFG_PMC 0x04
@@ -36,10 +37,9 @@
#define SYSCFG_CMPCR 0x20
#define TYPE_STM32F2XX_SYSCFG "stm32f2xx-syscfg"
-#define STM32F2XX_SYSCFG(obj) \
- OBJECT_CHECK(STM32F2XXSyscfgState, (obj), TYPE_STM32F2XX_SYSCFG)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXSyscfgState, STM32F2XX_SYSCFG)
-typedef struct {
+struct STM32F2XXSyscfgState {
/* <private> */
SysBusDevice parent_obj;
@@ -53,8 +53,6 @@ typedef struct {
uint32_t syscfg_exticr3;
uint32_t syscfg_exticr4;
uint32_t syscfg_cmpcr;
-
- qemu_irq irq;
-} STM32F2XXSyscfgState;
+};
#endif /* HW_STM32F2XX_SYSCFG_H */
diff --git a/include/hw/misc/stm32f4xx_exti.h b/include/hw/misc/stm32f4xx_exti.h
index 707036a41b..fc11c595fa 100644
--- a/include/hw/misc/stm32f4xx_exti.h
+++ b/include/hw/misc/stm32f4xx_exti.h
@@ -22,11 +22,11 @@
* THE SOFTWARE.
*/
-#ifndef HW_STM_EXTI_H
-#define HW_STM_EXTI_H
+#ifndef HW_STM32F4XX_EXTI_H
+#define HW_STM32F4XX_EXTI_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
+#include "qom/object.h"
#define EXTI_IMR 0x00
#define EXTI_EMR 0x04
@@ -36,13 +36,12 @@
#define EXTI_PR 0x14
#define TYPE_STM32F4XX_EXTI "stm32f4xx-exti"
-#define STM32F4XX_EXTI(obj) \
- OBJECT_CHECK(STM32F4xxExtiState, (obj), TYPE_STM32F4XX_EXTI)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F4xxExtiState, STM32F4XX_EXTI)
#define NUM_GPIO_EVENT_IN_LINES 16
#define NUM_INTERRUPT_OUT_LINES 16
-typedef struct {
+struct STM32F4xxExtiState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -55,6 +54,6 @@ typedef struct {
uint32_t exti_pr;
qemu_irq irq[NUM_INTERRUPT_OUT_LINES];
-} STM32F4xxExtiState;
+};
#endif
diff --git a/include/hw/misc/stm32f4xx_syscfg.h b/include/hw/misc/stm32f4xx_syscfg.h
index c62c6629e5..9fce67f4b4 100644
--- a/include/hw/misc/stm32f4xx_syscfg.h
+++ b/include/hw/misc/stm32f4xx_syscfg.h
@@ -22,11 +22,11 @@
* THE SOFTWARE.
*/
-#ifndef HW_STM_SYSCFG_H
-#define HW_STM_SYSCFG_H
+#ifndef HW_STM32F4XX_SYSCFG_H
+#define HW_STM32F4XX_SYSCFG_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
+#include "qom/object.h"
#define SYSCFG_MEMRMP 0x00
#define SYSCFG_PMC 0x04
@@ -37,12 +37,11 @@
#define SYSCFG_CMPCR 0x20
#define TYPE_STM32F4XX_SYSCFG "stm32f4xx-syscfg"
-#define STM32F4XX_SYSCFG(obj) \
- OBJECT_CHECK(STM32F4xxSyscfgState, (obj), TYPE_STM32F4XX_SYSCFG)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F4xxSyscfgState, STM32F4XX_SYSCFG)
#define SYSCFG_NUM_EXTICR 4
-typedef struct {
+struct STM32F4xxSyscfgState {
/* <private> */
SysBusDevice parent_obj;
@@ -56,6 +55,6 @@ typedef struct {
qemu_irq irq;
qemu_irq gpio_out[16];
-} STM32F4xxSyscfgState;
+};
#endif
diff --git a/include/hw/misc/stm32l4x5_exti.h b/include/hw/misc/stm32l4x5_exti.h
new file mode 100644
index 0000000000..be961d2f01
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_exti.h
@@ -0,0 +1,51 @@
+/*
+ * STM32L4x5 EXTI (Extended interrupts and events controller)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This work is based on the stm32f4xx_exti by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_STM32L4X5_EXTI_H
+#define HW_STM32L4X5_EXTI_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_EXTI "stm32l4x5-exti"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5ExtiState, STM32L4X5_EXTI)
+
+#define EXTI_NUM_INTERRUPT_OUT_LINES 40
+#define EXTI_NUM_REGISTER 2
+
+struct Stm32l4x5ExtiState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t imr[EXTI_NUM_REGISTER];
+ uint32_t emr[EXTI_NUM_REGISTER];
+ uint32_t rtsr[EXTI_NUM_REGISTER];
+ uint32_t ftsr[EXTI_NUM_REGISTER];
+ uint32_t swier[EXTI_NUM_REGISTER];
+ uint32_t pr[EXTI_NUM_REGISTER];
+
+ qemu_irq irq[EXTI_NUM_INTERRUPT_OUT_LINES];
+};
+
+#endif
diff --git a/include/hw/misc/stm32l4x5_rcc.h b/include/hw/misc/stm32l4x5_rcc.h
new file mode 100644
index 0000000000..0fbfba5c40
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_rcc.h
@@ -0,0 +1,239 @@
+/*
+ * STM32L4X5 RCC (Reset and clock control)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ *
+ * Inspired by the BCM2835 CPRMAN clock manager by Luc Michel.
+ */
+
+#ifndef HW_STM32L4X5_RCC_H
+#define HW_STM32L4X5_RCC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_STM32L4X5_RCC "stm32l4x5-rcc"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5RccState, STM32L4X5_RCC)
+
+/* In the Stm32l4x5 clock tree, muxes have at most 7 sources */
+#define RCC_NUM_CLOCK_MUX_SRC 7
+
+typedef enum PllCommonChannels {
+ RCC_PLL_COMMON_CHANNEL_P = 0,
+ RCC_PLL_COMMON_CHANNEL_Q = 1,
+ RCC_PLL_COMMON_CHANNEL_R = 2,
+
+ RCC_NUM_CHANNEL_PLL_OUT = 3
+} PllCommonChannels;
+
+/* NB: Prescalers are treated as muxes with one source and one output */
+typedef enum RccClockMux {
+    /* Internal muxes that aren't exposed publicly to other peripherals */
+ RCC_CLOCK_MUX_SYSCLK,
+ RCC_CLOCK_MUX_PLL_INPUT,
+ RCC_CLOCK_MUX_HCLK,
+ RCC_CLOCK_MUX_PCLK1,
+ RCC_CLOCK_MUX_PCLK2,
+ RCC_CLOCK_MUX_HSE_OVER_32,
+ RCC_CLOCK_MUX_LCD_AND_RTC_COMMON,
+
+ /* Muxes with a publicly available output */
+ RCC_CLOCK_MUX_CORTEX_REFCLK,
+ RCC_CLOCK_MUX_USART1,
+ RCC_CLOCK_MUX_USART2,
+ RCC_CLOCK_MUX_USART3,
+ RCC_CLOCK_MUX_UART4,
+ RCC_CLOCK_MUX_UART5,
+ RCC_CLOCK_MUX_LPUART1,
+ RCC_CLOCK_MUX_I2C1,
+ RCC_CLOCK_MUX_I2C2,
+ RCC_CLOCK_MUX_I2C3,
+ RCC_CLOCK_MUX_LPTIM1,
+ RCC_CLOCK_MUX_LPTIM2,
+ RCC_CLOCK_MUX_SWPMI1,
+ RCC_CLOCK_MUX_MCO,
+ RCC_CLOCK_MUX_LSCO,
+ RCC_CLOCK_MUX_DFSDM1,
+ RCC_CLOCK_MUX_ADC,
+ RCC_CLOCK_MUX_CLK48,
+ RCC_CLOCK_MUX_SAI1,
+ RCC_CLOCK_MUX_SAI2,
+
+ /*
+     * Muxes that have only one input and one output, each assigned to a
+     * peripheral. They could be direct lines, but it is simpler to use
+     * the same logic for all outputs.
+ */
+ /* - AHB1 */
+ RCC_CLOCK_MUX_TSC,
+ RCC_CLOCK_MUX_CRC,
+ RCC_CLOCK_MUX_FLASH,
+ RCC_CLOCK_MUX_DMA2,
+ RCC_CLOCK_MUX_DMA1,
+
+ /* - AHB2 */
+ RCC_CLOCK_MUX_RNG,
+ RCC_CLOCK_MUX_AES,
+ RCC_CLOCK_MUX_OTGFS,
+ RCC_CLOCK_MUX_GPIOA,
+ RCC_CLOCK_MUX_GPIOB,
+ RCC_CLOCK_MUX_GPIOC,
+ RCC_CLOCK_MUX_GPIOD,
+ RCC_CLOCK_MUX_GPIOE,
+ RCC_CLOCK_MUX_GPIOF,
+ RCC_CLOCK_MUX_GPIOG,
+ RCC_CLOCK_MUX_GPIOH,
+
+ /* - AHB3 */
+ RCC_CLOCK_MUX_QSPI,
+ RCC_CLOCK_MUX_FMC,
+
+ /* - APB1 */
+ RCC_CLOCK_MUX_OPAMP,
+ RCC_CLOCK_MUX_DAC1,
+ RCC_CLOCK_MUX_PWR,
+ RCC_CLOCK_MUX_CAN1,
+ RCC_CLOCK_MUX_SPI3,
+ RCC_CLOCK_MUX_SPI2,
+ RCC_CLOCK_MUX_WWDG,
+ RCC_CLOCK_MUX_LCD,
+ RCC_CLOCK_MUX_TIM7,
+ RCC_CLOCK_MUX_TIM6,
+ RCC_CLOCK_MUX_TIM5,
+ RCC_CLOCK_MUX_TIM4,
+ RCC_CLOCK_MUX_TIM3,
+ RCC_CLOCK_MUX_TIM2,
+
+ /* - APB2 */
+ RCC_CLOCK_MUX_TIM17,
+ RCC_CLOCK_MUX_TIM16,
+ RCC_CLOCK_MUX_TIM15,
+ RCC_CLOCK_MUX_TIM8,
+ RCC_CLOCK_MUX_SPI1,
+ RCC_CLOCK_MUX_TIM1,
+ RCC_CLOCK_MUX_SDMMC1,
+ RCC_CLOCK_MUX_FW,
+ RCC_CLOCK_MUX_SYSCFG,
+
+ /* - BDCR */
+ RCC_CLOCK_MUX_RTC,
+
+ /* - OTHER */
+ RCC_CLOCK_MUX_CORTEX_FCLK,
+
+ RCC_NUM_CLOCK_MUX
+} RccClockMux;
+
+typedef enum RccPll {
+ RCC_PLL_PLL,
+ RCC_PLL_PLLSAI1,
+ RCC_PLL_PLLSAI2,
+
+ RCC_NUM_PLL
+} RccPll;
+
+typedef struct RccClockMuxState {
+ DeviceState parent_obj;
+
+ RccClockMux id;
+ Clock *srcs[RCC_NUM_CLOCK_MUX_SRC];
+ Clock *out;
+ bool enabled;
+ uint32_t src;
+ uint32_t multiplier;
+ uint32_t divider;
+
+ /*
+     * Used by the clock source update callback to retrieve both the mux
+     * and the source number.
+ */
+ struct RccClockMuxState *backref[RCC_NUM_CLOCK_MUX_SRC];
+} RccClockMuxState;
+
+typedef struct RccPllState {
+ DeviceState parent_obj;
+
+ RccPll id;
+ Clock *in;
+ uint32_t vco_multiplier;
+ Clock *channels[RCC_NUM_CHANNEL_PLL_OUT];
+ /* Global pll enabled flag */
+ bool enabled;
+    /* 'channel_enabled' refers to the runtime configuration */
+ bool channel_enabled[RCC_NUM_CHANNEL_PLL_OUT];
+ /*
+ * 'exists' refers to the physical configuration
+ * It should only be set at pll initialization.
+ * e.g. pllsai2 doesn't have a Q output.
+ */
+ bool channel_exists[RCC_NUM_CHANNEL_PLL_OUT];
+ uint32_t channel_divider[RCC_NUM_CHANNEL_PLL_OUT];
+} RccPllState;
+
+struct Stm32l4x5RccState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t cr;
+ uint32_t icscr;
+ uint32_t cfgr;
+ uint32_t pllcfgr;
+ uint32_t pllsai1cfgr;
+ uint32_t pllsai2cfgr;
+ uint32_t cier;
+ uint32_t cifr;
+ uint32_t ahb1rstr;
+ uint32_t ahb2rstr;
+ uint32_t ahb3rstr;
+ uint32_t apb1rstr1;
+ uint32_t apb1rstr2;
+ uint32_t apb2rstr;
+ uint32_t ahb1enr;
+ uint32_t ahb2enr;
+ uint32_t ahb3enr;
+ uint32_t apb1enr1;
+ uint32_t apb1enr2;
+ uint32_t apb2enr;
+ uint32_t ahb1smenr;
+ uint32_t ahb2smenr;
+ uint32_t ahb3smenr;
+ uint32_t apb1smenr1;
+ uint32_t apb1smenr2;
+ uint32_t apb2smenr;
+ uint32_t ccipr;
+ uint32_t bdcr;
+ uint32_t csr;
+
+ /* Clock sources */
+ Clock *gnd;
+ Clock *hsi16_rc;
+ Clock *msi_rc;
+ Clock *hse;
+ Clock *lsi_rc;
+ Clock *lse_crystal;
+ Clock *sai1_extclk;
+ Clock *sai2_extclk;
+
+ /* PLLs */
+ RccPllState plls[RCC_NUM_PLL];
+
+ /* Muxes ~= outputs */
+ RccClockMuxState clock_muxes[RCC_NUM_CLOCK_MUX];
+
+ qemu_irq irq;
+ uint64_t hse_frequency;
+ uint64_t sai1_extclk_frequency;
+ uint64_t sai2_extclk_frequency;
+};
+
+#endif /* HW_STM32L4X5_RCC_H */
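(Editorial sketch, not part of the patch.) Each RccClockMuxState above carries a selected source plus a multiplier/divider pair and an enable flag, so the frequency a mux would propagate follows directly from those fields. A hedged sketch of that arithmetic; the helper name is hypothetical and source selection is ignored for brevity:

    /* Hypothetical helper: frequency a mux would propagate, derived only
     * from the fields declared above. */
    static uint64_t rcc_mux_output_hz_example(const RccClockMuxState *mux,
                                              uint64_t src_hz)
    {
        if (!mux->enabled) {
            return 0;                   /* a gated mux propagates no clock */
        }
        return src_hz * mux->multiplier / mux->divider;
    }
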
diff --git a/include/hw/misc/stm32l4x5_rcc_internals.h b/include/hw/misc/stm32l4x5_rcc_internals.h
new file mode 100644
index 0000000000..ff1c834f69
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_rcc_internals.h
@@ -0,0 +1,1042 @@
+/*
+ * STM32L4X5 RCC (Reset and clock control)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ *
+ * Inspired by the BCM2835 CPRMAN clock manager implementation by Luc Michel.
+ */
+
+#ifndef HW_STM32L4X5_RCC_INTERNALS_H
+#define HW_STM32L4X5_RCC_INTERNALS_H
+
+#include "hw/registerfields.h"
+#include "hw/misc/stm32l4x5_rcc.h"
+
+#define TYPE_RCC_CLOCK_MUX "stm32l4x5-rcc-clock-mux"
+#define TYPE_RCC_PLL "stm32l4x5-rcc-pll"
+
+OBJECT_DECLARE_SIMPLE_TYPE(RccClockMuxState, RCC_CLOCK_MUX)
+OBJECT_DECLARE_SIMPLE_TYPE(RccPllState, RCC_PLL)
+
+/* Register map */
+REG32(CR, 0x00)
+ FIELD(CR, PLLSAI2RDY, 29, 1)
+ FIELD(CR, PLLSAI2ON, 28, 1)
+ FIELD(CR, PLLSAI1RDY, 27, 1)
+ FIELD(CR, PLLSAI1ON, 26, 1)
+ FIELD(CR, PLLRDY, 25, 1)
+ FIELD(CR, PLLON, 24, 1)
+ FIELD(CR, CSSON, 19, 1)
+ FIELD(CR, HSEBYP, 18, 1)
+ FIELD(CR, HSERDY, 17, 1)
+ FIELD(CR, HSEON, 16, 1)
+ FIELD(CR, HSIASFS, 11, 1)
+ FIELD(CR, HSIRDY, 10, 1)
+ FIELD(CR, HSIKERON, 9, 1)
+ FIELD(CR, HSION, 8, 1)
+ FIELD(CR, MSIRANGE, 4, 4)
+ FIELD(CR, MSIRGSEL, 3, 1)
+ FIELD(CR, MSIPLLEN, 2, 1)
+ FIELD(CR, MSIRDY, 1, 1)
+ FIELD(CR, MSION, 0, 1)
+REG32(ICSCR, 0x04)
+ FIELD(ICSCR, HSITRIM, 24, 7)
+ FIELD(ICSCR, HSICAL, 16, 8)
+ FIELD(ICSCR, MSITRIM, 8, 8)
+ FIELD(ICSCR, MSICAL, 0, 8)
+REG32(CFGR, 0x08)
+ FIELD(CFGR, MCOPRE, 28, 3)
+ /* MCOSEL[2:0] only for STM32L475xx/476xx/486xx devices */
+ FIELD(CFGR, MCOSEL, 24, 3)
+ FIELD(CFGR, STOPWUCK, 15, 1)
+ FIELD(CFGR, PPRE2, 11, 3)
+ FIELD(CFGR, PPRE1, 8, 3)
+ FIELD(CFGR, HPRE, 4, 4)
+ FIELD(CFGR, SWS, 2, 2)
+ FIELD(CFGR, SW, 0, 2)
+REG32(PLLCFGR, 0x0C)
+ FIELD(PLLCFGR, PLLPDIV, 27, 5)
+ FIELD(PLLCFGR, PLLR, 25, 2)
+ FIELD(PLLCFGR, PLLREN, 24, 1)
+ FIELD(PLLCFGR, PLLQ, 21, 2)
+ FIELD(PLLCFGR, PLLQEN, 20, 1)
+ FIELD(PLLCFGR, PLLP, 17, 1)
+ FIELD(PLLCFGR, PLLPEN, 16, 1)
+ FIELD(PLLCFGR, PLLN, 8, 7)
+ FIELD(PLLCFGR, PLLM, 4, 3)
+ FIELD(PLLCFGR, PLLSRC, 0, 2)
+REG32(PLLSAI1CFGR, 0x10)
+ FIELD(PLLSAI1CFGR, PLLSAI1PDIV, 27, 5)
+ FIELD(PLLSAI1CFGR, PLLSAI1R, 25, 2)
+ FIELD(PLLSAI1CFGR, PLLSAI1REN, 24, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1Q, 21, 2)
+ FIELD(PLLSAI1CFGR, PLLSAI1QEN, 20, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1P, 17, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1PEN, 16, 1)
+ FIELD(PLLSAI1CFGR, PLLSAI1N, 8, 7)
+REG32(PLLSAI2CFGR, 0x14)
+ FIELD(PLLSAI2CFGR, PLLSAI2PDIV, 27, 5)
+ FIELD(PLLSAI2CFGR, PLLSAI2R, 25, 2)
+ FIELD(PLLSAI2CFGR, PLLSAI2REN, 24, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2Q, 21, 2)
+ FIELD(PLLSAI2CFGR, PLLSAI2QEN, 20, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2P, 17, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2PEN, 16, 1)
+ FIELD(PLLSAI2CFGR, PLLSAI2N, 8, 7)
+REG32(CIER, 0x18)
+ /* HSI48RDYIE: only on STM32L496xx/4A6xx devices */
+ FIELD(CIER, LSECSSIE, 9, 1)
+ FIELD(CIER, PLLSAI2RDYIE, 7, 1)
+ FIELD(CIER, PLLSAI1RDYIE, 6, 1)
+ FIELD(CIER, PLLRDYIE, 5, 1)
+ FIELD(CIER, HSERDYIE, 4, 1)
+ FIELD(CIER, HSIRDYIE, 3, 1)
+ FIELD(CIER, MSIRDYIE, 2, 1)
+ FIELD(CIER, LSERDYIE, 1, 1)
+ FIELD(CIER, LSIRDYIE, 0, 1)
+REG32(CIFR, 0x1C)
+ /* HSI48RDYF: only on STM32L496xx/4A6xx devices */
+ FIELD(CIFR, LSECSSF, 9, 1)
+ FIELD(CIFR, CSSF, 8, 1)
+ FIELD(CIFR, PLLSAI2RDYF, 7, 1)
+ FIELD(CIFR, PLLSAI1RDYF, 6, 1)
+ FIELD(CIFR, PLLRDYF, 5, 1)
+ FIELD(CIFR, HSERDYF, 4, 1)
+ FIELD(CIFR, HSIRDYF, 3, 1)
+ FIELD(CIFR, MSIRDYF, 2, 1)
+ FIELD(CIFR, LSERDYF, 1, 1)
+ FIELD(CIFR, LSIRDYF, 0, 1)
+REG32(CICR, 0x20)
+ /* HSI48RDYC: only on STM32L496xx/4A6xx devices */
+ FIELD(CICR, LSECSSC, 9, 1)
+ FIELD(CICR, CSSC, 8, 1)
+ FIELD(CICR, PLLSAI2RDYC, 7, 1)
+ FIELD(CICR, PLLSAI1RDYC, 6, 1)
+ FIELD(CICR, PLLRDYC, 5, 1)
+ FIELD(CICR, HSERDYC, 4, 1)
+ FIELD(CICR, HSIRDYC, 3, 1)
+ FIELD(CICR, MSIRDYC, 2, 1)
+ FIELD(CICR, LSERDYC, 1, 1)
+ FIELD(CICR, LSIRDYC, 0, 1)
+REG32(AHB1RSTR, 0x28)
+REG32(AHB2RSTR, 0x2C)
+REG32(AHB3RSTR, 0x30)
+REG32(APB1RSTR1, 0x38)
+REG32(APB1RSTR2, 0x3C)
+REG32(APB2RSTR, 0x40)
+REG32(AHB1ENR, 0x48)
+ /* DMA2DEN: reserved for STM32L475xx */
+ FIELD(AHB1ENR, TSCEN, 16, 1)
+ FIELD(AHB1ENR, CRCEN, 12, 1)
+ FIELD(AHB1ENR, FLASHEN, 8, 1)
+ FIELD(AHB1ENR, DMA2EN, 1, 1)
+ FIELD(AHB1ENR, DMA1EN, 0, 1)
+REG32(AHB2ENR, 0x4C)
+ FIELD(AHB2ENR, RNGEN, 18, 1)
+ /* HASHEN: reserved for STM32L475xx */
+ FIELD(AHB2ENR, AESEN, 16, 1)
+ /* DCMIEN: reserved for STM32L475xx */
+ FIELD(AHB2ENR, ADCEN, 13, 1)
+ FIELD(AHB2ENR, OTGFSEN, 12, 1)
+ /* GPIOIEN: reserved for STM32L475xx */
+ FIELD(AHB2ENR, GPIOHEN, 7, 1)
+ FIELD(AHB2ENR, GPIOGEN, 6, 1)
+ FIELD(AHB2ENR, GPIOFEN, 5, 1)
+ FIELD(AHB2ENR, GPIOEEN, 4, 1)
+ FIELD(AHB2ENR, GPIODEN, 3, 1)
+ FIELD(AHB2ENR, GPIOCEN, 2, 1)
+ FIELD(AHB2ENR, GPIOBEN, 1, 1)
+ FIELD(AHB2ENR, GPIOAEN, 0, 1)
+REG32(AHB3ENR, 0x50)
+ FIELD(AHB3ENR, QSPIEN, 8, 1)
+ FIELD(AHB3ENR, FMCEN, 0, 1)
+REG32(APB1ENR1, 0x58)
+ FIELD(APB1ENR1, LPTIM1EN, 31, 1)
+ FIELD(APB1ENR1, OPAMPEN, 30, 1)
+ FIELD(APB1ENR1, DAC1EN, 29, 1)
+ FIELD(APB1ENR1, PWREN, 28, 1)
+ FIELD(APB1ENR1, CAN2EN, 26, 1)
+ FIELD(APB1ENR1, CAN1EN, 25, 1)
+ /* CRSEN: reserved for STM32L475xx */
+ FIELD(APB1ENR1, I2C3EN, 23, 1)
+ FIELD(APB1ENR1, I2C2EN, 22, 1)
+ FIELD(APB1ENR1, I2C1EN, 21, 1)
+ FIELD(APB1ENR1, UART5EN, 20, 1)
+ FIELD(APB1ENR1, UART4EN, 19, 1)
+ FIELD(APB1ENR1, USART3EN, 18, 1)
+ FIELD(APB1ENR1, USART2EN, 17, 1)
+ FIELD(APB1ENR1, SPI3EN, 15, 1)
+ FIELD(APB1ENR1, SPI2EN, 14, 1)
+ FIELD(APB1ENR1, WWDGEN, 11, 1)
+ /* RTCAPBEN: reserved for STM32L475xx */
+ FIELD(APB1ENR1, LCDEN, 9, 1)
+ FIELD(APB1ENR1, TIM7EN, 5, 1)
+ FIELD(APB1ENR1, TIM6EN, 4, 1)
+ FIELD(APB1ENR1, TIM5EN, 3, 1)
+ FIELD(APB1ENR1, TIM4EN, 2, 1)
+ FIELD(APB1ENR1, TIM3EN, 1, 1)
+ FIELD(APB1ENR1, TIM2EN, 0, 1)
+REG32(APB1ENR2, 0x5C)
+ FIELD(APB1ENR2, LPTIM2EN, 5, 1)
+ FIELD(APB1ENR2, SWPMI1EN, 2, 1)
+ /* I2C4EN: reserved for STM32L475xx */
+ FIELD(APB1ENR2, LPUART1EN, 0, 1)
+REG32(APB2ENR, 0x60)
+ FIELD(APB2ENR, DFSDM1EN, 24, 1)
+ FIELD(APB2ENR, SAI2EN, 22, 1)
+ FIELD(APB2ENR, SAI1EN, 21, 1)
+ FIELD(APB2ENR, TIM17EN, 18, 1)
+ FIELD(APB2ENR, TIM16EN, 17, 1)
+ FIELD(APB2ENR, TIM15EN, 16, 1)
+ FIELD(APB2ENR, USART1EN, 14, 1)
+ FIELD(APB2ENR, TIM8EN, 13, 1)
+ FIELD(APB2ENR, SPI1EN, 12, 1)
+ FIELD(APB2ENR, TIM1EN, 11, 1)
+ FIELD(APB2ENR, SDMMC1EN, 10, 1)
+ FIELD(APB2ENR, FWEN, 7, 1)
+ FIELD(APB2ENR, SYSCFGEN, 0, 1)
+REG32(AHB1SMENR, 0x68)
+REG32(AHB2SMENR, 0x6C)
+REG32(AHB3SMENR, 0x70)
+REG32(APB1SMENR1, 0x78)
+REG32(APB1SMENR2, 0x7C)
+REG32(APB2SMENR, 0x80)
+REG32(CCIPR, 0x88)
+ FIELD(CCIPR, DFSDM1SEL, 31, 1)
+ FIELD(CCIPR, SWPMI1SEL, 30, 1)
+ FIELD(CCIPR, ADCSEL, 28, 2)
+ FIELD(CCIPR, CLK48SEL, 26, 2)
+ FIELD(CCIPR, SAI2SEL, 24, 2)
+ FIELD(CCIPR, SAI1SEL, 22, 2)
+ FIELD(CCIPR, LPTIM2SEL, 20, 2)
+ FIELD(CCIPR, LPTIM1SEL, 18, 2)
+ FIELD(CCIPR, I2C3SEL, 16, 2)
+ FIELD(CCIPR, I2C2SEL, 14, 2)
+ FIELD(CCIPR, I2C1SEL, 12, 2)
+ FIELD(CCIPR, LPUART1SEL, 10, 2)
+ FIELD(CCIPR, UART5SEL, 8, 2)
+ FIELD(CCIPR, UART4SEL, 6, 2)
+ FIELD(CCIPR, USART3SEL, 4, 2)
+ FIELD(CCIPR, USART2SEL, 2, 2)
+ FIELD(CCIPR, USART1SEL, 0, 2)
+REG32(BDCR, 0x90)
+ FIELD(BDCR, LSCOSEL, 25, 1)
+ FIELD(BDCR, LSCOEN, 24, 1)
+ FIELD(BDCR, BDRST, 16, 1)
+ FIELD(BDCR, RTCEN, 15, 1)
+ FIELD(BDCR, RTCSEL, 8, 2)
+ FIELD(BDCR, LSECSSD, 6, 1)
+ FIELD(BDCR, LSECSSON, 5, 1)
+ FIELD(BDCR, LSEDRV, 3, 2)
+ FIELD(BDCR, LSEBYP, 2, 1)
+ FIELD(BDCR, LSERDY, 1, 1)
+ FIELD(BDCR, LSEON, 0, 1)
+REG32(CSR, 0x94)
+ FIELD(CSR, LPWRRSTF, 31, 1)
+ FIELD(CSR, WWDGRSTF, 30, 1)
+ FIELD(CSR, IWWGRSTF, 29, 1)
+ FIELD(CSR, SFTRSTF, 28, 1)
+ FIELD(CSR, BORRSTF, 27, 1)
+ FIELD(CSR, PINRSTF, 26, 1)
+ FIELD(CSR, OBLRSTF, 25, 1)
+ FIELD(CSR, FWRSTF, 24, 1)
+ FIELD(CSR, RMVF, 23, 1)
+ FIELD(CSR, MSISRANGE, 8, 4)
+ FIELD(CSR, LSIRDY, 1, 1)
+ FIELD(CSR, LSION, 0, 1)
+/* CRRCR and CCIPR2 registers are present on L496/L4A6 devices only. */
+
+/* Read-only masks to prevent writes to read-only bits */
+#define CR_READ_ONLY_MASK (R_CR_PLLSAI2RDY_MASK | \
+ R_CR_PLLSAI1RDY_MASK | \
+ R_CR_PLLRDY_MASK | \
+ R_CR_HSERDY_MASK | \
+ R_CR_HSIRDY_MASK | \
+ R_CR_MSIRDY_MASK)
+#define CR_READ_SET_MASK (R_CR_CSSON_MASK | R_CR_MSIRGSEL_MASK)
+#define ICSCR_READ_ONLY_MASK (R_ICSCR_HSICAL_MASK | R_ICSCR_MSICAL_MASK)
+#define CFGR_READ_ONLY_MASK (R_CFGR_SWS_MASK)
+#define CIFR_READ_ONLY_MASK (R_CIFR_LSECSSF_MASK | \
+ R_CIFR_CSSF_MASK | \
+ R_CIFR_PLLSAI2RDYF_MASK | \
+ R_CIFR_PLLSAI1RDYF_MASK | \
+ R_CIFR_PLLRDYF_MASK | \
+ R_CIFR_HSERDYF_MASK | \
+ R_CIFR_HSIRDYF_MASK | \
+ R_CIFR_MSIRDYF_MASK | \
+ R_CIFR_LSERDYF_MASK | \
+ R_CIFR_LSIRDYF_MASK)
+#define CIFR_IRQ_MASK CIFR_READ_ONLY_MASK
+#define APB2ENR_READ_SET_MASK (R_APB2ENR_FWEN_MASK)
+#define BDCR_READ_ONLY_MASK (R_BDCR_LSECSSD_MASK | R_BDCR_LSERDY_MASK)
+#define CSR_READ_ONLY_MASK (R_CSR_LPWRRSTF_MASK | \
+ R_CSR_WWDGRSTF_MASK | \
+ R_CSR_IWWGRSTF_MASK | \
+ R_CSR_SFTRSTF_MASK | \
+ R_CSR_BORRSTF_MASK | \
+ R_CSR_PINRSTF_MASK | \
+ R_CSR_OBLRSTF_MASK | \
+ R_CSR_FWRSTF_MASK | \
+ R_CSR_LSIRDY_MASK)
+
+/* Pll Channels */
+enum PllChannels {
+ RCC_PLL_CHANNEL_PLLSAI3CLK = 0,
+ RCC_PLL_CHANNEL_PLL48M1CLK = 1,
+ RCC_PLL_CHANNEL_PLLCLK = 2,
+};
+
+enum PllSai1Channels {
+ RCC_PLLSAI1_CHANNEL_PLLSAI1CLK = 0,
+ RCC_PLLSAI1_CHANNEL_PLL48M2CLK = 1,
+ RCC_PLLSAI1_CHANNEL_PLLADC1CLK = 2,
+};
+
+enum PllSai2Channels {
+ RCC_PLLSAI2_CHANNEL_PLLSAI2CLK = 0,
+ /* No Q channel */
+ RCC_PLLSAI2_CHANNEL_PLLADC2CLK = 2,
+};
+
+typedef enum RccClockMuxSource {
+ RCC_CLOCK_MUX_SRC_GND = 0,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ RCC_CLOCK_MUX_SRC_SAI1_EXTCLK,
+ RCC_CLOCK_MUX_SRC_SAI2_EXTCLK,
+ RCC_CLOCK_MUX_SRC_PLL,
+ RCC_CLOCK_MUX_SRC_PLLSAI1,
+ RCC_CLOCK_MUX_SRC_PLLSAI2,
+ RCC_CLOCK_MUX_SRC_PLLSAI3,
+ RCC_CLOCK_MUX_SRC_PLL48M1,
+ RCC_CLOCK_MUX_SRC_PLL48M2,
+ RCC_CLOCK_MUX_SRC_PLLADC1,
+ RCC_CLOCK_MUX_SRC_PLLADC2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HCLK,
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ RCC_CLOCK_MUX_SRC_HSE_OVER_32,
+ RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON,
+
+ RCC_CLOCK_MUX_SRC_NUMBER,
+} RccClockMuxSource;
+
+/* PLL init info */
+typedef struct PllInitInfo {
+ const char *name;
+
+ const char *channel_name[RCC_NUM_CHANNEL_PLL_OUT];
+ bool channel_exists[RCC_NUM_CHANNEL_PLL_OUT];
+ uint32_t default_channel_divider[RCC_NUM_CHANNEL_PLL_OUT];
+
+ RccClockMuxSource src_mapping[RCC_NUM_CLOCK_MUX_SRC];
+} PllInitInfo;
+
+static const PllInitInfo PLL_INIT_INFO[] = {
+ [RCC_PLL_PLL] = {
+ .name = "pll",
+ .channel_name = {
+ "pllsai3clk",
+ "pll48m1clk",
+ "pllclk"
+ },
+ .channel_exists = {
+ true, true, true
+ },
+ /* From PLLCFGR register documentation */
+ .default_channel_divider = {
+ 7, 2, 2
+ }
+ },
+ [RCC_PLL_PLLSAI1] = {
+ .name = "pllsai1",
+ .channel_name = {
+ "pllsai1clk",
+ "pll48m2clk",
+ "plladc1clk"
+ },
+ .channel_exists = {
+ true, true, true
+ },
+ /* From PLLSAI1CFGR register documentation */
+ .default_channel_divider = {
+ 7, 2, 2
+ }
+ },
+ [RCC_PLL_PLLSAI2] = {
+ .name = "pllsai2",
+ .channel_name = {
+ "pllsai2clk",
+ NULL,
+ "plladc2clk"
+ },
+ .channel_exists = {
+ true, false, true
+ },
+ /* From PLLSAI2CFGR register documentation */
+ .default_channel_divider = {
+ 7, 0, 2
+ }
+ }
+};
+
+static inline void set_pll_init_info(RccPllState *pll,
+ RccPll id)
+{
+ int i;
+
+ pll->id = id;
+ pll->vco_multiplier = 1;
+ for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) {
+ pll->channel_enabled[i] = false;
+ pll->channel_exists[i] = PLL_INIT_INFO[id].channel_exists[i];
+ pll->channel_divider[i] = PLL_INIT_INFO[id].default_channel_divider[i];
+ }
+}
+
+/* Clock mux init info */
+typedef struct ClockMuxInitInfo {
+ const char *name;
+
+ uint32_t multiplier;
+ uint32_t divider;
+ bool enabled;
+ /* If this is true, the clock will not be exposed outside of the device */
+ bool hidden;
+
+ RccClockMuxSource src_mapping[RCC_NUM_CLOCK_MUX_SRC];
+} ClockMuxInitInfo;
+
+#define FILL_DEFAULT_FACTOR \
+ .multiplier = 1, \
+ .divider = 1
+
+#define FILL_DEFAULT_INIT_ENABLED \
+ FILL_DEFAULT_FACTOR, \
+ .enabled = true
+
+#define FILL_DEFAULT_INIT_DISABLED \
+ FILL_DEFAULT_FACTOR, \
+ .enabled = false
+
+
+static const ClockMuxInitInfo CLOCK_MUX_INIT_INFO[] = {
+ [RCC_CLOCK_MUX_SYSCLK] = {
+ .name = "sysclk",
+ /* Same mapping as: CFGR_SW */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ RCC_CLOCK_MUX_SRC_PLL,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_PLL_INPUT] = {
+ .name = "pll-input",
+ /* Same mapping as: PLLCFGR_PLLSRC */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_HCLK] = {
+ .name = "hclk",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_PCLK1] = {
+ .name = "pclk1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_PCLK2] = {
+ .name = "pclk2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ [RCC_CLOCK_MUX_HSE_OVER_32] = {
+ .name = "hse-divided-by-32",
+ .multiplier = 1,
+ .divider = 32,
+ .enabled = true,
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HSE,
+ },
+ .hidden = true,
+ },
+ [RCC_CLOCK_MUX_LCD_AND_RTC_COMMON] = {
+ .name = "lcd-and-rtc-common-mux",
+ /* Same mapping as: BDCR_RTCSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_GND,
+ RCC_CLOCK_MUX_SRC_LSE,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_HSE_OVER_32,
+ },
+ .hidden = true,
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+ /* From now on, muxes with a publicly available output */
+ [RCC_CLOCK_MUX_CORTEX_REFCLK] = {
+ .name = "cortex-refclk",
+ .multiplier = 1,
+ /* REFCLK is always HCLK/8 */
+ .divider = 8,
+ .enabled = true,
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ }
+ },
+ [RCC_CLOCK_MUX_USART1] = {
+ .name = "usart1",
+ /* Same mapping as: CCIPR_USART1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_USART2] = {
+ .name = "usart2",
+ /* Same mapping as: CCIPR_USART2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_USART3] = {
+ .name = "usart3",
+ /* Same mapping as: CCIPR_USART3SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_UART4] = {
+ .name = "uart4",
+ /* Same mapping as: CCIPR_UART4SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_UART5] = {
+ .name = "uart5",
+ /* Same mapping as: CCIPR_UART5SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LPUART1] = {
+ .name = "lpuart1",
+ /* Same mapping as: CCIPR_LPUART1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_I2C1] = {
+ .name = "i2c1",
+ /* Same mapping as: CCIPR_I2C1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_I2C2] = {
+ .name = "i2c2",
+ /* Same mapping as: CCIPR_I2C2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_I2C3] = {
+ .name = "i2c3",
+ /* Same mapping as: CCIPR_I2C3SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LPTIM1] = {
+ .name = "lptim1",
+ /* Same mapping as: CCIPR_LPTIM1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LPTIM2] = {
+ .name = "lptim2",
+ /* Same mapping as: CCIPR_LPTIM2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SWPMI1] = {
+ .name = "swpmi1",
+ /* Same mapping as: CCIPR_SWPMI1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ RCC_CLOCK_MUX_SRC_HSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_MCO] = {
+ .name = "mco",
+ /* Same mapping as: CFGR_MCOSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ RCC_CLOCK_MUX_SRC_MSI,
+ RCC_CLOCK_MUX_SRC_HSI,
+ RCC_CLOCK_MUX_SRC_HSE,
+ RCC_CLOCK_MUX_SRC_PLL,
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LSCO] = {
+ .name = "lsco",
+ /* Same mapping as: BDCR_LSCOSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_LSI,
+ RCC_CLOCK_MUX_SRC_LSE,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DFSDM1] = {
+ .name = "dfsdm1",
+ /* Same mapping as: CCIPR_DFSDM1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_ADC] = {
+ .name = "adc",
+ /* Same mapping as: CCIPR_ADCSEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_GND,
+ RCC_CLOCK_MUX_SRC_PLLADC1,
+ RCC_CLOCK_MUX_SRC_PLLADC2,
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CLK48] = {
+ .name = "clk48",
+ /* Same mapping as: CCIPR_CLK48SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_GND,
+ RCC_CLOCK_MUX_SRC_PLL48M2,
+ RCC_CLOCK_MUX_SRC_PLL48M1,
+ RCC_CLOCK_MUX_SRC_MSI,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SAI2] = {
+ .name = "sai2",
+ /* Same mapping as: CCIPR_SAI2SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PLLSAI1,
+ RCC_CLOCK_MUX_SRC_PLLSAI2,
+ RCC_CLOCK_MUX_SRC_PLLSAI3,
+ RCC_CLOCK_MUX_SRC_SAI2_EXTCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SAI1] = {
+ .name = "sai1",
+ /* Same mapping as: CCIPR_SAI1SEL */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PLLSAI1,
+ RCC_CLOCK_MUX_SRC_PLLSAI2,
+ RCC_CLOCK_MUX_SRC_PLLSAI3,
+ RCC_CLOCK_MUX_SRC_SAI1_EXTCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ /* From now on, these muxes only have one valid source */
+ [RCC_CLOCK_MUX_TSC] = {
+ .name = "tsc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CRC] = {
+ .name = "crc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_FLASH] = {
+ .name = "flash",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DMA2] = {
+ .name = "dma2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DMA1] = {
+ .name = "dma1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_RNG] = {
+ .name = "rng",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_AES] = {
+ .name = "aes",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_OTGFS] = {
+ .name = "otgfs",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOA] = {
+ .name = "gpioa",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOB] = {
+ .name = "gpiob",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOC] = {
+ .name = "gpioc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOD] = {
+ .name = "gpiod",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOE] = {
+ .name = "gpioe",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOF] = {
+ .name = "gpiof",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOG] = {
+ .name = "gpiog",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_GPIOH] = {
+ .name = "gpioh",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_QSPI] = {
+ .name = "qspi",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_FMC] = {
+ .name = "fmc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_OPAMP] = {
+ .name = "opamp",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_DAC1] = {
+ .name = "dac1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_PWR] = {
+ .name = "pwr",
+ /*
+ * PWREN is in the APB1ENR1 register,
+ * but PWR uses SYSCLK according to the clock tree.
+ */
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_SYSCLK,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CAN1] = {
+ .name = "can1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SPI3] = {
+ .name = "spi3",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SPI2] = {
+ .name = "spi2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_WWDG] = {
+ .name = "wwdg",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_LCD] = {
+ .name = "lcd",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM7] = {
+ .name = "tim7",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM6] = {
+ .name = "tim6",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM5] = {
+ .name = "tim5",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM4] = {
+ .name = "tim4",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM3] = {
+ .name = "tim3",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM2] = {
+ .name = "tim2",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK1,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM17] = {
+ .name = "tim17",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM16] = {
+ .name = "tim16",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM15] = {
+ .name = "tim15",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM8] = {
+ .name = "tim8",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SPI1] = {
+ .name = "spi1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_TIM1] = {
+ .name = "tim1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SDMMC1] = {
+ .name = "sdmmc1",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_FW] = {
+ .name = "fw",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_SYSCFG] = {
+ .name = "syscfg",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_PCLK2,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_RTC] = {
+ .name = "rtc",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON,
+ },
+ FILL_DEFAULT_INIT_DISABLED,
+ },
+ [RCC_CLOCK_MUX_CORTEX_FCLK] = {
+ .name = "cortex-fclk",
+ .src_mapping = {
+ RCC_CLOCK_MUX_SRC_HCLK,
+ },
+ FILL_DEFAULT_INIT_ENABLED,
+ },
+};
+
+static inline void set_clock_mux_init_info(RccClockMuxState *mux,
+ RccClockMux id)
+{
+ mux->id = id;
+ mux->multiplier = CLOCK_MUX_INIT_INFO[id].multiplier;
+ mux->divider = CLOCK_MUX_INIT_INFO[id].divider;
+ mux->enabled = CLOCK_MUX_INIT_INFO[id].enabled;
+ /*
+     * Every peripheral has the first source of its source list as
+     * its default source.
+ */
+ mux->src = 0;
+}
+
+#endif /* HW_STM32L4X5_RCC_INTERNALS_H */
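(Editorial sketch, not part of the patch.) The register map in this header is built on hw/registerfields.h: each REG32() defines an A_<REG> offset and R_<REG> word index, and each FIELD() defines <REG>_<FIELD> shift/mask constants usable with FIELD_EX32()/FIELD_DP32(). A sketch of how those generated macros and CR_READ_ONLY_MASK above might be combined in a write handler; the function and the lock-immediately behaviour are assumptions for illustration, not the actual implementation:

    /* Illustrative write-path sketch, not part of the patch. */
    static void rcc_cr_write_example(Stm32l4x5RccState *s, uint32_t val)
    {
        /* Keep the read-only ready bits, accept the writable ones. */
        s->cr = (s->cr & CR_READ_ONLY_MASK) | (val & ~CR_READ_ONLY_MASK);

        /* Assumed model behaviour: report the PLL as locked once enabled. */
        if (FIELD_EX32(s->cr, CR, PLLON)) {
            s->cr = FIELD_DP32(s->cr, CR, PLLRDY, 1);
        }
    }
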
diff --git a/include/hw/misc/stm32l4x5_syscfg.h b/include/hw/misc/stm32l4x5_syscfg.h
new file mode 100644
index 0000000000..23bb564150
--- /dev/null
+++ b/include/hw/misc/stm32l4x5_syscfg.h
@@ -0,0 +1,53 @@
+/*
+ * STM32L4x5 SYSCFG (System Configuration Controller)
+ *
+ * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
+ * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This work is based on the stm32f4xx_syscfg by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#ifndef HW_STM32L4X5_SYSCFG_H
+#define HW_STM32L4X5_SYSCFG_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "hw/gpio/stm32l4x5_gpio.h"
+
+#define TYPE_STM32L4X5_SYSCFG "stm32l4x5-syscfg"
+OBJECT_DECLARE_SIMPLE_TYPE(Stm32l4x5SyscfgState, STM32L4X5_SYSCFG)
+
+#define SYSCFG_NUM_EXTICR 4
+
+struct Stm32l4x5SyscfgState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t memrmp;
+ uint32_t cfgr1;
+ uint32_t exticr[SYSCFG_NUM_EXTICR];
+ uint32_t scsr;
+ uint32_t cfgr2;
+ uint32_t swpr;
+ uint32_t skr;
+ uint32_t swpr2;
+
+ qemu_irq gpio_out[GPIO_NUM_PINS];
+};
+
+#endif
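(Editorial sketch, not part of the patch.) On STM32 parts the EXTICR bank routes each EXTI line to a GPIO port, four bits per line and four lines per register, and the exticr[] array above follows that layout. A hedged sketch of the lookup, with a hypothetical helper name:

    /* Illustrative only: locate the source port of an EXTI line, assuming
     * 4 bits per line and 4 lines per EXTICR register as on other STM32
     * SYSCFG models. */
    static uint32_t syscfg_exticr_source_example(const Stm32l4x5SyscfgState *s,
                                                 unsigned line)
    {
        return (s->exticr[line / 4] >> ((line % 4) * 4)) & 0xf;
    }
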
diff --git a/include/hw/misc/tz-mpc.h b/include/hw/misc/tz-mpc.h
index 6f15945410..74d5d822cf 100644
--- a/include/hw/misc/tz-mpc.h
+++ b/include/hw/misc/tz-mpc.h
@@ -32,15 +32,15 @@
#define TZ_MPC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_TZ_MPC "tz-mpc"
-#define TZ_MPC(obj) OBJECT_CHECK(TZMPC, (obj), TYPE_TZ_MPC)
+OBJECT_DECLARE_SIMPLE_TYPE(TZMPC, TZ_MPC)
#define TZ_NUM_PORTS 16
#define TYPE_TZ_MPC_IOMMU_MEMORY_REGION "tz-mpc-iommu-memory-region"
-typedef struct TZMPC TZMPC;
struct TZMPC {
/*< private >*/
diff --git a/include/hw/misc/tz-msc.h b/include/hw/misc/tz-msc.h
index 116b96ae9b..77cc7f2404 100644
--- a/include/hw/misc/tz-msc.h
+++ b/include/hw/misc/tz-msc.h
@@ -52,11 +52,12 @@
#include "hw/sysbus.h"
#include "target/arm/idau.h"
+#include "qom/object.h"
#define TYPE_TZ_MSC "tz-msc"
-#define TZ_MSC(obj) OBJECT_CHECK(TZMSC, (obj), TYPE_TZ_MSC)
+OBJECT_DECLARE_SIMPLE_TYPE(TZMSC, TZ_MSC)
-typedef struct TZMSC {
+struct TZMSC {
/*< private >*/
SysBusDevice parent_obj;
@@ -74,6 +75,6 @@ typedef struct TZMSC {
AddressSpace downstream_as;
MemoryRegion upstream;
IDAUInterface *idau;
-} TZMSC;
+};
#endif
diff --git a/include/hw/misc/tz-ppc.h b/include/hw/misc/tz-ppc.h
index 080d6e2ec1..021d671b29 100644
--- a/include/hw/misc/tz-ppc.h
+++ b/include/hw/misc/tz-ppc.h
@@ -66,13 +66,13 @@
#define TZ_PPC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_TZ_PPC "tz-ppc"
-#define TZ_PPC(obj) OBJECT_CHECK(TZPPC, (obj), TYPE_TZ_PPC)
+OBJECT_DECLARE_SIMPLE_TYPE(TZPPC, TZ_PPC)
#define TZ_NUM_PORTS 16
-typedef struct TZPPC TZPPC;
typedef struct TZPPCPort {
TZPPC *ppc;
diff --git a/include/hw/misc/unimp.h b/include/hw/misc/unimp.h
index 4c1d13c9bf..518d627dc5 100644
--- a/include/hw/misc/unimp.h
+++ b/include/hw/misc/unimp.h
@@ -11,18 +11,19 @@
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
+#include "qom/object.h"
#define TYPE_UNIMPLEMENTED_DEVICE "unimplemented-device"
-#define UNIMPLEMENTED_DEVICE(obj) \
- OBJECT_CHECK(UnimplementedDeviceState, (obj), TYPE_UNIMPLEMENTED_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(UnimplementedDeviceState, UNIMPLEMENTED_DEVICE)
-typedef struct {
+struct UnimplementedDeviceState {
SysBusDevice parent_obj;
MemoryRegion iomem;
+ unsigned offset_fmt_width;
char *name;
uint64_t size;
-} UnimplementedDeviceState;
+};
/**
* create_unimplemented_device: create and map a dummy device
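(Editorial sketch, not part of the patch.) The documentation block above continues past this hunk; the helper it introduces, create_unimplemented_device(), maps a stub region that logs guest accesses instead of faulting. A typical call from a board or SoC model, with a made-up name and address range:

    /* Illustrative only: the name and range are hypothetical. */
    create_unimplemented_device("example-peripheral", 0x40010000, 0x1000);
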
diff --git a/include/hw/misc/virt_ctrl.h b/include/hw/misc/virt_ctrl.h
new file mode 100644
index 0000000000..81346cf017
--- /dev/null
+++ b/include/hw/misc/virt_ctrl.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Virt system controller
+ */
+
+#ifndef VIRT_CTRL_H
+#define VIRT_CTRL_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_VIRT_CTRL "virt-ctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtCtrlState, VIRT_CTRL)
+
+struct VirtCtrlState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t irq_enabled;
+};
+
+#endif
diff --git a/include/hw/misc/vmcoreinfo.h b/include/hw/misc/vmcoreinfo.h
index d4f3d3a91c..0b7b55d400 100644
--- a/include/hw/misc/vmcoreinfo.h
+++ b/include/hw/misc/vmcoreinfo.h
@@ -14,18 +14,21 @@
#include "hw/qdev-core.h"
#include "standard-headers/linux/qemu_fw_cfg.h"
+#include "qom/object.h"
#define VMCOREINFO_DEVICE "vmcoreinfo"
-#define VMCOREINFO(obj) OBJECT_CHECK(VMCoreInfoState, (obj), VMCOREINFO_DEVICE)
+typedef struct VMCoreInfoState VMCoreInfoState;
+DECLARE_INSTANCE_CHECKER(VMCoreInfoState, VMCOREINFO,
+ VMCOREINFO_DEVICE)
typedef struct fw_cfg_vmcoreinfo FWCfgVMCoreInfo;
-typedef struct VMCoreInfoState {
- DeviceClass parent_obj;
+struct VMCoreInfoState {
+ DeviceState parent_obj;
bool has_vmcoreinfo;
FWCfgVMCoreInfo vmcoreinfo;
-} VMCoreInfoState;
+};
/* returns NULL unless there is exactly one device */
static inline VMCoreInfoState *vmcoreinfo_find(void)
diff --git a/include/hw/misc/xlnx-cfi-if.h b/include/hw/misc/xlnx-cfi-if.h
new file mode 100644
index 0000000000..f9bd12292d
--- /dev/null
+++ b/include/hw/misc/xlnx-cfi-if.h
@@ -0,0 +1,59 @@
+/*
+ * Xilinx CFI interface
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef XLNX_CFI_IF_H
+#define XLNX_CFI_IF_H 1
+
+#include "qemu/help-texts.h"
+#include "hw/hw.h"
+#include "qom/object.h"
+
+#define TYPE_XLNX_CFI_IF "xlnx-cfi-if"
+typedef struct XlnxCfiIfClass XlnxCfiIfClass;
+DECLARE_CLASS_CHECKERS(XlnxCfiIfClass, XLNX_CFI_IF, TYPE_XLNX_CFI_IF)
+
+#define XLNX_CFI_IF(obj) \
+ INTERFACE_CHECK(XlnxCfiIf, (obj), TYPE_XLNX_CFI_IF)
+
+typedef enum {
+ PACKET_TYPE_CFU = 0x52,
+ PACKET_TYPE_CFRAME = 0xA1,
+} xlnx_cfi_packet_type;
+
+typedef enum {
+ CFRAME_FAR = 1,
+ CFRAME_SFR = 2,
+ CFRAME_FDRI = 4,
+ CFRAME_CMD = 6,
+} xlnx_cfi_reg_addr;
+
+typedef struct XlnxCfiPacket {
+ uint8_t reg_addr;
+ uint32_t data[4];
+} XlnxCfiPacket;
+
+typedef struct XlnxCfiIf {
+ Object Parent;
+} XlnxCfiIf;
+
+typedef struct XlnxCfiIfClass {
+ InterfaceClass parent;
+
+ void (*cfi_transfer_packet)(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
+} XlnxCfiIfClass;
+
+/**
+ * Transfer an XlnxCfiPacket.
+ *
+ * @cfi_if: the object implementing this interface
+ * @pkt: a pointer to the XlnxCfiPacket to transfer
+ */
+void xlnx_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
+
+#endif /* XLNX_CFI_IF_H */
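(Editorial sketch, not part of the patch.) The interface above exposes a single method plus the xlnx_cfi_transfer_packet() wrapper. A sketch of a caller assembling a packet using only the types and enumerators declared in this header; the function name and command value are illustrative:

    /* Hypothetical caller, not part of the patch. */
    static void send_cfi_cmd_example(XlnxCfiIf *cfi_if, uint32_t cmd)
    {
        XlnxCfiPacket pkt = {
            .reg_addr = CFRAME_CMD,       /* target CFRAME register */
            .data = { cmd, 0, 0, 0 },     /* 128-bit payload as four words */
        };

        xlnx_cfi_transfer_packet(cfi_if, &pkt);
    }
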
diff --git a/include/hw/misc/xlnx-versal-cframe-reg.h b/include/hw/misc/xlnx-versal-cframe-reg.h
new file mode 100644
index 0000000000..83f6a07744
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-cframe-reg.h
@@ -0,0 +1,303 @@
+/*
+ * QEMU model of the Configuration Frame Control module
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * References:
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/CFRAME_REG-Module
+ */
+#ifndef HW_MISC_XLNX_VERSAL_CFRAME_REG_H
+#define HW_MISC_XLNX_VERSAL_CFRAME_REG_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/misc/xlnx-cfi-if.h"
+#include "hw/misc/xlnx-versal-cfu.h"
+#include "qemu/fifo32.h"
+
+#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx-cframe-reg"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG)
+
+#define TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "xlnx.cframe-bcast-reg"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameBcastReg,
+ XLNX_VERSAL_CFRAME_BCAST_REG)
+
+/*
+ * The registers in this module are 128 bits wide, but they can be written
+ * and read through 4 sequential 32-bit accesses (address[3:2] = 0,
+ * 1, 2, 3).
+ */
+REG32(CRC0, 0x0)
+ FIELD(CRC, CRC, 0, 32)
+REG32(CRC1, 0x4)
+REG32(CRC2, 0x8)
+REG32(CRC3, 0xc)
+REG32(FAR0, 0x10)
+ FIELD(FAR0, SEGMENT, 23, 2)
+ FIELD(FAR0, BLOCKTYPE, 20, 3)
+ FIELD(FAR0, FRAME_ADDR, 0, 20)
+REG32(FAR1, 0x14)
+REG32(FAR2, 0x18)
+REG32(FAR3, 0x1c)
+REG32(FAR_SFR0, 0x20)
+ FIELD(FAR_SFR0, BLOCKTYPE, 20, 3)
+ FIELD(FAR_SFR0, FRAME_ADDR, 0, 20)
+REG32(FAR_SFR1, 0x24)
+REG32(FAR_SFR2, 0x28)
+REG32(FAR_SFR3, 0x2c)
+REG32(FDRI0, 0x40)
+REG32(FDRI1, 0x44)
+REG32(FDRI2, 0x48)
+REG32(FDRI3, 0x4c)
+REG32(FRCNT0, 0x50)
+ FIELD(FRCNT0, FRCNT, 0, 32)
+REG32(FRCNT1, 0x54)
+REG32(FRCNT2, 0x58)
+REG32(FRCNT3, 0x5c)
+REG32(CMD0, 0x60)
+ FIELD(CMD0, CMD, 0, 5)
+REG32(CMD1, 0x64)
+REG32(CMD2, 0x68)
+REG32(CMD3, 0x6c)
+REG32(CR_MASK0, 0x70)
+REG32(CR_MASK1, 0x74)
+REG32(CR_MASK2, 0x78)
+REG32(CR_MASK3, 0x7c)
+REG32(CTL0, 0x80)
+ FIELD(CTL, PER_FRAME_CRC, 0, 1)
+REG32(CTL1, 0x84)
+REG32(CTL2, 0x88)
+REG32(CTL3, 0x8c)
+REG32(CFRM_ISR0, 0x150)
+ FIELD(CFRM_ISR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_ISR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_ISR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_ISR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_ISR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_ISR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_ISR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_ISR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_ISR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_ISR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_ISR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_ISR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_ISR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_ISR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_ISR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_ISR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_ISR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_ISR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_ISR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_ISR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_ISR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_ISR1, 0x154)
+REG32(CFRM_ISR2, 0x158)
+REG32(CFRM_ISR3, 0x15c)
+REG32(CFRM_IMR0, 0x160)
+ FIELD(CFRM_IMR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_IMR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_IMR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_IMR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_IMR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_IMR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_IMR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_IMR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_IMR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_IMR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_IMR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_IMR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_IMR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_IMR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_IMR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_IMR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_IMR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_IMR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_IMR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_IMR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_IMR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_IMR1, 0x164)
+REG32(CFRM_IMR2, 0x168)
+REG32(CFRM_IMR3, 0x16c)
+REG32(CFRM_IER0, 0x170)
+ FIELD(CFRM_IER0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_IER0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_IER0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_IER0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_IER0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_IER0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_IER0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_IER0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_IER0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_IER0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_IER0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_IER0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_IER0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_IER0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_IER0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_IER0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_IER0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_IER0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_IER0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_IER0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_IER0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_IER1, 0x174)
+REG32(CFRM_IER2, 0x178)
+REG32(CFRM_IER3, 0x17c)
+REG32(CFRM_IDR0, 0x180)
+ FIELD(CFRM_IDR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_IDR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_IDR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_IDR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_IDR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_IDR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_IDR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_IDR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_IDR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_IDR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_IDR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_IDR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_IDR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_IDR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_IDR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_IDR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_IDR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_IDR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_IDR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_IDR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_IDR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_IDR1, 0x184)
+REG32(CFRM_IDR2, 0x188)
+REG32(CFRM_IDR3, 0x18c)
+REG32(CFRM_ITR0, 0x190)
+ FIELD(CFRM_ITR0, READ_BROADCAST_ERROR, 21, 1)
+ FIELD(CFRM_ITR0, CMD_MISSING_ERROR, 20, 1)
+ FIELD(CFRM_ITR0, RW_ROWOFF_ERROR, 19, 1)
+ FIELD(CFRM_ITR0, READ_REG_ADDR_ERROR, 18, 1)
+ FIELD(CFRM_ITR0, READ_BLK_TYPE_ERROR, 17, 1)
+ FIELD(CFRM_ITR0, READ_FRAME_ADDR_ERROR, 16, 1)
+ FIELD(CFRM_ITR0, WRITE_REG_ADDR_ERROR, 15, 1)
+ FIELD(CFRM_ITR0, WRITE_BLK_TYPE_ERROR, 13, 1)
+ FIELD(CFRM_ITR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
+ FIELD(CFRM_ITR0, MFW_OVERRUN_ERROR, 11, 1)
+ FIELD(CFRM_ITR0, FAR_FIFO_UNDERFLOW, 10, 1)
+ FIELD(CFRM_ITR0, FAR_FIFO_OVERFLOW, 9, 1)
+ FIELD(CFRM_ITR0, PER_FRAME_SEQ_ERROR, 8, 1)
+ FIELD(CFRM_ITR0, CRC_ERROR, 7, 1)
+ FIELD(CFRM_ITR0, WRITE_OVERRUN_ERROR, 6, 1)
+ FIELD(CFRM_ITR0, READ_OVERRUN_ERROR, 5, 1)
+ FIELD(CFRM_ITR0, CMD_INTERRUPT_ERROR, 4, 1)
+ FIELD(CFRM_ITR0, WRITE_INTERRUPT_ERROR, 3, 1)
+ FIELD(CFRM_ITR0, READ_INTERRUPT_ERROR, 2, 1)
+ FIELD(CFRM_ITR0, SEU_CRC_ERROR, 1, 1)
+ FIELD(CFRM_ITR0, SEU_ECC_ERROR, 0, 1)
+REG32(CFRM_ITR1, 0x194)
+REG32(CFRM_ITR2, 0x198)
+REG32(CFRM_ITR3, 0x19c)
+REG32(SEU_SYNDRM00, 0x1a0)
+REG32(SEU_SYNDRM01, 0x1a4)
+REG32(SEU_SYNDRM02, 0x1a8)
+REG32(SEU_SYNDRM03, 0x1ac)
+REG32(SEU_SYNDRM10, 0x1b0)
+REG32(SEU_SYNDRM11, 0x1b4)
+REG32(SEU_SYNDRM12, 0x1b8)
+REG32(SEU_SYNDRM13, 0x1bc)
+REG32(SEU_SYNDRM20, 0x1c0)
+REG32(SEU_SYNDRM21, 0x1c4)
+REG32(SEU_SYNDRM22, 0x1c8)
+REG32(SEU_SYNDRM23, 0x1cc)
+REG32(SEU_SYNDRM30, 0x1d0)
+REG32(SEU_SYNDRM31, 0x1d4)
+REG32(SEU_SYNDRM32, 0x1d8)
+REG32(SEU_SYNDRM33, 0x1dc)
+REG32(SEU_VIRTUAL_SYNDRM0, 0x1e0)
+REG32(SEU_VIRTUAL_SYNDRM1, 0x1e4)
+REG32(SEU_VIRTUAL_SYNDRM2, 0x1e8)
+REG32(SEU_VIRTUAL_SYNDRM3, 0x1ec)
+REG32(SEU_CRC0, 0x1f0)
+REG32(SEU_CRC1, 0x1f4)
+REG32(SEU_CRC2, 0x1f8)
+REG32(SEU_CRC3, 0x1fc)
+REG32(CFRAME_FAR_BOT0, 0x200)
+REG32(CFRAME_FAR_BOT1, 0x204)
+REG32(CFRAME_FAR_BOT2, 0x208)
+REG32(CFRAME_FAR_BOT3, 0x20c)
+REG32(CFRAME_FAR_TOP0, 0x210)
+REG32(CFRAME_FAR_TOP1, 0x214)
+REG32(CFRAME_FAR_TOP2, 0x218)
+REG32(CFRAME_FAR_TOP3, 0x21c)
+REG32(LAST_FRAME_BOT0, 0x220)
+ FIELD(LAST_FRAME_BOT0, BLOCKTYPE1_LAST_FRAME_LSB, 20, 12)
+ FIELD(LAST_FRAME_BOT0, BLOCKTYPE0_LAST_FRAME, 0, 20)
+REG32(LAST_FRAME_BOT1, 0x224)
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE3_LAST_FRAME_LSB, 28, 4)
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE2_LAST_FRAME, 8, 20)
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE1_LAST_FRAME_MSB, 0, 8)
+REG32(LAST_FRAME_BOT2, 0x228)
+ FIELD(LAST_FRAME_BOT2, BLOCKTYPE3_LAST_FRAME_MSB, 0, 16)
+REG32(LAST_FRAME_BOT3, 0x22c)
+REG32(LAST_FRAME_TOP0, 0x230)
+ FIELD(LAST_FRAME_TOP0, BLOCKTYPE5_LAST_FRAME_LSB, 20, 12)
+ FIELD(LAST_FRAME_TOP0, BLOCKTYPE4_LAST_FRAME, 0, 20)
+REG32(LAST_FRAME_TOP1, 0x234)
+ FIELD(LAST_FRAME_TOP1, BLOCKTYPE6_LAST_FRAME, 8, 20)
+ FIELD(LAST_FRAME_TOP1, BLOCKTYPE5_LAST_FRAME_MSB, 0, 8)
+REG32(LAST_FRAME_TOP2, 0x238)
+REG32(LAST_FRAME_TOP3, 0x23c)
+
+#define CFRAME_REG_R_MAX (R_LAST_FRAME_TOP3 + 1)
+
+#define FRAME_NUM_QWORDS 25
+#define FRAME_NUM_WORDS (FRAME_NUM_QWORDS * 4) /* 25 * 128 bits */
+
+typedef struct XlnxCFrame {
+ uint32_t data[FRAME_NUM_WORDS];
+} XlnxCFrame;
+
+struct XlnxVersalCFrameReg {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ MemoryRegion iomem_fdri;
+ qemu_irq irq_cfrm_imr;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ uint32_t regs[CFRAME_REG_R_MAX];
+ RegisterInfo regs_info[CFRAME_REG_R_MAX];
+
+ bool rowon;
+ bool wcfg;
+ bool rcfg;
+
+ GTree *cframes;
+ Fifo32 new_f_data;
+
+ struct {
+ XlnxCfiIf *cfu_fdro;
+ uint32_t blktype_num_frames[7];
+ } cfg;
+ bool row_configured;
+};
+
+struct XlnxVersalCFrameBcastReg {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem_reg;
+ MemoryRegion iomem_fdri;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ struct {
+ XlnxCfiIf *cframe[15];
+ } cfg;
+};
+
+#endif
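(Editorial sketch, not part of the patch.) As the comment near the top of this header notes, the CFRAME registers are 128 bits wide and are accessed as four consecutive 32-bit words selected by address[3:2]. A sketch of the guest-side address arithmetic; the base address and the write32 callback are assumptions for illustration:

    /* Illustrative only, not part of the patch. */
    #define CFRAME_REG_BASE_EXAMPLE 0xf12d0000u   /* hypothetical base address */

    static void write_far_example(void (*write32)(uint32_t addr, uint32_t val),
                                  const uint32_t far[4])
    {
        for (int i = 0; i < 4; i++) {
            /* address[3:2] = i selects word i of the 128-bit FAR register. */
            write32(CFRAME_REG_BASE_EXAMPLE + A_FAR0 + 4 * i, far[i]);
        }
    }
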
diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h
new file mode 100644
index 0000000000..3de3ee4923
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-cfu.h
@@ -0,0 +1,258 @@
+/*
+ * QEMU model of the CFU Configuration Unit.
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * References:
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/CFU_CSR-Module
+ */
+#ifndef HW_MISC_XLNX_VERSAL_CFU_APB_H
+#define HW_MISC_XLNX_VERSAL_CFU_APB_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/misc/xlnx-cfi-if.h"
+#include "qemu/fifo32.h"
+
+#define TYPE_XLNX_VERSAL_CFU_APB "xlnx-versal-cfu-apb"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB)
+
+#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx-versal-cfu-fdro"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO)
+
+#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx-versal-cfu-sfr"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUSFR, XLNX_VERSAL_CFU_SFR)
+
+REG32(CFU_ISR, 0x0)
+ FIELD(CFU_ISR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_ISR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_ISR, SLVERR, 7, 1)
+ FIELD(CFU_ISR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_ISR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_ISR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_ISR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_ISR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_ISR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_ISR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_IMR, 0x4)
+ FIELD(CFU_IMR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_IMR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_IMR, SLVERR, 7, 1)
+ FIELD(CFU_IMR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_IMR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_IMR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_IMR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_IMR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_IMR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_IMR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_IER, 0x8)
+ FIELD(CFU_IER, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_IER, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_IER, SLVERR, 7, 1)
+ FIELD(CFU_IER, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_IER, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_IER, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_IER, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_IER, CRC32_ERROR, 2, 1)
+ FIELD(CFU_IER, CRC8_ERROR, 1, 1)
+ FIELD(CFU_IER, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_IDR, 0xc)
+ FIELD(CFU_IDR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_IDR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_IDR, SLVERR, 7, 1)
+ FIELD(CFU_IDR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_IDR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_IDR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_IDR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_IDR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_IDR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_IDR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_ITR, 0x10)
+ FIELD(CFU_ITR, USR_GTS_EVENT, 9, 1)
+ FIELD(CFU_ITR, USR_GSR_EVENT, 8, 1)
+ FIELD(CFU_ITR, SLVERR, 7, 1)
+ FIELD(CFU_ITR, DECOMP_ERROR, 6, 1)
+ FIELD(CFU_ITR, BAD_CFI_PACKET, 5, 1)
+ FIELD(CFU_ITR, AXI_ALIGN_ERROR, 4, 1)
+ FIELD(CFU_ITR, CFI_ROW_ERROR, 3, 1)
+ FIELD(CFU_ITR, CRC32_ERROR, 2, 1)
+ FIELD(CFU_ITR, CRC8_ERROR, 1, 1)
+ FIELD(CFU_ITR, SEU_ENDOFCALIB, 0, 1)
+REG32(CFU_PROTECT, 0x14)
+ FIELD(CFU_PROTECT, ACTIVE, 0, 1)
+REG32(CFU_FGCR, 0x18)
+ FIELD(CFU_FGCR, GCLK_CAL, 14, 1)
+ FIELD(CFU_FGCR, SC_HBC_TRIGGER, 13, 1)
+ FIELD(CFU_FGCR, GLOW, 12, 1)
+ FIELD(CFU_FGCR, GPWRDWN, 11, 1)
+ FIELD(CFU_FGCR, GCAP, 10, 1)
+ FIELD(CFU_FGCR, GSCWE, 9, 1)
+ FIELD(CFU_FGCR, GHIGH_B, 8, 1)
+ FIELD(CFU_FGCR, GMC_B, 7, 1)
+ FIELD(CFU_FGCR, GWE, 6, 1)
+ FIELD(CFU_FGCR, GRESTORE, 5, 1)
+ FIELD(CFU_FGCR, GTS_CFG_B, 4, 1)
+ FIELD(CFU_FGCR, GLUTMASK, 3, 1)
+ FIELD(CFU_FGCR, EN_GLOBS_B, 2, 1)
+ FIELD(CFU_FGCR, EOS, 1, 1)
+ FIELD(CFU_FGCR, INIT_COMPLETE, 0, 1)
+REG32(CFU_CTL, 0x1c)
+ FIELD(CFU_CTL, GSR_GSC, 15, 1)
+ FIELD(CFU_CTL, SLVERR_EN, 14, 1)
+ FIELD(CFU_CTL, CRC32_RESET, 13, 1)
+ FIELD(CFU_CTL, AXI_ERROR_EN, 12, 1)
+ FIELD(CFU_CTL, FLUSH_AXI, 11, 1)
+ FIELD(CFU_CTL, SSI_PER_SLR_PR, 10, 1)
+ FIELD(CFU_CTL, GCAP_CLK_EN, 9, 1)
+ FIELD(CFU_CTL, STATUS_SYNC_DISABLE, 8, 1)
+ FIELD(CFU_CTL, IGNORE_CFI_ERROR, 7, 1)
+ FIELD(CFU_CTL, CFRAME_DISABLE, 6, 1)
+ FIELD(CFU_CTL, QWORD_CNT_RESET, 5, 1)
+ FIELD(CFU_CTL, CRC8_DISABLE, 4, 1)
+ FIELD(CFU_CTL, CRC32_CHECK, 3, 1)
+ FIELD(CFU_CTL, DECOMPRESS, 2, 1)
+ FIELD(CFU_CTL, SEU_GO, 1, 1)
+ FIELD(CFU_CTL, CFI_LOCAL_RESET, 0, 1)
+REG32(CFU_CRAM_RW, 0x20)
+ FIELD(CFU_CRAM_RW, RFIFO_AFULL_DEPTH, 18, 9)
+ FIELD(CFU_CRAM_RW, RD_WAVE_CNT_LEFT, 12, 6)
+ FIELD(CFU_CRAM_RW, RD_WAVE_CNT, 6, 6)
+ FIELD(CFU_CRAM_RW, WR_WAVE_CNT, 0, 6)
+REG32(CFU_MASK, 0x28)
+REG32(CFU_CRC_EXPECT, 0x2c)
+REG32(CFU_CFRAME_LEFT_T0, 0x60)
+ FIELD(CFU_CFRAME_LEFT_T0, NUM, 0, 20)
+REG32(CFU_CFRAME_LEFT_T1, 0x64)
+ FIELD(CFU_CFRAME_LEFT_T1, NUM, 0, 20)
+REG32(CFU_CFRAME_LEFT_T2, 0x68)
+ FIELD(CFU_CFRAME_LEFT_T2, NUM, 0, 20)
+REG32(CFU_ROW_RANGE, 0x6c)
+ FIELD(CFU_ROW_RANGE, HALF_FSR, 5, 1)
+ FIELD(CFU_ROW_RANGE, NUM, 0, 5)
+REG32(CFU_STATUS, 0x100)
+ FIELD(CFU_STATUS, SEU_WRITE_ERROR, 30, 1)
+ FIELD(CFU_STATUS, FRCNT_ERROR, 29, 1)
+ FIELD(CFU_STATUS, RSVD_ERROR, 28, 1)
+ FIELD(CFU_STATUS, FDRO_ERROR, 27, 1)
+ FIELD(CFU_STATUS, FDRI_ERROR, 26, 1)
+ FIELD(CFU_STATUS, FDRI_READ_ERROR, 25, 1)
+ FIELD(CFU_STATUS, READ_FDRI_ERROR, 24, 1)
+ FIELD(CFU_STATUS, READ_SFR_ERROR, 23, 1)
+ FIELD(CFU_STATUS, READ_STREAM_ERROR, 22, 1)
+ FIELD(CFU_STATUS, UNKNOWN_STREAM_PKT, 21, 1)
+ FIELD(CFU_STATUS, USR_GTS, 20, 1)
+ FIELD(CFU_STATUS, USR_GSR, 19, 1)
+ FIELD(CFU_STATUS, AXI_BAD_WSTRB, 18, 1)
+ FIELD(CFU_STATUS, AXI_BAD_AR_SIZE, 17, 1)
+ FIELD(CFU_STATUS, AXI_BAD_AW_SIZE, 16, 1)
+ FIELD(CFU_STATUS, AXI_BAD_ARADDR, 15, 1)
+ FIELD(CFU_STATUS, AXI_BAD_AWADDR, 14, 1)
+ FIELD(CFU_STATUS, SCAN_CLEAR_PASS, 13, 1)
+ FIELD(CFU_STATUS, HC_SEC_ERROR, 12, 1)
+ FIELD(CFU_STATUS, GHIGH_B_ISHIGH, 11, 1)
+ FIELD(CFU_STATUS, GHIGH_B_ISLOW, 10, 1)
+ FIELD(CFU_STATUS, GMC_B_ISHIGH, 9, 1)
+ FIELD(CFU_STATUS, GMC_B_ISLOW, 8, 1)
+ FIELD(CFU_STATUS, GPWRDWN_B_ISHIGH, 7, 1)
+ FIELD(CFU_STATUS, CFI_SEU_CRC_ERROR, 6, 1)
+ FIELD(CFU_STATUS, CFI_SEU_ECC_ERROR, 5, 1)
+ FIELD(CFU_STATUS, CFI_SEU_HEARTBEAT, 4, 1)
+ FIELD(CFU_STATUS, SCAN_CLEAR_DONE, 3, 1)
+ FIELD(CFU_STATUS, HC_COMPLETE, 2, 1)
+ FIELD(CFU_STATUS, CFI_CFRAME_BUSY, 1, 1)
+ FIELD(CFU_STATUS, CFU_STREAM_BUSY, 0, 1)
+REG32(CFU_INTERNAL_STATUS, 0x104)
+ FIELD(CFU_INTERNAL_STATUS, SSI_EOS, 22, 1)
+ FIELD(CFU_INTERNAL_STATUS, SSI_GWE, 21, 1)
+ FIELD(CFU_INTERNAL_STATUS, RFIFO_EMPTY, 20, 1)
+ FIELD(CFU_INTERNAL_STATUS, RFIFO_FULL, 19, 1)
+ FIELD(CFU_INTERNAL_STATUS, SEL_SFR, 18, 1)
+ FIELD(CFU_INTERNAL_STATUS, STREAM_CFRAME, 17, 1)
+ FIELD(CFU_INTERNAL_STATUS, FDRI_PHASE, 16, 1)
+ FIELD(CFU_INTERNAL_STATUS, CFI_PIPE_EN, 15, 1)
+ FIELD(CFU_INTERNAL_STATUS, AWFIFO_DCNT, 10, 5)
+ FIELD(CFU_INTERNAL_STATUS, WFIFO_DCNT, 5, 5)
+ FIELD(CFU_INTERNAL_STATUS, REPAIR_BUSY, 4, 1)
+ FIELD(CFU_INTERNAL_STATUS, TRIMU_BUSY, 3, 1)
+ FIELD(CFU_INTERNAL_STATUS, TRIMB_BUSY, 2, 1)
+ FIELD(CFU_INTERNAL_STATUS, HCLEANR_BUSY, 1, 1)
+ FIELD(CFU_INTERNAL_STATUS, HCLEAN_BUSY, 0, 1)
+REG32(CFU_QWORD_CNT, 0x108)
+REG32(CFU_CRC_LIVE, 0x10c)
+REG32(CFU_PENDING_READ_CNT, 0x110)
+ FIELD(CFU_PENDING_READ_CNT, NUM, 0, 25)
+REG32(CFU_FDRI_CNT, 0x114)
+REG32(CFU_ECO1, 0x118)
+REG32(CFU_ECO2, 0x11c)
+
+#define R_MAX (R_CFU_ECO2 + 1)
+
+#define NUM_STREAM 2
+#define WFIFO_SZ 4
+
+struct XlnxVersalCFUAPB {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ MemoryRegion iomem_stream[NUM_STREAM];
+ qemu_irq irq_cfu_imr;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ uint32_t regs[R_MAX];
+ RegisterInfo regs_info[R_MAX];
+
+ uint8_t fdri_row_addr;
+
+ struct {
+ XlnxCfiIf *cframe[15];
+ } cfg;
+};
+
+
+struct XlnxVersalCFUFDRO {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem_fdro;
+
+ Fifo32 fdro_data;
+};
+
+struct XlnxVersalCFUSFR {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem_sfr;
+
+ /* 128-bit wfifo. */
+ uint32_t wfifo[WFIFO_SZ];
+
+ struct {
+ XlnxVersalCFUAPB *cfu;
+ } cfg;
+};
+
+/**
+ * Helper function for updating a CFI data write FIFO: an array of 4 uint32_t
+ * holding 128 bits of data that are written through 4 sequential 32-bit
+ * accesses. After the last index of the write FIFO (wfifo) has been written,
+ * the data is copied into the secondary FIFO provided by the caller
+ * (wfifo_ret) and the write FIFO is cleared (zeroized).
+ *
+ * @addr: the address used when calculating the wfifo array index to update
+ * @value: the value to write into the wfifo array
+ * @wfifo: the wfifo to update
+ * @wfifo_ret: returns the wfifo data when all 128 bits have been written
+ *
+ * @return: true if all 128 bits have been updated.
+ */
+bool update_wfifo(hwaddr addr, uint64_t value,
+ uint32_t *wfifo, uint32_t *wfifo_ret);
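A minimal sketch of how a helper with these semantics could look, assuming 32-bit aligned accesses and the WFIFO_SZ constant and extract32() helper already available in this header's context (an illustration, not the CFU model's actual implementation):

/* Illustrative only: index by address bits [3:2], return and clear the
 * write FIFO when the last word has been written. */
static inline bool update_wfifo_sketch(hwaddr addr, uint64_t value,
                                       uint32_t *wfifo, uint32_t *wfifo_ret)
{
    unsigned int idx = extract32(addr, 2, 2);   /* word index 0..3 */

    wfifo[idx] = value;
    if (idx == WFIFO_SZ - 1) {
        memcpy(wfifo_ret, wfifo, WFIFO_SZ * sizeof(uint32_t));
        memset(wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
        return true;
    }
    return false;
}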
+
+#endif
diff --git a/include/hw/misc/xlnx-versal-crl.h b/include/hw/misc/xlnx-versal-crl.h
new file mode 100644
index 0000000000..dba6d3585d
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-crl.h
@@ -0,0 +1,235 @@
+/*
+ * QEMU model of the Clock-Reset-LPD (CRL).
+ *
+ * Copyright (c) 2022 Xilinx Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ */
+#ifndef HW_MISC_XLNX_VERSAL_CRL_H
+#define HW_MISC_XLNX_VERSAL_CRL_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "target/arm/cpu-qom.h"
+
+#define TYPE_XLNX_VERSAL_CRL "xlnx-versal-crl"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCRL, XLNX_VERSAL_CRL)
+
+REG32(ERR_CTRL, 0x0)
+ FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1)
+REG32(IR_STATUS, 0x4)
+ FIELD(IR_STATUS, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_MASK, 0x8)
+ FIELD(IR_MASK, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_ENABLE, 0xc)
+ FIELD(IR_ENABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_DISABLE, 0x10)
+ FIELD(IR_DISABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(WPROT, 0x1c)
+ FIELD(WPROT, ACTIVE, 0, 1)
+REG32(PLL_CLK_OTHER_DMN, 0x20)
+ FIELD(PLL_CLK_OTHER_DMN, APLL_BYPASS, 0, 1)
+REG32(RPLL_CTRL, 0x40)
+ FIELD(RPLL_CTRL, POST_SRC, 24, 3)
+ FIELD(RPLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(RPLL_CTRL, CLKOUTDIV, 16, 2)
+ FIELD(RPLL_CTRL, FBDIV, 8, 8)
+ FIELD(RPLL_CTRL, BYPASS, 3, 1)
+ FIELD(RPLL_CTRL, RESET, 0, 1)
+REG32(RPLL_CFG, 0x44)
+ FIELD(RPLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(RPLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(RPLL_CFG, LFHF, 10, 2)
+ FIELD(RPLL_CFG, CP, 5, 4)
+ FIELD(RPLL_CFG, RES, 0, 4)
+REG32(RPLL_FRAC_CFG, 0x48)
+ FIELD(RPLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(RPLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(RPLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(RPLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(RPLL_FRAC_CFG, DATA, 0, 16)
+REG32(PLL_STATUS, 0x50)
+ FIELD(PLL_STATUS, RPLL_STABLE, 2, 1)
+ FIELD(PLL_STATUS, RPLL_LOCK, 0, 1)
+REG32(RPLL_TO_XPD_CTRL, 0x100)
+ FIELD(RPLL_TO_XPD_CTRL, CLKACT, 25, 1)
+ FIELD(RPLL_TO_XPD_CTRL, DIVISOR0, 8, 10)
+REG32(LPD_TOP_SWITCH_CTRL, 0x104)
+ FIELD(LPD_TOP_SWITCH_CTRL, CLKACT_ADMA, 26, 1)
+ FIELD(LPD_TOP_SWITCH_CTRL, CLKACT, 25, 1)
+ FIELD(LPD_TOP_SWITCH_CTRL, DIVISOR0, 8, 10)
+ FIELD(LPD_TOP_SWITCH_CTRL, SRCSEL, 0, 3)
+REG32(LPD_LSBUS_CTRL, 0x108)
+ FIELD(LPD_LSBUS_CTRL, CLKACT, 25, 1)
+ FIELD(LPD_LSBUS_CTRL, DIVISOR0, 8, 10)
+ FIELD(LPD_LSBUS_CTRL, SRCSEL, 0, 3)
+REG32(CPU_R5_CTRL, 0x10c)
+ FIELD(CPU_R5_CTRL, CLKACT_OCM2, 28, 1)
+ FIELD(CPU_R5_CTRL, CLKACT_OCM, 27, 1)
+ FIELD(CPU_R5_CTRL, CLKACT_CORE, 26, 1)
+ FIELD(CPU_R5_CTRL, CLKACT, 25, 1)
+ FIELD(CPU_R5_CTRL, DIVISOR0, 8, 10)
+ FIELD(CPU_R5_CTRL, SRCSEL, 0, 3)
+REG32(IOU_SWITCH_CTRL, 0x114)
+ FIELD(IOU_SWITCH_CTRL, CLKACT, 25, 1)
+ FIELD(IOU_SWITCH_CTRL, DIVISOR0, 8, 10)
+ FIELD(IOU_SWITCH_CTRL, SRCSEL, 0, 3)
+REG32(GEM0_REF_CTRL, 0x118)
+ FIELD(GEM0_REF_CTRL, CLKACT_RX, 27, 1)
+ FIELD(GEM0_REF_CTRL, CLKACT_TX, 26, 1)
+ FIELD(GEM0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(GEM0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(GEM0_REF_CTRL, SRCSEL, 0, 3)
+REG32(GEM1_REF_CTRL, 0x11c)
+ FIELD(GEM1_REF_CTRL, CLKACT_RX, 27, 1)
+ FIELD(GEM1_REF_CTRL, CLKACT_TX, 26, 1)
+ FIELD(GEM1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(GEM1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(GEM1_REF_CTRL, SRCSEL, 0, 3)
+REG32(GEM_TSU_REF_CTRL, 0x120)
+ FIELD(GEM_TSU_REF_CTRL, CLKACT, 25, 1)
+ FIELD(GEM_TSU_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(GEM_TSU_REF_CTRL, SRCSEL, 0, 3)
+REG32(USB0_BUS_REF_CTRL, 0x124)
+ FIELD(USB0_BUS_REF_CTRL, CLKACT, 25, 1)
+ FIELD(USB0_BUS_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(USB0_BUS_REF_CTRL, SRCSEL, 0, 3)
+REG32(UART0_REF_CTRL, 0x128)
+ FIELD(UART0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(UART0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(UART0_REF_CTRL, SRCSEL, 0, 3)
+REG32(UART1_REF_CTRL, 0x12c)
+ FIELD(UART1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(UART1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(UART1_REF_CTRL, SRCSEL, 0, 3)
+REG32(SPI0_REF_CTRL, 0x130)
+ FIELD(SPI0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(SPI0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(SPI0_REF_CTRL, SRCSEL, 0, 3)
+REG32(SPI1_REF_CTRL, 0x134)
+ FIELD(SPI1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(SPI1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(SPI1_REF_CTRL, SRCSEL, 0, 3)
+REG32(CAN0_REF_CTRL, 0x138)
+ FIELD(CAN0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(CAN0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(CAN0_REF_CTRL, SRCSEL, 0, 3)
+REG32(CAN1_REF_CTRL, 0x13c)
+ FIELD(CAN1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(CAN1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(CAN1_REF_CTRL, SRCSEL, 0, 3)
+REG32(I2C0_REF_CTRL, 0x140)
+ FIELD(I2C0_REF_CTRL, CLKACT, 25, 1)
+ FIELD(I2C0_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(I2C0_REF_CTRL, SRCSEL, 0, 3)
+REG32(I2C1_REF_CTRL, 0x144)
+ FIELD(I2C1_REF_CTRL, CLKACT, 25, 1)
+ FIELD(I2C1_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(I2C1_REF_CTRL, SRCSEL, 0, 3)
+REG32(DBG_LPD_CTRL, 0x148)
+ FIELD(DBG_LPD_CTRL, CLKACT, 25, 1)
+ FIELD(DBG_LPD_CTRL, DIVISOR0, 8, 10)
+ FIELD(DBG_LPD_CTRL, SRCSEL, 0, 3)
+REG32(TIMESTAMP_REF_CTRL, 0x14c)
+ FIELD(TIMESTAMP_REF_CTRL, CLKACT, 25, 1)
+ FIELD(TIMESTAMP_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(TIMESTAMP_REF_CTRL, SRCSEL, 0, 3)
+REG32(CRL_SAFETY_CHK, 0x150)
+REG32(PSM_REF_CTRL, 0x154)
+ FIELD(PSM_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(PSM_REF_CTRL, SRCSEL, 0, 3)
+REG32(DBG_TSTMP_CTRL, 0x158)
+ FIELD(DBG_TSTMP_CTRL, CLKACT, 25, 1)
+ FIELD(DBG_TSTMP_CTRL, DIVISOR0, 8, 10)
+ FIELD(DBG_TSTMP_CTRL, SRCSEL, 0, 3)
+REG32(CPM_TOPSW_REF_CTRL, 0x15c)
+ FIELD(CPM_TOPSW_REF_CTRL, CLKACT, 25, 1)
+ FIELD(CPM_TOPSW_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(CPM_TOPSW_REF_CTRL, SRCSEL, 0, 3)
+REG32(USB3_DUAL_REF_CTRL, 0x160)
+ FIELD(USB3_DUAL_REF_CTRL, CLKACT, 25, 1)
+ FIELD(USB3_DUAL_REF_CTRL, DIVISOR0, 8, 10)
+ FIELD(USB3_DUAL_REF_CTRL, SRCSEL, 0, 3)
+REG32(RST_CPU_R5, 0x300)
+ FIELD(RST_CPU_R5, RESET_PGE, 4, 1)
+ FIELD(RST_CPU_R5, RESET_AMBA, 2, 1)
+ FIELD(RST_CPU_R5, RESET_CPU1, 1, 1)
+ FIELD(RST_CPU_R5, RESET_CPU0, 0, 1)
+REG32(RST_ADMA, 0x304)
+ FIELD(RST_ADMA, RESET, 0, 1)
+REG32(RST_GEM0, 0x308)
+ FIELD(RST_GEM0, RESET, 0, 1)
+REG32(RST_GEM1, 0x30c)
+ FIELD(RST_GEM1, RESET, 0, 1)
+REG32(RST_SPARE, 0x310)
+ FIELD(RST_SPARE, RESET, 0, 1)
+REG32(RST_USB0, 0x314)
+ FIELD(RST_USB0, RESET, 0, 1)
+REG32(RST_UART0, 0x318)
+ FIELD(RST_UART0, RESET, 0, 1)
+REG32(RST_UART1, 0x31c)
+ FIELD(RST_UART1, RESET, 0, 1)
+REG32(RST_SPI0, 0x320)
+ FIELD(RST_SPI0, RESET, 0, 1)
+REG32(RST_SPI1, 0x324)
+ FIELD(RST_SPI1, RESET, 0, 1)
+REG32(RST_CAN0, 0x328)
+ FIELD(RST_CAN0, RESET, 0, 1)
+REG32(RST_CAN1, 0x32c)
+ FIELD(RST_CAN1, RESET, 0, 1)
+REG32(RST_I2C0, 0x330)
+ FIELD(RST_I2C0, RESET, 0, 1)
+REG32(RST_I2C1, 0x334)
+ FIELD(RST_I2C1, RESET, 0, 1)
+REG32(RST_DBG_LPD, 0x338)
+ FIELD(RST_DBG_LPD, RPU_DBG1_RESET, 5, 1)
+ FIELD(RST_DBG_LPD, RPU_DBG0_RESET, 4, 1)
+ FIELD(RST_DBG_LPD, RESET_HSDP, 1, 1)
+ FIELD(RST_DBG_LPD, RESET, 0, 1)
+REG32(RST_GPIO, 0x33c)
+ FIELD(RST_GPIO, RESET, 0, 1)
+REG32(RST_TTC, 0x344)
+ FIELD(RST_TTC, TTC3_RESET, 3, 1)
+ FIELD(RST_TTC, TTC2_RESET, 2, 1)
+ FIELD(RST_TTC, TTC1_RESET, 1, 1)
+ FIELD(RST_TTC, TTC0_RESET, 0, 1)
+REG32(RST_TIMESTAMP, 0x348)
+ FIELD(RST_TIMESTAMP, RESET, 0, 1)
+REG32(RST_SWDT, 0x34c)
+ FIELD(RST_SWDT, RESET, 0, 1)
+REG32(RST_OCM, 0x350)
+ FIELD(RST_OCM, RESET, 0, 1)
+REG32(RST_IPI, 0x354)
+ FIELD(RST_IPI, RESET, 0, 1)
+REG32(RST_SYSMON, 0x358)
+ FIELD(RST_SYSMON, SEQ_RST, 1, 1)
+ FIELD(RST_SYSMON, CFG_RST, 0, 1)
+REG32(RST_FPD, 0x360)
+ FIELD(RST_FPD, SRST, 1, 1)
+ FIELD(RST_FPD, POR, 0, 1)
+REG32(PSM_RST_MODE, 0x370)
+ FIELD(PSM_RST_MODE, WAKEUP, 2, 1)
+ FIELD(PSM_RST_MODE, RST_MODE, 0, 2)
+
+#define CRL_R_MAX (R_PSM_RST_MODE + 1)
+
+#define RPU_MAX_CPU 2
+
+struct XlnxVersalCRL {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+
+ struct {
+ ARMCPU *cpu_r5[RPU_MAX_CPU];
+ DeviceState *adma[8];
+ DeviceState *uart[2];
+ DeviceState *gem[2];
+ DeviceState *usb;
+ } cfg;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[CRL_R_MAX];
+ RegisterInfo regs_info[CRL_R_MAX];
+};
+#endif
diff --git a/include/hw/misc/xlnx-versal-pmc-iou-slcr.h b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h
new file mode 100644
index 0000000000..0c4a4fd66d
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h
@@ -0,0 +1,79 @@
+/*
+ * Header file for the Xilinx Versal's PMC IOU SLCR
+ *
+ * Copyright (C) 2021 Xilinx Inc
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is a model of Xilinx Versal's PMC I/O Peripheral Control and Status
+ * module documented in Versal's Technical Reference manual [1] and the Versal
+ * ACAP Register reference [2].
+ *
+ * References:
+ *
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PMC_IOP_SLCR-Module
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion for the device's registers
+ * + sysbus IRQ 0: PMC (AXI and APB) parity error interrupt detected by the PMC
+ * I/O peripherals.
+ * + sysbus IRQ 1: Device interrupt.
+ * + Named GPIO output "sd-emmc-sel[0]": Enables 0: SD mode or 1: eMMC mode on
+ * SD/eMMC controller 0.
+ * + Named GPIO output "sd-emmc-sel[1]": Enables 0: SD mode or 1: eMMC mode on
+ * SD/eMMC controller 1.
+ * + Named GPIO output "qspi-ospi-mux-sel": Selects 0: QSPI linear region or 1:
+ * OSPI linear region.
+ * + Named GPIO output "ospi-mux-sel": Selects 0: OSPI Indirect access mode or
+ * 1: OSPI direct access mode.
+ */
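As a rough illustration of how board code could consume the interface described above, assuming the two mode outputs are registered under the name "sd-emmc-sel" with indices 0 and 1; the base address, IRQ lines and the "emmc-sel" input on the SD controller are made up for this sketch and are not taken from the Versal SoC code:

/* Hypothetical board wiring; assumes the usual sysbus/qdev/error includes. */
static void board_wire_pmc_iou_slcr(qemu_irq parity_irq, qemu_irq device_irq,
                                    DeviceState *sdhci0)
{
    DeviceState *dev = qdev_new(TYPE_XILINX_VERSAL_PMC_IOU_SLCR);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, 0xf1060000);        /* sysbus MMIO region 0 */
    sysbus_connect_irq(sbd, 0, parity_irq);     /* parity error interrupt */
    sysbus_connect_irq(sbd, 1, device_irq);     /* device interrupt */
    /* Route "sd-emmc-sel" output 0 to the SD/eMMC controller's mode input. */
    qdev_connect_gpio_out_named(dev, "sd-emmc-sel", 0,
                                qdev_get_gpio_in_named(sdhci0, "emmc-sel", 0));
}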
+
+#ifndef XLNX_VERSAL_PMC_IOU_SLCR_H
+#define XLNX_VERSAL_PMC_IOU_SLCR_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XILINX_VERSAL_PMC_IOU_SLCR "xlnx.versal-pmc-iou-slcr"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalPmcIouSlcr, XILINX_VERSAL_PMC_IOU_SLCR)
+
+#define XILINX_VERSAL_PMC_IOU_SLCR_R_MAX (0x828 / 4 + 1)
+
+struct XlnxVersalPmcIouSlcr {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_parity_imr;
+ qemu_irq irq_imr;
+ qemu_irq sd_emmc_sel[2];
+ qemu_irq qspi_ospi_mux_sel;
+ qemu_irq ospi_mux_sel;
+
+ uint32_t regs[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX];
+ RegisterInfo regs_info[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX];
+};
+
+#endif /* XLNX_VERSAL_PMC_IOU_SLCR_H */
diff --git a/include/hw/misc/xlnx-versal-trng.h b/include/hw/misc/xlnx-versal-trng.h
new file mode 100644
index 0000000000..0bcef8a613
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-trng.h
@@ -0,0 +1,58 @@
+/*
+ * Non-crypto strength model of the True Random Number Generator
+ * in the AMD/Xilinx Versal device family.
+ *
+ * Copyright (c) 2017-2020 Xilinx Inc.
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_VERSAL_TRNG_H
+#define XLNX_VERSAL_TRNG_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XLNX_VERSAL_TRNG "xlnx.versal-trng"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalTRng, XLNX_VERSAL_TRNG);
+
+#define RMAX_XLNX_VERSAL_TRNG ((0xf0 / 4) + 1)
+
+typedef struct XlnxVersalTRng {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+ GRand *prng;
+
+ uint32_t hw_version;
+ uint32_t forced_faults;
+
+ uint32_t rand_count;
+ uint64_t rand_reseed;
+
+ uint64_t forced_prng_seed;
+ uint64_t forced_prng_count;
+ uint64_t tst_seed[2];
+
+ uint32_t regs[RMAX_XLNX_VERSAL_TRNG];
+ RegisterInfo regs_info[RMAX_XLNX_VERSAL_TRNG];
+} XlnxVersalTRng;
+
+#undef RMAX_XLNX_VERSAL_TRNG
+#endif
diff --git a/include/hw/misc/xlnx-versal-xramc.h b/include/hw/misc/xlnx-versal-xramc.h
new file mode 100644
index 0000000000..d3d1862676
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-xramc.h
@@ -0,0 +1,97 @@
+/*
+ * QEMU model of the Xilinx XRAM Controller.
+ *
+ * Copyright (c) 2021 Xilinx Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ */
+
+#ifndef XLNX_VERSAL_XRAMC_H
+#define XLNX_VERSAL_XRAMC_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XLNX_XRAM_CTRL "xlnx.versal-xramc"
+
+#define XLNX_XRAM_CTRL(obj) \
+ OBJECT_CHECK(XlnxXramCtrl, (obj), TYPE_XLNX_XRAM_CTRL)
+
+REG32(XRAM_ERR_CTRL, 0x0)
+ FIELD(XRAM_ERR_CTRL, UE_RES, 3, 1)
+ FIELD(XRAM_ERR_CTRL, PWR_ERR_RES, 2, 1)
+ FIELD(XRAM_ERR_CTRL, PZ_ERR_RES, 1, 1)
+ FIELD(XRAM_ERR_CTRL, APB_ERR_RES, 0, 1)
+REG32(XRAM_ISR, 0x4)
+ FIELD(XRAM_ISR, INV_APB, 0, 1)
+REG32(XRAM_IMR, 0x8)
+ FIELD(XRAM_IMR, INV_APB, 0, 1)
+REG32(XRAM_IEN, 0xc)
+ FIELD(XRAM_IEN, INV_APB, 0, 1)
+REG32(XRAM_IDS, 0x10)
+ FIELD(XRAM_IDS, INV_APB, 0, 1)
+REG32(XRAM_ECC_CNTL, 0x14)
+ FIELD(XRAM_ECC_CNTL, FI_MODE, 2, 1)
+ FIELD(XRAM_ECC_CNTL, DET_ONLY, 1, 1)
+ FIELD(XRAM_ECC_CNTL, ECC_ON_OFF, 0, 1)
+REG32(XRAM_CLR_EXE, 0x18)
+ FIELD(XRAM_CLR_EXE, MON_7, 7, 1)
+ FIELD(XRAM_CLR_EXE, MON_6, 6, 1)
+ FIELD(XRAM_CLR_EXE, MON_5, 5, 1)
+ FIELD(XRAM_CLR_EXE, MON_4, 4, 1)
+ FIELD(XRAM_CLR_EXE, MON_3, 3, 1)
+ FIELD(XRAM_CLR_EXE, MON_2, 2, 1)
+ FIELD(XRAM_CLR_EXE, MON_1, 1, 1)
+ FIELD(XRAM_CLR_EXE, MON_0, 0, 1)
+REG32(XRAM_CE_FFA, 0x1c)
+ FIELD(XRAM_CE_FFA, ADDR, 0, 20)
+REG32(XRAM_CE_FFD0, 0x20)
+REG32(XRAM_CE_FFD1, 0x24)
+REG32(XRAM_CE_FFD2, 0x28)
+REG32(XRAM_CE_FFD3, 0x2c)
+REG32(XRAM_CE_FFE, 0x30)
+ FIELD(XRAM_CE_FFE, SYNDROME, 0, 16)
+REG32(XRAM_UE_FFA, 0x34)
+ FIELD(XRAM_UE_FFA, ADDR, 0, 20)
+REG32(XRAM_UE_FFD0, 0x38)
+REG32(XRAM_UE_FFD1, 0x3c)
+REG32(XRAM_UE_FFD2, 0x40)
+REG32(XRAM_UE_FFD3, 0x44)
+REG32(XRAM_UE_FFE, 0x48)
+ FIELD(XRAM_UE_FFE, SYNDROME, 0, 16)
+REG32(XRAM_FI_D0, 0x4c)
+REG32(XRAM_FI_D1, 0x50)
+REG32(XRAM_FI_D2, 0x54)
+REG32(XRAM_FI_D3, 0x58)
+REG32(XRAM_FI_SY, 0x5c)
+ FIELD(XRAM_FI_SY, DATA, 0, 16)
+REG32(XRAM_RMW_UE_FFA, 0x70)
+ FIELD(XRAM_RMW_UE_FFA, ADDR, 0, 20)
+REG32(XRAM_FI_CNTR, 0x74)
+ FIELD(XRAM_FI_CNTR, COUNT, 0, 24)
+REG32(XRAM_IMP, 0x80)
+ FIELD(XRAM_IMP, SIZE, 0, 4)
+REG32(XRAM_PRDY_DBG, 0x84)
+ FIELD(XRAM_PRDY_DBG, ISLAND3, 12, 4)
+ FIELD(XRAM_PRDY_DBG, ISLAND2, 8, 4)
+ FIELD(XRAM_PRDY_DBG, ISLAND1, 4, 4)
+ FIELD(XRAM_PRDY_DBG, ISLAND0, 0, 4)
+REG32(XRAM_SAFETY_CHK, 0xff8)
+
+#define XRAM_CTRL_R_MAX (R_XRAM_SAFETY_CHK + 1)
+
+typedef struct XlnxXramCtrl {
+ SysBusDevice parent_obj;
+ MemoryRegion ram;
+ qemu_irq irq;
+
+ struct {
+ uint64_t size;
+ unsigned int encoded_size;
+ } cfg;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[XRAM_CTRL_R_MAX];
+ RegisterInfo regs_info[XRAM_CTRL_R_MAX];
+} XlnxXramCtrl;
+#endif
diff --git a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h
new file mode 100644
index 0000000000..c3bf3c1583
--- /dev/null
+++ b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h
@@ -0,0 +1,93 @@
+/*
+ * QEMU model of ZynqMP APU Control.
+ *
+ * Copyright (c) 2013-2022 Xilinx Inc
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com> and
+ * Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ *
+ */
+#ifndef HW_MISC_XLNX_ZYNQMP_APU_CTRL_H
+#define HW_MISC_XLNX_ZYNQMP_APU_CTRL_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "target/arm/cpu-qom.h"
+
+#define TYPE_XLNX_ZYNQMP_APU_CTRL "xlnx.apu-ctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPAPUCtrl, XLNX_ZYNQMP_APU_CTRL)
+
+REG32(APU_ERR_CTRL, 0x0)
+ FIELD(APU_ERR_CTRL, PSLVERR, 0, 1)
+REG32(ISR, 0x10)
+ FIELD(ISR, INV_APB, 0, 1)
+REG32(IMR, 0x14)
+ FIELD(IMR, INV_APB, 0, 1)
+REG32(IEN, 0x18)
+ FIELD(IEN, INV_APB, 0, 1)
+REG32(IDS, 0x1c)
+ FIELD(IDS, INV_APB, 0, 1)
+REG32(CONFIG_0, 0x20)
+ FIELD(CONFIG_0, CFGTE, 24, 4)
+ FIELD(CONFIG_0, CFGEND, 16, 4)
+ FIELD(CONFIG_0, VINITHI, 8, 4)
+ FIELD(CONFIG_0, AA64NAA32, 0, 4)
+REG32(CONFIG_1, 0x24)
+ FIELD(CONFIG_1, L2RSTDISABLE, 29, 1)
+ FIELD(CONFIG_1, L1RSTDISABLE, 28, 1)
+ FIELD(CONFIG_1, CP15DISABLE, 0, 4)
+REG32(RVBARADDR0L, 0x40)
+ FIELD(RVBARADDR0L, ADDR, 2, 30)
+REG32(RVBARADDR0H, 0x44)
+ FIELD(RVBARADDR0H, ADDR, 0, 8)
+REG32(RVBARADDR1L, 0x48)
+ FIELD(RVBARADDR1L, ADDR, 2, 30)
+REG32(RVBARADDR1H, 0x4c)
+ FIELD(RVBARADDR1H, ADDR, 0, 8)
+REG32(RVBARADDR2L, 0x50)
+ FIELD(RVBARADDR2L, ADDR, 2, 30)
+REG32(RVBARADDR2H, 0x54)
+ FIELD(RVBARADDR2H, ADDR, 0, 8)
+REG32(RVBARADDR3L, 0x58)
+ FIELD(RVBARADDR3L, ADDR, 2, 30)
+REG32(RVBARADDR3H, 0x5c)
+ FIELD(RVBARADDR3H, ADDR, 0, 8)
+REG32(ACE_CTRL, 0x60)
+ FIELD(ACE_CTRL, AWQOS, 16, 4)
+ FIELD(ACE_CTRL, ARQOS, 0, 4)
+REG32(SNOOP_CTRL, 0x80)
+ FIELD(SNOOP_CTRL, ACE_INACT, 4, 1)
+ FIELD(SNOOP_CTRL, ACP_INACT, 0, 1)
+REG32(PWRCTL, 0x90)
+ FIELD(PWRCTL, CLREXMONREQ, 17, 1)
+ FIELD(PWRCTL, L2FLUSHREQ, 16, 1)
+ FIELD(PWRCTL, CPUPWRDWNREQ, 0, 4)
+REG32(PWRSTAT, 0x94)
+ FIELD(PWRSTAT, CLREXMONACK, 17, 1)
+ FIELD(PWRSTAT, L2FLUSHDONE, 16, 1)
+ FIELD(PWRSTAT, DBGNOPWRDWN, 0, 4)
+
+#define APU_R_MAX ((R_PWRSTAT) + 1)
+
+#define APU_MAX_CPU 4
+
+struct XlnxZynqMPAPUCtrl {
+ SysBusDevice busdev;
+
+ ARMCPU *cpus[APU_MAX_CPU];
+ /* WFIs towards PMU. */
+ qemu_irq wfi_out[4];
+ /* CPU Power status towards INTC Redirect. */
+ qemu_irq cpu_power_status[4];
+ qemu_irq irq_imr;
+
+ uint8_t cpu_pwrdwn_req;
+ uint8_t cpu_in_wfi;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[APU_R_MAX];
+ RegisterInfo regs_info[APU_R_MAX];
+};
+
+#endif
diff --git a/include/hw/misc/xlnx-zynqmp-crf.h b/include/hw/misc/xlnx-zynqmp-crf.h
new file mode 100644
index 0000000000..02ef0bdeee
--- /dev/null
+++ b/include/hw/misc/xlnx-zynqmp-crf.h
@@ -0,0 +1,211 @@
+/*
+ * QEMU model of the CRF - Clock Reset FPD.
+ *
+ * Copyright (c) 2022 Xilinx Inc.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ */
+#ifndef HW_MISC_XLNX_ZYNQMP_CRF_H
+#define HW_MISC_XLNX_ZYNQMP_CRF_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define TYPE_XLNX_ZYNQMP_CRF "xlnx.zynqmp_crf"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPCRF, XLNX_ZYNQMP_CRF)
+
+REG32(ERR_CTRL, 0x0)
+ FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1)
+REG32(IR_STATUS, 0x4)
+ FIELD(IR_STATUS, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_MASK, 0x8)
+ FIELD(IR_MASK, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_ENABLE, 0xc)
+ FIELD(IR_ENABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(IR_DISABLE, 0x10)
+ FIELD(IR_DISABLE, ADDR_DECODE_ERR, 0, 1)
+REG32(CRF_WPROT, 0x1c)
+ FIELD(CRF_WPROT, ACTIVE, 0, 1)
+REG32(APLL_CTRL, 0x20)
+ FIELD(APLL_CTRL, POST_SRC, 24, 3)
+ FIELD(APLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(APLL_CTRL, CLKOUTDIV, 17, 1)
+ FIELD(APLL_CTRL, DIV2, 16, 1)
+ FIELD(APLL_CTRL, FBDIV, 8, 7)
+ FIELD(APLL_CTRL, BYPASS, 3, 1)
+ FIELD(APLL_CTRL, RESET, 0, 1)
+REG32(APLL_CFG, 0x24)
+ FIELD(APLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(APLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(APLL_CFG, LFHF, 10, 2)
+ FIELD(APLL_CFG, CP, 5, 4)
+ FIELD(APLL_CFG, RES, 0, 4)
+REG32(APLL_FRAC_CFG, 0x28)
+ FIELD(APLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(APLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(APLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(APLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(APLL_FRAC_CFG, DATA, 0, 16)
+REG32(DPLL_CTRL, 0x2c)
+ FIELD(DPLL_CTRL, POST_SRC, 24, 3)
+ FIELD(DPLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(DPLL_CTRL, CLKOUTDIV, 17, 1)
+ FIELD(DPLL_CTRL, DIV2, 16, 1)
+ FIELD(DPLL_CTRL, FBDIV, 8, 7)
+ FIELD(DPLL_CTRL, BYPASS, 3, 1)
+ FIELD(DPLL_CTRL, RESET, 0, 1)
+REG32(DPLL_CFG, 0x30)
+ FIELD(DPLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(DPLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(DPLL_CFG, LFHF, 10, 2)
+ FIELD(DPLL_CFG, CP, 5, 4)
+ FIELD(DPLL_CFG, RES, 0, 4)
+REG32(DPLL_FRAC_CFG, 0x34)
+ FIELD(DPLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(DPLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(DPLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(DPLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(DPLL_FRAC_CFG, DATA, 0, 16)
+REG32(VPLL_CTRL, 0x38)
+ FIELD(VPLL_CTRL, POST_SRC, 24, 3)
+ FIELD(VPLL_CTRL, PRE_SRC, 20, 3)
+ FIELD(VPLL_CTRL, CLKOUTDIV, 17, 1)
+ FIELD(VPLL_CTRL, DIV2, 16, 1)
+ FIELD(VPLL_CTRL, FBDIV, 8, 7)
+ FIELD(VPLL_CTRL, BYPASS, 3, 1)
+ FIELD(VPLL_CTRL, RESET, 0, 1)
+REG32(VPLL_CFG, 0x3c)
+ FIELD(VPLL_CFG, LOCK_DLY, 25, 7)
+ FIELD(VPLL_CFG, LOCK_CNT, 13, 10)
+ FIELD(VPLL_CFG, LFHF, 10, 2)
+ FIELD(VPLL_CFG, CP, 5, 4)
+ FIELD(VPLL_CFG, RES, 0, 4)
+REG32(VPLL_FRAC_CFG, 0x40)
+ FIELD(VPLL_FRAC_CFG, ENABLED, 31, 1)
+ FIELD(VPLL_FRAC_CFG, SEED, 22, 3)
+ FIELD(VPLL_FRAC_CFG, ALGRTHM, 19, 1)
+ FIELD(VPLL_FRAC_CFG, ORDER, 18, 1)
+ FIELD(VPLL_FRAC_CFG, DATA, 0, 16)
+REG32(PLL_STATUS, 0x44)
+ FIELD(PLL_STATUS, VPLL_STABLE, 5, 1)
+ FIELD(PLL_STATUS, DPLL_STABLE, 4, 1)
+ FIELD(PLL_STATUS, APLL_STABLE, 3, 1)
+ FIELD(PLL_STATUS, VPLL_LOCK, 2, 1)
+ FIELD(PLL_STATUS, DPLL_LOCK, 1, 1)
+ FIELD(PLL_STATUS, APLL_LOCK, 0, 1)
+REG32(APLL_TO_LPD_CTRL, 0x48)
+ FIELD(APLL_TO_LPD_CTRL, DIVISOR0, 8, 6)
+REG32(DPLL_TO_LPD_CTRL, 0x4c)
+ FIELD(DPLL_TO_LPD_CTRL, DIVISOR0, 8, 6)
+REG32(VPLL_TO_LPD_CTRL, 0x50)
+ FIELD(VPLL_TO_LPD_CTRL, DIVISOR0, 8, 6)
+REG32(ACPU_CTRL, 0x60)
+ FIELD(ACPU_CTRL, CLKACT_HALF, 25, 1)
+ FIELD(ACPU_CTRL, CLKACT_FULL, 24, 1)
+ FIELD(ACPU_CTRL, DIVISOR0, 8, 6)
+ FIELD(ACPU_CTRL, SRCSEL, 0, 3)
+REG32(DBG_TRACE_CTRL, 0x64)
+ FIELD(DBG_TRACE_CTRL, CLKACT, 24, 1)
+ FIELD(DBG_TRACE_CTRL, DIVISOR0, 8, 6)
+ FIELD(DBG_TRACE_CTRL, SRCSEL, 0, 3)
+REG32(DBG_FPD_CTRL, 0x68)
+ FIELD(DBG_FPD_CTRL, CLKACT, 24, 1)
+ FIELD(DBG_FPD_CTRL, DIVISOR0, 8, 6)
+ FIELD(DBG_FPD_CTRL, SRCSEL, 0, 3)
+REG32(DP_VIDEO_REF_CTRL, 0x70)
+ FIELD(DP_VIDEO_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DP_VIDEO_REF_CTRL, DIVISOR1, 16, 6)
+ FIELD(DP_VIDEO_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DP_VIDEO_REF_CTRL, SRCSEL, 0, 3)
+REG32(DP_AUDIO_REF_CTRL, 0x74)
+ FIELD(DP_AUDIO_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DP_AUDIO_REF_CTRL, DIVISOR1, 16, 6)
+ FIELD(DP_AUDIO_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DP_AUDIO_REF_CTRL, SRCSEL, 0, 3)
+REG32(DP_STC_REF_CTRL, 0x7c)
+ FIELD(DP_STC_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DP_STC_REF_CTRL, DIVISOR1, 16, 6)
+ FIELD(DP_STC_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DP_STC_REF_CTRL, SRCSEL, 0, 3)
+REG32(DDR_CTRL, 0x80)
+ FIELD(DDR_CTRL, CLKACT, 24, 1)
+ FIELD(DDR_CTRL, DIVISOR0, 8, 6)
+ FIELD(DDR_CTRL, SRCSEL, 0, 3)
+REG32(GPU_REF_CTRL, 0x84)
+ FIELD(GPU_REF_CTRL, PP1_CLKACT, 26, 1)
+ FIELD(GPU_REF_CTRL, PP0_CLKACT, 25, 1)
+ FIELD(GPU_REF_CTRL, CLKACT, 24, 1)
+ FIELD(GPU_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(GPU_REF_CTRL, SRCSEL, 0, 3)
+REG32(SATA_REF_CTRL, 0xa0)
+ FIELD(SATA_REF_CTRL, CLKACT, 24, 1)
+ FIELD(SATA_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(SATA_REF_CTRL, SRCSEL, 0, 3)
+REG32(PCIE_REF_CTRL, 0xb4)
+ FIELD(PCIE_REF_CTRL, CLKACT, 24, 1)
+ FIELD(PCIE_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(PCIE_REF_CTRL, SRCSEL, 0, 3)
+REG32(GDMA_REF_CTRL, 0xb8)
+ FIELD(GDMA_REF_CTRL, CLKACT, 24, 1)
+ FIELD(GDMA_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(GDMA_REF_CTRL, SRCSEL, 0, 3)
+REG32(DPDMA_REF_CTRL, 0xbc)
+ FIELD(DPDMA_REF_CTRL, CLKACT, 24, 1)
+ FIELD(DPDMA_REF_CTRL, DIVISOR0, 8, 6)
+ FIELD(DPDMA_REF_CTRL, SRCSEL, 0, 3)
+REG32(TOPSW_MAIN_CTRL, 0xc0)
+ FIELD(TOPSW_MAIN_CTRL, CLKACT, 24, 1)
+ FIELD(TOPSW_MAIN_CTRL, DIVISOR0, 8, 6)
+ FIELD(TOPSW_MAIN_CTRL, SRCSEL, 0, 3)
+REG32(TOPSW_LSBUS_CTRL, 0xc4)
+ FIELD(TOPSW_LSBUS_CTRL, CLKACT, 24, 1)
+ FIELD(TOPSW_LSBUS_CTRL, DIVISOR0, 8, 6)
+ FIELD(TOPSW_LSBUS_CTRL, SRCSEL, 0, 3)
+REG32(DBG_TSTMP_CTRL, 0xf8)
+ FIELD(DBG_TSTMP_CTRL, DIVISOR0, 8, 6)
+ FIELD(DBG_TSTMP_CTRL, SRCSEL, 0, 3)
+REG32(RST_FPD_TOP, 0x100)
+ FIELD(RST_FPD_TOP, PCIE_CFG_RESET, 19, 1)
+ FIELD(RST_FPD_TOP, PCIE_BRIDGE_RESET, 18, 1)
+ FIELD(RST_FPD_TOP, PCIE_CTRL_RESET, 17, 1)
+ FIELD(RST_FPD_TOP, DP_RESET, 16, 1)
+ FIELD(RST_FPD_TOP, SWDT_RESET, 15, 1)
+ FIELD(RST_FPD_TOP, AFI_FM5_RESET, 12, 1)
+ FIELD(RST_FPD_TOP, AFI_FM4_RESET, 11, 1)
+ FIELD(RST_FPD_TOP, AFI_FM3_RESET, 10, 1)
+ FIELD(RST_FPD_TOP, AFI_FM2_RESET, 9, 1)
+ FIELD(RST_FPD_TOP, AFI_FM1_RESET, 8, 1)
+ FIELD(RST_FPD_TOP, AFI_FM0_RESET, 7, 1)
+ FIELD(RST_FPD_TOP, GDMA_RESET, 6, 1)
+ FIELD(RST_FPD_TOP, GPU_PP1_RESET, 5, 1)
+ FIELD(RST_FPD_TOP, GPU_PP0_RESET, 4, 1)
+ FIELD(RST_FPD_TOP, GPU_RESET, 3, 1)
+ FIELD(RST_FPD_TOP, GT_RESET, 2, 1)
+ FIELD(RST_FPD_TOP, SATA_RESET, 1, 1)
+REG32(RST_FPD_APU, 0x104)
+ FIELD(RST_FPD_APU, ACPU3_PWRON_RESET, 13, 1)
+ FIELD(RST_FPD_APU, ACPU2_PWRON_RESET, 12, 1)
+ FIELD(RST_FPD_APU, ACPU1_PWRON_RESET, 11, 1)
+ FIELD(RST_FPD_APU, ACPU0_PWRON_RESET, 10, 1)
+ FIELD(RST_FPD_APU, APU_L2_RESET, 8, 1)
+ FIELD(RST_FPD_APU, ACPU3_RESET, 3, 1)
+ FIELD(RST_FPD_APU, ACPU2_RESET, 2, 1)
+ FIELD(RST_FPD_APU, ACPU1_RESET, 1, 1)
+ FIELD(RST_FPD_APU, ACPU0_RESET, 0, 1)
+REG32(RST_DDR_SS, 0x108)
+ FIELD(RST_DDR_SS, DDR_RESET, 3, 1)
+ FIELD(RST_DDR_SS, APM_RESET, 2, 1)
+
+#define CRF_R_MAX (R_RST_DDR_SS + 1)
+
+struct XlnxZynqMPCRF {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_ir;
+
+ RegisterInfoArray *reg_array;
+ uint32_t regs[CRF_R_MAX];
+ RegisterInfo regs_info[CRF_R_MAX];
+};
+
+#endif
diff --git a/include/hw/net/allwinner-sun8i-emac.h b/include/hw/net/allwinner-sun8i-emac.h
index eda034e96b..185895f4e1 100644
--- a/include/hw/net/allwinner-sun8i-emac.h
+++ b/include/hw/net/allwinner-sun8i-emac.h
@@ -30,15 +30,14 @@
*/
#define TYPE_AW_SUN8I_EMAC "allwinner-sun8i-emac"
-#define AW_SUN8I_EMAC(obj) \
- OBJECT_CHECK(AwSun8iEmacState, (obj), TYPE_AW_SUN8I_EMAC)
+OBJECT_DECLARE_SIMPLE_TYPE(AwSun8iEmacState, AW_SUN8I_EMAC)
/** @} */
/**
* Allwinner Sun8i EMAC object instance state
*/
-typedef struct AwSun8iEmacState {
+struct AwSun8iEmacState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -49,6 +48,12 @@ typedef struct AwSun8iEmacState {
/** Interrupt output signal to notify CPU */
qemu_irq irq;
+ /** Memory region where DMA transfers are done */
+ MemoryRegion *dma_mr;
+
+ /** Address space used internally for DMA transfers */
+ AddressSpace dma_as;
+
/** Generic Network Interface Controller (NIC) for networking API */
NICState *nic;
@@ -94,6 +99,6 @@ typedef struct AwSun8iEmacState {
/** @} */
-} AwSun8iEmacState;
+};
-#endif /* HW_NET_ALLWINNER_SUN8I_H */
+#endif /* HW_NET_ALLWINNER_SUN8I_EMAC_H */
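The dma_mr/dma_as pair added above follows a common QEMU pattern: the board links a MemoryRegion property into the device, and the device builds a private AddressSpace over it when realized. A hedged sketch of that setup (property handling and names are illustrative, not the literal allwinner-sun8i-emac code):

/* Illustrative realize-time setup; assumes qapi/error.h and exec/address-spaces.h. */
static void emac_realize_sketch(DeviceState *dev, Error **errp)
{
    AwSun8iEmacState *s = AW_SUN8I_EMAC(dev);

    if (!s->dma_mr) {
        error_setg(errp, "'dma-memory' link property not set");
        return;
    }
    /* All descriptor and frame DMA goes through this private address space. */
    address_space_init(&s->dma_as, s->dma_mr, "sun8i-emac-dma");
}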
diff --git a/include/hw/net/allwinner_emac.h b/include/hw/net/allwinner_emac.h
index 5013207d15..534e748982 100644
--- a/include/hw/net/allwinner_emac.h
+++ b/include/hw/net/allwinner_emac.h
@@ -28,9 +28,10 @@
#include "qemu/fifo8.h"
#include "hw/net/mii.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_AW_EMAC "allwinner-emac"
-#define AW_EMAC(obj) OBJECT_CHECK(AwEmacState, (obj), TYPE_AW_EMAC)
+OBJECT_DECLARE_SIMPLE_TYPE(AwEmacState, AW_EMAC)
/*
* Allwinner EMAC register list
@@ -144,7 +145,7 @@ typedef struct RTL8201CPState {
uint16_t anlpar;
} RTL8201CPState;
-typedef struct AwEmacState {
+struct AwEmacState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -171,6 +172,6 @@ typedef struct AwEmacState {
Fifo8 tx_fifo[NUM_TX_FIFOS];
uint32_t tx_length[NUM_TX_FIFOS];
uint32_t tx_channel;
-} AwEmacState;
+};
#endif
diff --git a/include/hw/net/cadence_gem.h b/include/hw/net/cadence_gem.h
index 54e646ff79..91ebb5c8ae 100644
--- a/include/hw/net/cadence_gem.h
+++ b/include/hw/net/cadence_gem.h
@@ -24,9 +24,10 @@
#ifndef CADENCE_GEM_H
#define CADENCE_GEM_H
+#include "qom/object.h"
#define TYPE_CADENCE_GEM "cadence_gem"
-#define CADENCE_GEM(obj) OBJECT_CHECK(CadenceGEMState, (obj), TYPE_CADENCE_GEM)
+OBJECT_DECLARE_SIMPLE_TYPE(CadenceGEMState, CADENCE_GEM)
#include "net/net.h"
#include "hw/sysbus.h"
@@ -43,7 +44,7 @@
#define MAX_JUMBO_FRAME_SIZE_MASK 0x3FFF
#define MAX_FRAME_SIZE MAX_JUMBO_FRAME_SIZE_MASK
-typedef struct CadenceGEMState {
+struct CadenceGEMState {
/*< private >*/
SysBusDevice parent_obj;
@@ -73,6 +74,8 @@ typedef struct CadenceGEMState {
/* Mask of register bits which are write 1 to clear */
uint32_t regs_w1c[CADENCE_GEM_MAXREG];
+ /* PHY address */
+ uint8_t phy_addr;
/* PHY registers backing store */
uint16_t phy_regs[32];
@@ -89,6 +92,6 @@ typedef struct CadenceGEMState {
uint32_t rx_desc[MAX_PRIORITY_QUEUES][DESC_MAX_NUM_WORDS];
bool sar_active[4];
-} CadenceGEMState;
+};
#endif
diff --git a/include/hw/net/dp8393x.h b/include/hw/net/dp8393x.h
new file mode 100644
index 0000000000..4a3f7478be
--- /dev/null
+++ b/include/hw/net/dp8393x.h
@@ -0,0 +1,60 @@
+/*
+ * QEMU NS SONIC DP8393x netcard
+ *
+ * Copyright (c) 2008-2009 Herve Poussineau
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_NET_DP8393X_H
+#define HW_NET_DP8393X_H
+
+#include "hw/sysbus.h"
+#include "net/net.h"
+#include "exec/memory.h"
+
+#define SONIC_REG_COUNT 0x40
+
+#define TYPE_DP8393X "dp8393x"
+OBJECT_DECLARE_SIMPLE_TYPE(dp8393xState, DP8393X)
+
+struct dp8393xState {
+ SysBusDevice parent_obj;
+
+ /* Hardware */
+ uint8_t it_shift;
+ bool big_endian;
+ bool last_rba_is_full;
+ qemu_irq irq;
+ int irq_level;
+ QEMUTimer *watchdog;
+ int64_t wt_last_update;
+ NICConf conf;
+ NICState *nic;
+ MemoryRegion mmio;
+
+ /* Registers */
+ uint16_t cam[16][3];
+ uint16_t regs[SONIC_REG_COUNT];
+
+ /* Temporaries */
+ uint8_t tx_buffer[0x10000];
+ int loopback_packet;
+
+ /* Memory access */
+ MemoryRegion *dma_mr;
+ AddressSpace as;
+};
+
+#endif
diff --git a/include/hw/net/ftgmac100.h b/include/hw/net/ftgmac100.h
index ab37e7b2b8..765d1538a4 100644
--- a/include/hw/net/ftgmac100.h
+++ b/include/hw/net/ftgmac100.h
@@ -9,9 +9,10 @@
#ifndef FTGMAC100_H
#define FTGMAC100_H
+#include "qom/object.h"
#define TYPE_FTGMAC100 "ftgmac100"
-#define FTGMAC100(obj) OBJECT_CHECK(FTGMAC100State, (obj), TYPE_FTGMAC100)
+OBJECT_DECLARE_SIMPLE_TYPE(FTGMAC100State, FTGMAC100)
#include "hw/sysbus.h"
#include "net/net.h"
@@ -21,7 +22,7 @@
*/
#define FTGMAC100_MAX_FRAME_SIZE 9220
-typedef struct FTGMAC100State {
+struct FTGMAC100State {
/*< private >*/
SysBusDevice parent_obj;
@@ -64,15 +65,15 @@ typedef struct FTGMAC100State {
bool aspeed;
uint32_t txdes0_edotr;
uint32_t rxdes0_edorr;
-} FTGMAC100State;
+};
#define TYPE_ASPEED_MII "aspeed-mmi"
-#define ASPEED_MII(obj) OBJECT_CHECK(AspeedMiiState, (obj), TYPE_ASPEED_MII)
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedMiiState, ASPEED_MII)
/*
* AST2600 MII controller
*/
-typedef struct AspeedMiiState {
+struct AspeedMiiState {
/*< private >*/
SysBusDevice parent_obj;
@@ -81,6 +82,6 @@ typedef struct AspeedMiiState {
MemoryRegion iomem;
uint32_t phycr;
uint32_t phydata;
-} AspeedMiiState;
+};
#endif
diff --git a/include/hw/net/imx_fec.h b/include/hw/net/imx_fec.h
index 9f03034b89..2d13290c78 100644
--- a/include/hw/net/imx_fec.h
+++ b/include/hw/net/imx_fec.h
@@ -23,9 +23,10 @@
#ifndef IMX_FEC_H
#define IMX_FEC_H
+#include "qom/object.h"
#define TYPE_IMX_FEC "imx.fec"
-#define IMX_FEC(obj) OBJECT_CHECK(IMXFECState, (obj), TYPE_IMX_FEC)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXFECState, IMX_FEC)
#define TYPE_IMX_ENET "imx.enet"
@@ -247,7 +248,7 @@ typedef struct {
#define FSL_IMX25_FEC_SIZE 0x4000
-typedef struct IMXFECState {
+struct IMXFECState {
/*< private >*/
SysBusDevice parent_obj;
@@ -269,11 +270,13 @@ typedef struct IMXFECState {
uint32_t phy_int;
uint32_t phy_int_mask;
uint32_t phy_num;
+ bool phy_connected;
+ struct IMXFECState *phy_consumer;
bool is_fec;
/* Buffer used to assemble a Tx frame */
uint8_t frame[ENET_MAX_FRAME_SIZE];
-} IMXFECState;
+};
#endif
diff --git a/include/hw/net/lan9118.h b/include/hw/net/lan9118.h
index 3d0c67f339..4bf9da7a63 100644
--- a/include/hw/net/lan9118.h
+++ b/include/hw/net/lan9118.h
@@ -15,6 +15,6 @@
#define TYPE_LAN9118 "lan9118"
-void lan9118_init(NICInfo *, uint32_t, qemu_irq);
+void lan9118_init(uint32_t, qemu_irq);
#endif
diff --git a/include/hw/net/lance.h b/include/hw/net/lance.h
index 0357f5f65c..f645d6af67 100644
--- a/include/hw/net/lance.h
+++ b/include/hw/net/lance.h
@@ -32,15 +32,17 @@
#include "net/net.h"
#include "hw/net/pcnet.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_LANCE "lance"
-#define SYSBUS_PCNET(obj) \
- OBJECT_CHECK(SysBusPCNetState, (obj), TYPE_LANCE)
+typedef struct SysBusPCNetState SysBusPCNetState;
+DECLARE_INSTANCE_CHECKER(SysBusPCNetState, SYSBUS_PCNET,
+ TYPE_LANCE)
-typedef struct {
+struct SysBusPCNetState {
SysBusDevice parent_obj;
PCNetState state;
-} SysBusPCNetState;
+};
#endif
diff --git a/include/hw/net/lasi_82596.h b/include/hw/net/lasi_82596.h
index e76ef8308e..439356ec19 100644
--- a/include/hw/net/lasi_82596.h
+++ b/include/hw/net/lasi_82596.h
@@ -10,20 +10,22 @@
#include "net/net.h"
#include "hw/net/i82596.h"
+#include "hw/sysbus.h"
#define TYPE_LASI_82596 "lasi_82596"
-#define SYSBUS_I82596(obj) \
- OBJECT_CHECK(SysBusI82596State, (obj), TYPE_LASI_82596)
+typedef struct SysBusI82596State SysBusI82596State;
+DECLARE_INSTANCE_CHECKER(SysBusI82596State, SYSBUS_I82596,
+ TYPE_LASI_82596)
-typedef struct {
+struct SysBusI82596State {
SysBusDevice parent_obj;
I82596State state;
uint16_t last_val;
int val_index:1;
-} SysBusI82596State;
+};
-SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space,
- hwaddr hpa, qemu_irq irq);
+SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, hwaddr hpa,
+ qemu_irq irq, gboolean match_default);
#endif
diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h
index 4ae4dcce7e..f7feddac9b 100644
--- a/include/hw/net/mii.h
+++ b/include/hw/net/mii.h
@@ -55,6 +55,7 @@
#define MII_BMCR_CTST (1 << 7) /* Collision test */
#define MII_BMCR_SPEED1000 (1 << 6) /* MSB of Speed (1000) */
+#define MII_BMSR_100T4 (1 << 15) /* Can do 100mbps T4 */
#define MII_BMSR_100TX_FD (1 << 14) /* Can do 100mbps, full-duplex */
#define MII_BMSR_100TX_HD (1 << 13) /* Can do 100mbps, half-duplex */
#define MII_BMSR_10T_FD (1 << 12) /* Can do 10mbps, full-duplex */
@@ -70,7 +71,7 @@
#define MII_BMSR_JABBER (1 << 1) /* Jabber detected */
#define MII_BMSR_EXTCAP (1 << 0) /* Ext-reg capability */
-#define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymetric pause */
+#define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymmetric pause */
#define MII_ANAR_PAUSE (1 << 10) /* Try for pause */
#define MII_ANAR_TXFD (1 << 8)
#define MII_ANAR_TX (1 << 7)
@@ -81,20 +82,31 @@
#define MII_ANLPAR_ACK (1 << 14)
#define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */
#define MII_ANLPAR_PAUSE (1 << 10) /* can pause */
+#define MII_ANLPAR_T4 (1 << 9)
#define MII_ANLPAR_TXFD (1 << 8)
#define MII_ANLPAR_TX (1 << 7)
#define MII_ANLPAR_10FD (1 << 6)
#define MII_ANLPAR_10 (1 << 5)
#define MII_ANLPAR_CSMACD (1 << 0)
-#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */
+#define MII_ANER_NP (1 << 2) /* Next Page Able */
+#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */
+#define MII_ANNP_MP (1 << 13) /* Message Page */
+
+#define MII_CTRL1000_MASTER (1 << 11) /* MASTER-SLAVE Manual Configuration Value */
+#define MII_CTRL1000_PORT (1 << 10) /* T2_Repeater/DTE bit */
#define MII_CTRL1000_FULL (1 << 9) /* 1000BASE-T full duplex */
#define MII_CTRL1000_HALF (1 << 8) /* 1000BASE-T half duplex */
+#define MII_STAT1000_LOK (1 << 13) /* Local Receiver Status */
+#define MII_STAT1000_ROK (1 << 12) /* Remote Receiver Status */
#define MII_STAT1000_FULL (1 << 11) /* 1000BASE-T full duplex */
#define MII_STAT1000_HALF (1 << 10) /* 1000BASE-T half duplex */
+#define MII_EXTSTAT_1000T_FD (1 << 13) /* 1000BASE-T Full Duplex */
+#define MII_EXTSTAT_1000T_HD (1 << 12) /* 1000BASE-T Half Duplex */
+
/* List of vendor identifiers */
/* RealTek 8201 */
#define RTL8201CP_PHYID1 0x0000
diff --git a/include/hw/net/msf2-emac.h b/include/hw/net/msf2-emac.h
index 37966d3a81..846ba6e6dc 100644
--- a/include/hw/net/msf2-emac.h
+++ b/include/hw/net/msf2-emac.h
@@ -26,15 +26,15 @@
#include "exec/memory.h"
#include "net/net.h"
#include "net/eth.h"
+#include "qom/object.h"
#define TYPE_MSS_EMAC "msf2-emac"
-#define MSS_EMAC(obj) \
- OBJECT_CHECK(MSF2EmacState, (obj), TYPE_MSS_EMAC)
+OBJECT_DECLARE_SIMPLE_TYPE(MSF2EmacState, MSS_EMAC)
#define R_MAX (0x1a0 / 4)
#define PHY_MAX_REGS 32
-typedef struct MSF2EmacState {
+struct MSF2EmacState {
SysBusDevice parent;
MemoryRegion mmio;
@@ -50,4 +50,4 @@ typedef struct MSF2EmacState {
uint16_t phy_regs[PHY_MAX_REGS];
uint32_t regs[R_MAX];
-} MSF2EmacState;
+};
diff --git a/include/hw/net/mv88w8618_eth.h b/include/hw/net/mv88w8618_eth.h
new file mode 100644
index 0000000000..41074940ec
--- /dev/null
+++ b/include/hw/net/mv88w8618_eth.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell MV88W8618 / Freecom MusicPal emulation.
+ *
+ * Copyright (c) 2008-2021 QEMU contributors
+ */
+
+#ifndef HW_NET_MV88W8618_ETH_H
+#define HW_NET_MV88W8618_ETH_H
+
+#define TYPE_MV88W8618_ETH "mv88w8618_eth"
+
+#endif
diff --git a/include/hw/net/ne2000-isa.h b/include/hw/net/ne2000-isa.h
index af59ee0b02..73bae10ad1 100644
--- a/include/hw/net/ne2000-isa.h
+++ b/include/hw/net/ne2000-isa.h
@@ -22,8 +22,6 @@ static inline ISADevice *isa_ne2000_init(ISABus *bus, int base, int irq,
{
ISADevice *d;
- qemu_check_nic_model(nd, "ne2k_isa");
-
d = isa_try_new(TYPE_ISA_NE2000);
if (d) {
DeviceState *dev = DEVICE(d);
diff --git a/include/hw/net/npcm7xx_emc.h b/include/hw/net/npcm7xx_emc.h
new file mode 100644
index 0000000000..b789007160
--- /dev/null
+++ b/include/hw/net/npcm7xx_emc.h
@@ -0,0 +1,283 @@
+/*
+ * Nuvoton NPCM7xx EMC Module
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM7XX_EMC_H
+#define NPCM7XX_EMC_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "net/net.h"
+
+/* 32-bit register indices. */
+enum NPCM7xxPWMRegister {
+ /* Control registers. */
+ REG_CAMCMR,
+ REG_CAMEN,
+
+ /* There are 16 CAMn[ML] registers. */
+ REG_CAMM_BASE,
+ REG_CAML_BASE,
+ REG_CAMML_LAST = 0x21,
+
+ REG_TXDLSA = 0x22,
+ REG_RXDLSA,
+ REG_MCMDR,
+ REG_MIID,
+ REG_MIIDA,
+ REG_FFTCR,
+ REG_TSDR,
+ REG_RSDR,
+ REG_DMARFC,
+ REG_MIEN,
+
+ /* Status registers. */
+ REG_MISTA,
+ REG_MGSTA,
+ REG_MPCNT,
+ REG_MRPC,
+ REG_MRPCC,
+ REG_MREPC,
+ REG_DMARFS,
+ REG_CTXDSA,
+ REG_CTXBSA,
+ REG_CRXDSA,
+ REG_CRXBSA,
+
+ NPCM7XX_NUM_EMC_REGS,
+};
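From the constants above (REG_CAMM_BASE, REG_CAML_BASE, REG_CAMML_LAST), the 16 CAM entries appear to be laid out as consecutive M/L register pairs; a small sketch of the index arithmetic that layout implies (an inference from the values, not copied from the EMC code):

/* Index of the CAMnM / CAMnL registers for CAM entry n (0..15), assuming
 * consecutive M/L pairs: entry 15 lands on 0x20/0x21 == REG_CAMML_LAST. */
static inline unsigned emc_cam_m_index(unsigned n) { return REG_CAMM_BASE + 2 * n; }
static inline unsigned emc_cam_l_index(unsigned n) { return REG_CAML_BASE + 2 * n; }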
+
+/* REG_CAMCMR fields */
+/* Enable CAM Compare */
+#define REG_CAMCMR_ECMP (1 << 4)
+/* Complement CAM Compare */
+#define REG_CAMCMR_CCAM (1 << 3)
+/* Accept Broadcast Packet */
+#define REG_CAMCMR_ABP (1 << 2)
+/* Accept Multicast Packet */
+#define REG_CAMCMR_AMP (1 << 1)
+/* Accept Unicast Packet */
+#define REG_CAMCMR_AUP (1 << 0)
+
+/* REG_MCMDR fields */
+/* Software Reset */
+#define REG_MCMDR_SWR (1 << 24)
+/* Internal Loopback Select */
+#define REG_MCMDR_LBK (1 << 21)
+/* Operation Mode Select */
+#define REG_MCMDR_OPMOD (1 << 20)
+/* Enable MDC Clock Generation */
+#define REG_MCMDR_ENMDC (1 << 19)
+/* Full-Duplex Mode Select */
+#define REG_MCMDR_FDUP (1 << 18)
+/* Enable SQE Checking */
+#define REG_MCMDR_ENSEQ (1 << 17)
+/* Send PAUSE Frame */
+#define REG_MCMDR_SDPZ (1 << 16)
+/* No Defer */
+#define REG_MCMDR_NDEF (1 << 9)
+/* Frame Transmission On */
+#define REG_MCMDR_TXON (1 << 8)
+/* Strip CRC Checksum */
+#define REG_MCMDR_SPCRC (1 << 5)
+/* Accept CRC Error Packet */
+#define REG_MCMDR_AEP (1 << 4)
+/* Accept Control Packet */
+#define REG_MCMDR_ACP (1 << 3)
+/* Accept Runt Packet */
+#define REG_MCMDR_ARP (1 << 2)
+/* Accept Long Packet */
+#define REG_MCMDR_ALP (1 << 1)
+/* Frame Reception On */
+#define REG_MCMDR_RXON (1 << 0)
+
+/* REG_MIEN fields */
+/* Enable Transmit Descriptor Unavailable Interrupt */
+#define REG_MIEN_ENTDU (1 << 23)
+/* Enable Transmit Completion Interrupt */
+#define REG_MIEN_ENTXCP (1 << 18)
+/* Enable Transmit Interrupt */
+#define REG_MIEN_ENTXINTR (1 << 16)
+/* Enable Receive Descriptor Unavailable Interrupt */
+#define REG_MIEN_ENRDU (1 << 10)
+/* Enable Receive Good Interrupt */
+#define REG_MIEN_ENRXGD (1 << 4)
+/* Enable Receive Interrupt */
+#define REG_MIEN_ENRXINTR (1 << 0)
+
+/* REG_MISTA fields */
+/* TODO: Add error fields and support simulated errors? */
+/* Transmit Bus Error Interrupt */
+#define REG_MISTA_TXBERR (1 << 24)
+/* Transmit Descriptor Unavailable Interrupt */
+#define REG_MISTA_TDU (1 << 23)
+/* Transmit Completion Interrupt */
+#define REG_MISTA_TXCP (1 << 18)
+/* Transmit Interrupt */
+#define REG_MISTA_TXINTR (1 << 16)
+/* Receive Bus Error Interrupt */
+#define REG_MISTA_RXBERR (1 << 11)
+/* Receive Descriptor Unavailable Interrupt */
+#define REG_MISTA_RDU (1 << 10)
+/* DMA Early Notification Interrupt */
+#define REG_MISTA_DENI (1 << 9)
+/* Maximum Frame Length Interrupt */
+#define REG_MISTA_DFOI (1 << 8)
+/* Receive Good Interrupt */
+#define REG_MISTA_RXGD (1 << 4)
+/* Packet Too Long Interrupt */
+#define REG_MISTA_PTLE (1 << 3)
+/* Receive Interrupt */
+#define REG_MISTA_RXINTR (1 << 0)
+
+/* REG_MGSTA fields */
+/* Transmission Halted */
+#define REG_MGSTA_TXHA (1 << 11)
+/* Receive Halted */
+#define REG_MGSTA_RXHA (1 << 11) /* NB: same bit value as REG_MGSTA_TXHA above */
+
+/* REG_DMARFC fields */
+/* Maximum Receive Frame Length */
+#define REG_DMARFC_RXMS(word) extract32((word), 0, 16)
+
+/* REG MIIDA fields */
+/* Busy Bit */
+#define REG_MIIDA_BUSY (1 << 17)
+
+/* Transmit and receive descriptors */
+typedef struct NPCM7xxEMCTxDesc NPCM7xxEMCTxDesc;
+typedef struct NPCM7xxEMCRxDesc NPCM7xxEMCRxDesc;
+
+struct NPCM7xxEMCTxDesc {
+ uint32_t flags;
+ uint32_t txbsa;
+ uint32_t status_and_length;
+ uint32_t ntxdsa;
+};
+
+struct NPCM7xxEMCRxDesc {
+ uint32_t status_and_length;
+ uint32_t rxbsa;
+ uint32_t reserved;
+ uint32_t nrxdsa;
+};
+
+/* NPCM7xxEMCTxDesc.flags values */
+/* Owner: 0 = cpu, 1 = emc */
+#define TX_DESC_FLAG_OWNER_MASK (1 << 31)
+/* Transmit interrupt enable */
+#define TX_DESC_FLAG_INTEN (1 << 2)
+/* CRC append */
+#define TX_DESC_FLAG_CRCAPP (1 << 1)
+/* Padding enable */
+#define TX_DESC_FLAG_PADEN (1 << 0)
+
+/* NPCM7xxEMCTxDesc.status_and_length values */
+/* Collision count */
+#define TX_DESC_STATUS_CCNT_SHIFT 28
+#define TX_DESC_STATUS_CCNT_BITSIZE 4
+/* SQE error */
+#define TX_DESC_STATUS_SQE (1 << 26)
+/* Transmission paused */
+#define TX_DESC_STATUS_PAU (1 << 25)
+/* P transmission halted */
+#define TX_DESC_STATUS_TXHA (1 << 24)
+/* Late collision */
+#define TX_DESC_STATUS_LC (1 << 23)
+/* Transmission abort */
+#define TX_DESC_STATUS_TXABT (1 << 22)
+/* No carrier sense */
+#define TX_DESC_STATUS_NCS (1 << 21)
+/* Defer exceed */
+#define TX_DESC_STATUS_EXDEF (1 << 20)
+/* Transmission complete */
+#define TX_DESC_STATUS_TXCP (1 << 19)
+/* Transmission deferred */
+#define TX_DESC_STATUS_DEF (1 << 17)
+/* Transmit interrupt */
+#define TX_DESC_STATUS_TXINTR (1 << 16)
+
+#define TX_DESC_PKT_LEN(word) extract32((word), 0, 16)
+
+/* Transmit buffer start address */
+#define TX_DESC_TXBSA(word) ((uint32_t) (word) & ~3u)
+
+/* Next transmit descriptor start address */
+#define TX_DESC_NTXDSA(word) ((uint32_t) (word) & ~3u)
+
+/* NPCM7xxEMCRxDesc.status_and_length values */
+/* Owner: 0b00 = cpu, 0b01 = undefined, 0b10 = emc, 0b11 = undefined */
+#define RX_DESC_STATUS_OWNER_SHIFT 30
+#define RX_DESC_STATUS_OWNER_BITSIZE 2
+#define RX_DESC_STATUS_OWNER_MASK (3 << RX_DESC_STATUS_OWNER_SHIFT)
+/* Runt packet */
+#define RX_DESC_STATUS_RP (1 << 22)
+/* Alignment error */
+#define RX_DESC_STATUS_ALIE (1 << 21)
+/* Frame reception complete */
+#define RX_DESC_STATUS_RXGD (1 << 20)
+/* Packet too long */
+#define RX_DESC_STATUS_PTLE (1 << 19)
+/* CRC error */
+#define RX_DESC_STATUS_CRCE (1 << 17)
+/* Receive interrupt */
+#define RX_DESC_STATUS_RXINTR (1 << 16)
+
+#define RX_DESC_PKT_LEN(word) extract32((word), 0, 16)
+
+/* Receive buffer start address */
+#define RX_DESC_RXBSA(word) ((uint32_t) (word) & ~3u)
+
+/* Next receive descriptor start address */
+#define RX_DESC_NRXDSA(word) ((uint32_t) (word) & ~3u)
+
+/* Minimum packet length, when TX_DESC_FLAG_PADEN is set. */
+#define MIN_PACKET_LENGTH 64
+
+struct NPCM7xxEMCState {
+ /*< private >*/
+ SysBusDevice parent;
+ /*< public >*/
+
+ MemoryRegion iomem;
+
+ qemu_irq tx_irq;
+ qemu_irq rx_irq;
+
+ NICState *nic;
+ NICConf conf;
+
+ /* 0 or 1, for log messages */
+ uint8_t emc_num;
+
+ uint32_t regs[NPCM7XX_NUM_EMC_REGS];
+
+ /*
+ * tx is active. Set to true by TSDR and then switches off when out of
+ * descriptors. If the TXON bit in REG_MCMDR is off then this is off.
+ */
+ bool tx_active;
+
+ /*
+ * rx is active. Set to true by RSDR and then switches off when out of
+ * descriptors. If the RXON bit in REG_MCMDR is off then this is off.
+ */
+ bool rx_active;
+};
+
+#define TYPE_NPCM7XX_EMC "npcm7xx-emc"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxEMCState, NPCM7XX_EMC)
+
+#endif /* NPCM7XX_EMC_H */
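A simplified sketch of the transmit-descriptor ownership handshake that the flag and status comments above describe (the real model also DMAs descriptors to and from guest memory; this only shows the bit handling):

/* Illustration of the "Owner: 0 = cpu, 1 = emc" handshake on a TX descriptor. */
static bool emc_try_send_sketch(NPCM7xxEMCTxDesc *desc)
{
    if (!(desc->flags & TX_DESC_FLAG_OWNER_MASK)) {
        return false;                              /* still owned by the CPU */
    }
    uint32_t len = TX_DESC_PKT_LEN(desc->status_and_length);
    (void)len; /* a real model would DMA 'len' bytes from TX_DESC_TXBSA(desc->txbsa) */
    desc->status_and_length |= TX_DESC_STATUS_TXCP;    /* transmission complete */
    desc->flags &= ~TX_DESC_FLAG_OWNER_MASK;           /* hand back to the CPU */
    return true;
}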
diff --git a/include/hw/net/npcm_gmac.h b/include/hw/net/npcm_gmac.h
new file mode 100644
index 0000000000..6340ffe92c
--- /dev/null
+++ b/include/hw/net/npcm_gmac.h
@@ -0,0 +1,343 @@
+/*
+ * Nuvoton NPCM7xx/8xx GMAC Module
+ *
+ * Copyright 2024 Google LLC
+ * Authors:
+ * Hao Wu <wuhaotsh@google.com>
+ * Nabih Estefan <nabihestefan@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM_GMAC_H
+#define NPCM_GMAC_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "net/net.h"
+
+#define NPCM_GMAC_NR_REGS (0x1060 / sizeof(uint32_t))
+
+#define NPCM_GMAC_MAX_PHYS 32
+#define NPCM_GMAC_MAX_PHY_REGS 32
+
+struct NPCMGMACRxDesc {
+ uint32_t rdes0;
+ uint32_t rdes1;
+ uint32_t rdes2;
+ uint32_t rdes3;
+};
+
+/* NPCMGMACRxDesc.flags values */
+/* RDES2 and RDES3 are buffer addresses */
+/* Owner: 0 = software, 1 = dma */
+#define RX_DESC_RDES0_OWN BIT(31)
+/* Destination Address Filter Fail */
+#define RX_DESC_RDES0_DEST_ADDR_FILT_FAIL BIT(30)
+/* Frame length */
+#define RX_DESC_RDES0_FRAME_LEN_MASK(word) extract32(word, 16, 14)
+/* Frame length shift */
+#define RX_DESC_RDES0_FRAME_LEN_SHIFT 16
+/* Error Summary */
+#define RX_DESC_RDES0_ERR_SUMM_MASK BIT(15)
+/* Descriptor Error */
+#define RX_DESC_RDES0_DESC_ERR_MASK BIT(14)
+/* Source Address Filter Fail */
+#define RX_DESC_RDES0_SRC_ADDR_FILT_FAIL_MASK BIT(13)
+/* Length Error */
+#define RX_DESC_RDES0_LEN_ERR_MASK BIT(12)
+/* Overflow Error */
+#define RX_DESC_RDES0_OVRFLW_ERR_MASK BIT(11)
+/* VLAN Tag */
+#define RX_DESC_RDES0_VLAN_TAG_MASK BIT(10)
+/* First Descriptor */
+#define RX_DESC_RDES0_FIRST_DESC_MASK BIT(9)
+/* Last Descriptor */
+#define RX_DESC_RDES0_LAST_DESC_MASK BIT(8)
+/* IPC Checksum Error/Giant Frame */
+#define RX_DESC_RDES0_IPC_CHKSM_ERR_GNT_FRM_MASK BIT(7)
+/* Late Collision */
+#define RX_DESC_RDES0_LT_COLL_MASK BIT(6)
+/* Frame Type */
+#define RX_DESC_RDES0_FRM_TYPE_MASK BIT(5)
+/* Receive Watchdog Timeout */
+#define RX_DESC_RDES0_REC_WTCHDG_TMT_MASK BIT(4)
+/* Receive Error */
+#define RX_DESC_RDES0_RCV_ERR_MASK BIT(3)
+/* Dribble Bit Error */
+#define RX_DESC_RDES0_DRBL_BIT_ERR_MASK BIT(2)
+/* Cyclic Redundancy Check Error */
+#define RX_DESC_RDES0_CRC_ERR_MASK BIT(1)
+/* Rx MAC Address/Payload Checksum Error */
+#define RC_DESC_RDES0_RCE_MASK BIT(0)
+
+/* Disable Interrupt on Completion */
+#define RX_DESC_RDES1_DIS_INTR_COMP_MASK BIT(31)
+/* Receive end of ring */
+#define RX_DESC_RDES1_RC_END_RING_MASK BIT(25)
+/* Second Address Chained */
+#define RX_DESC_RDES1_SEC_ADDR_CHND_MASK BIT(24)
+/* Receive Buffer 2 Size */
+#define RX_DESC_RDES1_BFFR2_SZ_SHIFT 11
+#define RX_DESC_RDES1_BFFR2_SZ_MASK(word) extract32(word, \
+ RX_DESC_RDES1_BFFR2_SZ_SHIFT, 11)
+/* Receive Buffer 1 Size */
+#define RX_DESC_RDES1_BFFR1_SZ_MASK(word) extract32(word, 0, 11)
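For illustration, small accessors showing how the RDES0/RDES1 helpers above decode a receive descriptor (a sketch, not part of the model):

static inline bool gmac_rx_desc_owned_by_dma(const struct NPCMGMACRxDesc *d)
{
    return d->rdes0 & RX_DESC_RDES0_OWN;             /* 1 = owned by the DMA */
}

static inline uint32_t gmac_rx_frame_len(const struct NPCMGMACRxDesc *d)
{
    return RX_DESC_RDES0_FRAME_LEN_MASK(d->rdes0);   /* bits 29:16 */
}

static inline uint32_t gmac_rx_buf1_size(const struct NPCMGMACRxDesc *d)
{
    return RX_DESC_RDES1_BFFR1_SZ_MASK(d->rdes1);    /* bits 10:0 */
}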
+
+
+struct NPCMGMACTxDesc {
+ uint32_t tdes0;
+ uint32_t tdes1;
+ uint32_t tdes2;
+ uint32_t tdes3;
+};
+
+/* NPCMGMACTxDesc.flags values */
+/* TDES2 and TDES3 are buffer addresses */
+/* Owner: 0 = software, 1 = gmac */
+#define TX_DESC_TDES0_OWN BIT(31)
+/* Tx Time Stamp Status */
+#define TX_DESC_TDES0_TTSS_MASK BIT(17)
+/* IP Header Error */
+#define TX_DESC_TDES0_IP_HEAD_ERR_MASK BIT(16)
+/* Error Summary */
+#define TX_DESC_TDES0_ERR_SUMM_MASK BIT(15)
+/* Jabber Timeout */
+#define TX_DESC_TDES0_JBBR_TMT_MASK BIT(14)
+/* Frame Flushed */
+#define TX_DESC_TDES0_FRM_FLSHD_MASK BIT(13)
+/* Payload Checksum Error */
+#define TX_DESC_TDES0_PYLD_CHKSM_ERR_MASK BIT(12)
+/* Loss of Carrier */
+#define TX_DESC_TDES0_LSS_CARR_MASK BIT(11)
+/* No Carrier */
+#define TX_DESC_TDES0_NO_CARR_MASK BIT(10)
+/* Late Collision */
+#define TX_DESC_TDES0_LATE_COLL_MASK BIT(9)
+/* Excessive Collision */
+#define TX_DESC_TDES0_EXCS_COLL_MASK BIT(8)
+/* VLAN Frame */
+#define TX_DESC_TDES0_VLAN_FRM_MASK BIT(7)
+/* Collision Count */
+#define TX_DESC_TDES0_COLL_CNT_MASK(word) extract32(word, 3, 4)
+/* Excessive Deferral */
+#define TX_DESC_TDES0_EXCS_DEF_MASK BIT(2)
+/* Underflow Error */
+#define TX_DESC_TDES0_UNDRFLW_ERR_MASK BIT(1)
+/* Deferred Bit */
+#define TX_DESC_TDES0_DFRD_BIT_MASK BIT(0)
+
+/* Interrupt of Completion */
+#define TX_DESC_TDES1_INTERR_COMP_MASK BIT(31)
+/* Last Segment */
+#define TX_DESC_TDES1_LAST_SEG_MASK BIT(30)
+/* First Segment */
+#define TX_DESC_TDES1_FIRST_SEG_MASK BIT(29)
+/* Checksum Insertion Control */
+#define TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(word) extract32(word, 27, 2)
+/* Disable Cyclic Redundancy Check */
+#define TX_DESC_TDES1_DIS_CDC_MASK BIT(26)
+/* Transmit End of Ring */
+#define TX_DESC_TDES1_TX_END_RING_MASK BIT(25)
+/* Second Address Chained */
+#define TX_DESC_TDES1_SEC_ADDR_CHND_MASK BIT(24)
+/* Transmit Buffer 2 Size */
+#define TX_DESC_TDES1_BFFR2_SZ_MASK(word) extract32(word, 11, 11)
+/* Transmit Buffer 1 Size */
+#define TX_DESC_TDES1_BFFR1_SZ_MASK(word) extract32(word, 0, 11)
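+
+/*
+ * Illustrative transmit-side use of the TDES accessors (hypothetical names,
+ * not part of this header): a model would gather both buffers of every
+ * descriptor it owns until the last-segment flag is seen, e.g.:
+ *
+ *   struct NPCMGMACTxDesc desc;
+ *   if (desc.tdes0 & TX_DESC_TDES0_OWN) {
+ *       uint32_t len1 = TX_DESC_TDES1_BFFR1_SZ_MASK(desc.tdes1);
+ *       uint32_t len2 = TX_DESC_TDES1_BFFR2_SZ_MASK(desc.tdes1);
+ *       ... copy len1 bytes from tdes2 and len2 bytes from tdes3 ...
+ *       if (desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) {
+ *           ... frame complete, hand it to the network backend ...
+ *       }
+ *   }
+ */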
+
+typedef struct NPCMGMACState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ NICState *nic;
+ NICConf conf;
+
+ uint32_t regs[NPCM_GMAC_NR_REGS];
+ uint16_t phy_regs[NPCM_GMAC_MAX_PHYS][NPCM_GMAC_MAX_PHY_REGS];
+} NPCMGMACState;
+
+#define TYPE_NPCM_GMAC "npcm-gmac"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCMGMACState, NPCM_GMAC)
+
+/* Mask for RO bits in Status */
+#define NPCM_DMA_STATUS_RO_MASK(word) ((word) & 0xfffe0000)
+/* Mask for W1C (write-1-to-clear) bits in Status */
+#define NPCM_DMA_STATUS_W1C_MASK(word) ((word) & 0x1e7ff)
+
+/* Transmit Process State */
+#define NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT 20
+/* Transmit States */
+#define NPCM_DMA_STATUS_TX_STOPPED_STATE \
+ (0b000)
+#define NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE \
+ (0b001)
+#define NPCM_DMA_STATUS_TX_RUNNING_WAITING_STATE \
+ (0b010)
+#define NPCM_DMA_STATUS_TX_RUNNING_READ_STATE \
+ (0b011)
+#define NPCM_DMA_STATUS_TX_SUSPENDED_STATE \
+ (0b110)
+#define NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE \
+ (0b111)
+/* Transmit Process State */
+#define NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT 17
+/* Receive States */
+#define NPCM_DMA_STATUS_RX_STOPPED_STATE \
+ (0b000)
+#define NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE \
+ (0b001)
+#define NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE \
+ (0b011)
+#define NPCM_DMA_STATUS_RX_SUSPENDED_STATE \
+ (0b100)
+#define NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE \
+ (0b101)
+#define NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE \
+ (0b111)
+
+
+/* Early Receive Interrupt */
+#define NPCM_DMA_STATUS_ERI BIT(14)
+/* Fatal Bus Error Interrupt */
+#define NPCM_DMA_STATUS_FBI BIT(13)
+/* Early Transmit Interrupt */
+#define NPCM_DMA_STATUS_ETI BIT(10)
+/* Receive Watchdog Timeout */
+#define NPCM_DMA_STATUS_RWT BIT(9)
+/* Receive Process Stopped */
+#define NPCM_DMA_STATUS_RPS BIT(8)
+/* Receive Buffer Unavailable */
+#define NPCM_DMA_STATUS_RU BIT(7)
+/* Receive Interrupt */
+#define NPCM_DMA_STATUS_RI BIT(6)
+/* Transmit Underflow */
+#define NPCM_DMA_STATUS_UNF BIT(5)
+/* Receive Overflow */
+#define NPCM_DMA_STATUS_OVF BIT(4)
+/* Transmit Jabber Timeout */
+#define NPCM_DMA_STATUS_TJT BIT(3)
+/* Transmit Buffer Unavailable */
+#define NPCM_DMA_STATUS_TU BIT(2)
+/* Transmit Process Stopped */
+#define NPCM_DMA_STATUS_TPS BIT(1)
+/* Transmit Interrupt */
+#define NPCM_DMA_STATUS_TI BIT(0)
+
+/* Normal Interrupt Summary */
+#define NPCM_DMA_STATUS_NIS BIT(16)
+/* Interrupts enabled by NIE */
+#define NPCM_DMA_STATUS_NIS_BITS (NPCM_DMA_STATUS_TI | \
+ NPCM_DMA_STATUS_TU | \
+ NPCM_DMA_STATUS_RI | \
+ NPCM_DMA_STATUS_ERI)
+/* Abnormal Interrupt Summary */
+#define NPCM_DMA_STATUS_AIS BIT(15)
+/* Interrupts enabled by AIE */
+#define NPCM_DMA_STATUS_AIS_BITS (NPCM_DMA_STATUS_TPS | \
+ NPCM_DMA_STATUS_TJT | \
+ NPCM_DMA_STATUS_OVF | \
+ NPCM_DMA_STATUS_UNF | \
+ NPCM_DMA_STATUS_RU | \
+ NPCM_DMA_STATUS_RPS | \
+ NPCM_DMA_STATUS_RWT | \
+ NPCM_DMA_STATUS_ETI | \
+ NPCM_DMA_STATUS_FBI)
+
+/* Early Receive Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_ERE BIT(14)
+/* Fatal Bus Error Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_FBE BIT(13)
+/* Early Transmit Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_ETE BIT(10)
+/* Receive Watchdog Timeout Enable */
+#define NPCM_DMA_INTR_ENAB_RWE BIT(9)
+/* Receive Process Stopped Enable */
+#define NPCM_DMA_INTR_ENAB_RSE BIT(8)
+/* Receive Buffer Unavailable Enable */
+#define NPCM_DMA_INTR_ENAB_RUE BIT(7)
+/* Receive Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_RIE BIT(6)
+/* Transmit Underflow Enable */
+#define NPCM_DMA_INTR_ENAB_UNE BIT(5)
+/* Receive Overflow Enable */
+#define NPCM_DMA_INTR_ENAB_OVE BIT(4)
+/* Transmit Jabber Timeout Enable */
+#define NPCM_DMA_INTR_ENAB_TJE BIT(3)
+/* Transmit Buffer Unavailable Enable */
+#define NPCM_DMA_INTR_ENAB_TUE BIT(2)
+/* Transmit Process Stopped Enable */
+#define NPCM_DMA_INTR_ENAB_TSE BIT(1)
+/* Transmit Interrupt Enable */
+#define NPCM_DMA_INTR_ENAB_TIE BIT(0)
+
+/* Normal Interrupt Summary Enable */
+#define NPCM_DMA_INTR_ENAB_NIE BIT(16)
+/* Interrupts enabled by NIE Enable */
+#define NPCM_DMA_INTR_ENAB_NIE_BITS (NPCM_DMA_INTR_ENAB_TIE | \
+ NPCM_DMA_INTR_ENAB_TUE | \
+ NPCM_DMA_INTR_ENAB_RIE | \
+ NPCM_DMA_INTR_ENAB_ERE)
+/* Abnormal Interrupt Summary Enable */
+#define NPCM_DMA_INTR_ENAB_AIE BIT(15)
+/* Interrupts enabled by AIE Enable */
+#define NPCM_DMA_INTR_ENAB_AIE_BITS (NPCM_DMA_INTR_ENAB_TSE | \
+ NPCM_DMA_INTR_ENAB_TJE | \
+ NPCM_DMA_INTR_ENAB_OVE | \
+ NPCM_DMA_INTR_ENAB_UNE | \
+ NPCM_DMA_INTR_ENAB_RUE | \
+ NPCM_DMA_INTR_ENAB_RSE | \
+ NPCM_DMA_INTR_ENAB_RWE | \
+ NPCM_DMA_INTR_ENAB_ETE | \
+ NPCM_DMA_INTR_ENAB_FBE)
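+
+/*
+ * Sketch of how a device model might combine the two groups above (assumed
+ * usage, not mandated by this header): the NIS/AIS summary bits are set
+ * whenever an enabled interrupt from the corresponding group is pending.
+ * The per-bit positions match between the Status and Interrupt Enable
+ * registers, so the masks can be combined directly, e.g.:
+ *
+ *   uint32_t pending = status & intr_enab;
+ *   if (pending & NPCM_DMA_STATUS_NIS_BITS) {
+ *       status |= NPCM_DMA_STATUS_NIS;
+ *   }
+ *   if (pending & NPCM_DMA_STATUS_AIS_BITS) {
+ *       status |= NPCM_DMA_STATUS_AIS;
+ *   }
+ */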
+
+/* Flushing Disabled */
+#define NPCM_DMA_CONTROL_FLUSH_MASK BIT(24)
+/* Start/stop Transmit */
+#define NPCM_DMA_CONTROL_START_STOP_TX BIT(13)
+/* Start/stop Receive */
+#define NPCM_DMA_CONTROL_START_STOP_RX BIT(1)
+/* Next receive descriptor start address */
+#define NPCM_DMA_HOST_RX_DESC_MASK(word) ((uint32_t) (word) & ~3u)
+/* Next transmit descriptor start address */
+#define NPCM_DMA_HOST_TX_DESC_MASK(word) ((uint32_t) (word) & ~3u)
+
+/* Receive enable */
+#define NPCM_GMAC_MAC_CONFIG_RX_EN BIT(2)
+/* Transmit enable */
+#define NPCM_GMAC_MAC_CONFIG_TX_EN BIT(3)
+
+/* Frame Receive All */
+#define NPCM_GMAC_FRAME_FILTER_REC_ALL_MASK BIT(31)
+/* Frame HPF Filter */
+#define NPCM_GMAC_FRAME_FILTER_HPF_MASK BIT(10)
+/* Frame SAF Filter */
+#define NPCM_GMAC_FRAME_FILTER_SAF_MASK BIT(9)
+/* Frame SAIF Filter */
+#define NPCM_GMAC_FRAME_FILTER_SAIF_MASK BIT(8)
+/* Frame PCF Filter */
+#define NPCM_GMAC_FRAME_FILTER_PCF_MASK(word) extract32((word), 6, 2)
+/* Frame DBF Filter */
+#define NPCM_GMAC_FRAME_FILTER_DBF_MASK BIT(5)
+/* Frame PM Filter */
+#define NPCM_GMAC_FRAME_FILTER_PM_MASK BIT(4)
+/* Frame DAIF Filter */
+#define NPCM_GMAC_FRAME_FILTER_DAIF_MASK BIT(3)
+/* Frame HMC Filter */
+#define NPCM_GMAC_FRAME_FILTER_HMC_MASK BIT(2)
+/* Frame HUC Filter */
+#define NPCM_GMAC_FRAME_FILTER_HUC_MASK BIT(1)
+/* Frame PR Filter */
+#define NPCM_GMAC_FRAME_FILTER_PR_MASK BIT(0)
+
+#endif /* NPCM_GMAC_H */
diff --git a/include/hw/net/smc91c111.h b/include/hw/net/smc91c111.h
index df5b11dcef..dba32a233f 100644
--- a/include/hw/net/smc91c111.h
+++ b/include/hw/net/smc91c111.h
@@ -13,6 +13,6 @@
#include "net/net.h"
-void smc91c111_init(NICInfo *, uint32_t, qemu_irq);
+void smc91c111_init(uint32_t, qemu_irq);
#endif
diff --git a/include/hw/net/xlnx-versal-canfd.h b/include/hw/net/xlnx-versal-canfd.h
new file mode 100644
index 0000000000..ad3104dd13
--- /dev/null
+++ b/include/hw/net/xlnx-versal-canfd.h
@@ -0,0 +1,87 @@
+/*
+ * QEMU model of the Xilinx Versal CANFD Controller.
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ *
+ * Written-by: Vikram Garhwal <vikram.garhwal@amd.com>
+ * Based on QEMU CANFD Device emulation implemented by Jin Yang, Deniz Eren and
+ * Pavel Pisa.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_CANFD_XILINX_H
+#define HW_CANFD_XILINX_H
+
+#include "hw/register.h"
+#include "hw/ptimer.h"
+#include "net/can_emu.h"
+#include "hw/qdev-clock.h"
+
+#define TYPE_XILINX_CANFD "xlnx.versal-canfd"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCANFDState, XILINX_CANFD)
+
+#define NUM_REGS_PER_MSG_SPACE 18 /* 1 ID + 1 DLC + 16 Data(DW0 - DW15) regs. */
+#define MAX_NUM_RX 64
+#define OFFSET_RX1_DW15 (0x4144 / 4)
+#define CANFD_TIMER_MAX 0xFFFFUL
+#define CANFD_DEFAULT_CLOCK (25 * 1000 * 1000)
+
+#define XLNX_VERSAL_CANFD_R_MAX (OFFSET_RX1_DW15 + \
+ ((MAX_NUM_RX - 1) * NUM_REGS_PER_MSG_SPACE) + 1)
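+
+/*
+ * Illustrative arithmetic: with the values above this works out to
+ * 0x4144 / 4 + (64 - 1) * 18 + 1 = 4177 + 1134 + 1 = 5312 32-bit registers.
+ */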
+
+typedef struct XlnxVersalCANFDState {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ qemu_irq irq_canfd_int;
+ qemu_irq irq_addr_err;
+
+ RegisterInfo reg_info[XLNX_VERSAL_CANFD_R_MAX];
+ RegisterAccessInfo *tx_regs;
+ RegisterAccessInfo *rx0_regs;
+ RegisterAccessInfo *rx1_regs;
+ RegisterAccessInfo *af_regs;
+ RegisterAccessInfo *txe_regs;
+ RegisterAccessInfo *rx_mailbox_regs;
+ RegisterAccessInfo *af_mask_regs_mailbox;
+
+ uint32_t regs[XLNX_VERSAL_CANFD_R_MAX];
+
+ ptimer_state *canfd_timer;
+
+ CanBusClientState bus_client;
+ CanBusState *canfdbus;
+
+ struct {
+ uint8_t rx0_fifo;
+ uint8_t rx1_fifo;
+ uint8_t tx_fifo;
+ bool enable_rx_fifo1;
+ uint32_t ext_clk_freq;
+ } cfg;
+
+} XlnxVersalCANFDState;
+
+typedef struct tx_ready_reg_info {
+ uint32_t can_id;
+ uint32_t reg_num;
+} tx_ready_reg_info;
+
+#endif
diff --git a/include/hw/net/xlnx-zynqmp-can.h b/include/hw/net/xlnx-zynqmp-can.h
new file mode 100644
index 0000000000..fd2aa77760
--- /dev/null
+++ b/include/hw/net/xlnx-zynqmp-can.h
@@ -0,0 +1,79 @@
+/*
+ * QEMU model of the Xilinx ZynqMP CAN controller.
+ *
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Written-by: Vikram Garhwal <fnu.vikram@xilinx.com>
+ *
+ * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and
+ * Pavel Pisa.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_ZYNQMP_CAN_H
+#define XLNX_ZYNQMP_CAN_H
+
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "net/can_emu.h"
+#include "net/can_host.h"
+#include "qemu/fifo32.h"
+#include "hw/ptimer.h"
+#include "hw/qdev-clock.h"
+
+#define TYPE_XLNX_ZYNQMP_CAN "xlnx.zynqmp-can"
+
+#define XLNX_ZYNQMP_CAN(obj) \
+ OBJECT_CHECK(XlnxZynqMPCANState, (obj), TYPE_XLNX_ZYNQMP_CAN)
+
+#define MAX_CAN_CTRLS 2
+#define XLNX_ZYNQMP_CAN_R_MAX (0x84 / 4)
+#define MAILBOX_CAPACITY 64
+#define CAN_TIMER_MAX 0xFFFFUL
+#define CAN_DEFAULT_CLOCK (24 * 1000 * 1000)
+
+/* Each CAN_FRAME will have 4 * 32bit size. */
+#define CAN_FRAME_SIZE 4
+#define RXFIFO_SIZE (MAILBOX_CAPACITY * CAN_FRAME_SIZE)
+
+typedef struct XlnxZynqMPCANState {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ qemu_irq irq;
+
+ CanBusClientState bus_client;
+ CanBusState *canbus;
+
+ struct {
+ uint32_t ext_clk_freq;
+ } cfg;
+
+ RegisterInfo reg_info[XLNX_ZYNQMP_CAN_R_MAX];
+ uint32_t regs[XLNX_ZYNQMP_CAN_R_MAX];
+
+ Fifo32 rx_fifo;
+ Fifo32 tx_fifo;
+ Fifo32 txhpb_fifo;
+
+ ptimer_state *can_timer;
+} XlnxZynqMPCANState;
+
+#endif
diff --git a/include/hw/nmi.h b/include/hw/nmi.h
index fe37ce3ad8..fff41bebc6 100644
--- a/include/hw/nmi.h
+++ b/include/hw/nmi.h
@@ -26,20 +26,19 @@
#define TYPE_NMI "nmi"
-#define NMI_CLASS(klass) \
- OBJECT_CLASS_CHECK(NMIClass, (klass), TYPE_NMI)
-#define NMI_GET_CLASS(obj) \
- OBJECT_GET_CLASS(NMIClass, (obj), TYPE_NMI)
+typedef struct NMIClass NMIClass;
+DECLARE_CLASS_CHECKERS(NMIClass, NMI,
+ TYPE_NMI)
#define NMI(obj) \
INTERFACE_CHECK(NMIState, (obj), TYPE_NMI)
typedef struct NMIState NMIState;
-typedef struct NMIClass {
+struct NMIClass {
InterfaceClass parent_class;
void (*nmi_monitor_handler)(NMIState *n, int cpu_index, Error **errp);
-} NMIClass;
+};
void nmi_monitor_handle(int cpu_index, Error **errp);
diff --git a/include/hw/nubus/mac-nubus-bridge.h b/include/hw/nubus/mac-nubus-bridge.h
index ce9c789d99..be4dd83530 100644
--- a/include/hw/nubus/mac-nubus-bridge.h
+++ b/include/hw/nubus/mac-nubus-bridge.h
@@ -6,19 +6,24 @@
*
*/
-#ifndef HW_NUBUS_MAC_H
-#define HW_NUBUS_MAC_H
+#ifndef HW_NUBUS_MAC_NUBUS_BRIDGE_H
+#define HW_NUBUS_MAC_NUBUS_BRIDGE_H
#include "hw/nubus/nubus.h"
+#include "qom/object.h"
+
+#define MAC_NUBUS_FIRST_SLOT 0x9
+#define MAC_NUBUS_LAST_SLOT 0xe
+#define MAC_NUBUS_SLOT_NB (MAC_NUBUS_LAST_SLOT - MAC_NUBUS_FIRST_SLOT + 1)
#define TYPE_MAC_NUBUS_BRIDGE "mac-nubus-bridge"
-#define MAC_NUBUS_BRIDGE(obj) OBJECT_CHECK(MacNubusState, (obj), \
- TYPE_MAC_NUBUS_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(MacNubusBridge, MAC_NUBUS_BRIDGE)
-typedef struct MacNubusState {
- SysBusDevice sysbus_dev;
+struct MacNubusBridge {
+ NubusBridge parent_obj;
- NubusBus *bus;
-} MacNubusState;
+ MemoryRegion super_slot_alias;
+ MemoryRegion slot_alias;
+};
#endif
diff --git a/include/hw/nubus/nubus-virtio-mmio.h b/include/hw/nubus/nubus-virtio-mmio.h
new file mode 100644
index 0000000000..de497b7f76
--- /dev/null
+++ b/include/hw/nubus/nubus-virtio-mmio.h
@@ -0,0 +1,36 @@
+/*
+ * QEMU Macintosh Nubus Virtio MMIO card
+ *
+ * Copyright (c) 2023 Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_NUBUS_VIRTIO_MMIO_H
+#define HW_NUBUS_VIRTIO_MMIO_H
+
+#include "hw/nubus/nubus.h"
+#include "qom/object.h"
+#include "hw/intc/goldfish_pic.h"
+#include "hw/virtio/virtio-mmio.h"
+
+#define TYPE_NUBUS_VIRTIO_MMIO "nubus-virtio-mmio"
+OBJECT_DECLARE_TYPE(NubusVirtioMMIO, NubusVirtioMMIODeviceClass,
+ NUBUS_VIRTIO_MMIO)
+
+struct NubusVirtioMMIODeviceClass {
+ DeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+#define NUBUS_VIRTIO_MMIO_NUM_DEVICES 32
+
+struct NubusVirtioMMIO {
+ NubusDevice parent_obj;
+
+ GoldfishPICState pic;
+ VirtIOMMIOProxy virtio_mmio[NUBUS_VIRTIO_MMIO_NUM_DEVICES];
+};
+
+#endif
diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h
index a8634e54c5..fee79b71d1 100644
--- a/include/hw/nubus/nubus.h
+++ b/include/hw/nubus/nubus.h
@@ -10,60 +10,66 @@
#define HW_NUBUS_NUBUS_H
#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
#include "exec/address-spaces.h"
+#include "qom/object.h"
+#include "qemu/units.h"
#define NUBUS_SUPER_SLOT_SIZE 0x10000000U
-#define NUBUS_SUPER_SLOT_NB 0x9
+#define NUBUS_SUPER_SLOT_NB 0xe
+
+#define NUBUS_SLOT_BASE (NUBUS_SUPER_SLOT_SIZE * \
+ (NUBUS_SUPER_SLOT_NB + 1))
#define NUBUS_SLOT_SIZE 0x01000000
-#define NUBUS_SLOT_NB 0xF
+#define NUBUS_FIRST_SLOT 0x0
+#define NUBUS_LAST_SLOT 0xf
+#define NUBUS_SLOT_NB (NUBUS_LAST_SLOT - NUBUS_FIRST_SLOT + 1)
-#define NUBUS_FIRST_SLOT 0x9
-#define NUBUS_LAST_SLOT 0xF
+#define NUBUS_IRQS 16
#define TYPE_NUBUS_DEVICE "nubus-device"
-#define NUBUS_DEVICE(obj) \
- OBJECT_CHECK(NubusDevice, (obj), TYPE_NUBUS_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(NubusDevice, NUBUS_DEVICE)
#define TYPE_NUBUS_BUS "nubus-bus"
-#define NUBUS_BUS(obj) OBJECT_CHECK(NubusBus, (obj), TYPE_NUBUS_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(NubusBus, NUBUS_BUS)
#define TYPE_NUBUS_BRIDGE "nubus-bridge"
-#define NUBUS_BRIDGE(obj) OBJECT_CHECK(NubusBridge, (obj), TYPE_NUBUS_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(NubusBridge, NUBUS_BRIDGE);
-typedef struct NubusBus {
+struct NubusBus {
BusState qbus;
+ AddressSpace nubus_as;
+ MemoryRegion nubus_mr;
+
MemoryRegion super_slot_io;
MemoryRegion slot_io;
- int current_slot;
-} NubusBus;
+ uint16_t slot_available_mask;
-typedef struct NubusDevice {
- DeviceState qdev;
+ qemu_irq irqs[NUBUS_IRQS];
+};
- int slot_nb;
- MemoryRegion slot_mem;
+#define NUBUS_DECL_ROM_MAX_SIZE (1 * MiB)
- /* Format Block */
+struct NubusDevice {
+ DeviceState qdev;
- MemoryRegion fblock_io;
+ int32_t slot;
+ MemoryRegion super_slot_mem;
+ MemoryRegion slot_mem;
- uint32_t rom_length;
- uint32_t rom_crc;
- uint8_t rom_rev;
- uint8_t rom_format;
- uint8_t byte_lanes;
- int32_t directory_offset;
+ char *romfile;
+ MemoryRegion decl_rom;
+};
- /* ROM */
+void nubus_set_irq(NubusDevice *nd, int level);
- MemoryRegion rom_io;
- const uint8_t *rom;
-} NubusDevice;
+struct NubusBridge {
+ SysBusDevice parent_obj;
-void nubus_register_rom(NubusDevice *dev, const uint8_t *rom, uint32_t size,
- int revision, int format, uint8_t byte_lanes);
+ NubusBus bus;
+};
#endif
diff --git a/include/hw/nvram/chrp_nvram.h b/include/hw/nvram/chrp_nvram.h
index 09941a9be4..4a0f5c21b8 100644
--- a/include/hw/nvram/chrp_nvram.h
+++ b/include/hw/nvram/chrp_nvram.h
@@ -50,7 +50,8 @@ chrp_nvram_finish_partition(ChrpNvramPartHdr *header, uint32_t size)
header->checksum = sum & 0xff;
}
-int chrp_nvram_create_system_partition(uint8_t *data, int min_len);
+/* chrp_nvram_create_system_partition() failure is fatal */
+int chrp_nvram_create_system_partition(uint8_t *data, int min_len, int max_len);
int chrp_nvram_create_free_partition(uint8_t *data, int len);
#endif
diff --git a/include/hw/nvram/eeprom_at24c.h b/include/hw/nvram/eeprom_at24c.h
new file mode 100644
index 0000000000..acb9857b2a
--- /dev/null
+++ b/include/hw/nvram/eeprom_at24c.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ */
+
+#ifndef EEPROM_AT24C_H
+#define EEPROM_AT24C_H
+
+#include "hw/i2c/i2c.h"
+
+/*
+ * Create and realize an AT24C EEPROM device on the heap.
+ * @bus: I2C bus to put it on
+ * @address: I2C address of the EEPROM slave when put on a bus
+ * @rom_size: size of the EEPROM
+ *
+ * Create the device state structure, initialize it, put it on the specified
+ * @bus, and drop the reference to it (the device is realized).
+ */
+I2CSlave *at24c_eeprom_init(I2CBus *bus, uint8_t address, uint32_t rom_size);
+
+
+/*
+ * Create and realize an AT24C EEPROM device on the heap with initial data.
+ * @bus: I2C bus to put it on
+ * @address: I2C address of the EEPROM slave when put on a bus
+ * @rom_size: size of the EEPROM
+ * @init_rom: Array of bytes to initialize EEPROM memory with
+ * @init_rom_size: Size of @init_rom, must be less than or equal to @rom_size
+ *
+ * Create the device state structure, initialize it, put it on the specified
+ * @bus, and drop the reference to it (the device is realized). Copies the data
+ * from @init_rom to the beginning of the EEPROM memory buffer.
+ */
+I2CSlave *at24c_eeprom_init_rom(I2CBus *bus, uint8_t address, uint32_t rom_size,
+ const uint8_t *init_rom, uint32_t init_rom_size);
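+
+/*
+ * Example usage (a sketch with made-up bus, address and size values, not
+ * taken from any particular board; KiB comes from "qemu/units.h"):
+ *
+ *   static const uint8_t fru_data[] = { 0x01, 0x00, 0x00, 0x00 };
+ *
+ *   at24c_eeprom_init(i2c_bus, 0x50, 32 * KiB);
+ *   at24c_eeprom_init_rom(i2c_bus, 0x51, 32 * KiB,
+ *                         fru_data, sizeof(fru_data));
+ */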
+
+#endif
diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h
index f190c428e8..c1f81a5f13 100644
--- a/include/hw/nvram/fw_cfg.h
+++ b/include/hw/nvram/fw_cfg.h
@@ -5,24 +5,22 @@
#include "standard-headers/linux/qemu_fw_cfg.h"
#include "hw/sysbus.h"
#include "sysemu/dma.h"
+#include "qom/object.h"
#define TYPE_FW_CFG "fw_cfg"
#define TYPE_FW_CFG_IO "fw_cfg_io"
#define TYPE_FW_CFG_MEM "fw_cfg_mem"
#define TYPE_FW_CFG_DATA_GENERATOR_INTERFACE "fw_cfg-data-generator"
-#define FW_CFG(obj) OBJECT_CHECK(FWCfgState, (obj), TYPE_FW_CFG)
-#define FW_CFG_IO(obj) OBJECT_CHECK(FWCfgIoState, (obj), TYPE_FW_CFG_IO)
-#define FW_CFG_MEM(obj) OBJECT_CHECK(FWCfgMemState, (obj), TYPE_FW_CFG_MEM)
+OBJECT_DECLARE_SIMPLE_TYPE(FWCfgState, FW_CFG)
+OBJECT_DECLARE_SIMPLE_TYPE(FWCfgIoState, FW_CFG_IO)
+OBJECT_DECLARE_SIMPLE_TYPE(FWCfgMemState, FW_CFG_MEM)
-#define FW_CFG_DATA_GENERATOR_CLASS(class) \
- OBJECT_CLASS_CHECK(FWCfgDataGeneratorClass, (class), \
+typedef struct FWCfgDataGeneratorClass FWCfgDataGeneratorClass;
+DECLARE_CLASS_CHECKERS(FWCfgDataGeneratorClass, FW_CFG_DATA_GENERATOR,
TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)
-#define FW_CFG_DATA_GENERATOR_GET_CLASS(obj) \
- OBJECT_GET_CLASS(FWCfgDataGeneratorClass, (obj), \
- TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)
-typedef struct FWCfgDataGeneratorClass {
+struct FWCfgDataGeneratorClass {
/*< private >*/
InterfaceClass parent_class;
/*< public >*/
@@ -39,7 +37,7 @@ typedef struct FWCfgDataGeneratorClass {
* required.
*/
GByteArray *(*get_data)(Object *obj, Error **errp);
-} FWCfgDataGeneratorClass;
+};
typedef struct fw_cfg_file FWCfgFile;
@@ -310,6 +308,15 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename, void *data,
bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename,
const char *gen_id, Error **errp);
+/**
+ * fw_cfg_add_extra_pci_roots:
+ * @bus: main pci root bus to be scanned from
+ * @s: fw_cfg device being modified
+ *
+ * Add a new fw_cfg item...
+ */
+void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s);
+
FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
AddressSpace *dma_as);
FWCfgState *fw_cfg_init_io(uint32_t iobase);
@@ -335,4 +342,25 @@ bool fw_cfg_dma_enabled(void *opaque);
*/
const char *fw_cfg_arch_key_name(uint16_t key);
+/**
+ * load_image_to_fw_cfg() - Load an image file into an fw_cfg entry identified
+ * by key.
+ * @fw_cfg: The firmware config instance to store the data in.
+ * @size_key: The firmware config key to store the size of the loaded
+ * data under, with fw_cfg_add_i32().
+ * @data_key: The firmware config key to store the loaded data under,
+ * with fw_cfg_add_bytes().
+ * @image_name: The name of the image file to load. If it is NULL, the
+ * function returns without doing anything.
+ * @try_decompress: Whether the image should be decompressed (gunzipped) before
+ * adding it to fw_cfg. If decompression fails, the image is
+ * loaded as-is.
+ *
+ * In case of failure, the function prints an error message to stderr and the
+ * process exits with status 1.
+ */
+void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
+ uint16_t data_key, const char *image_name,
+ bool try_decompress);
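+
+/*
+ * Typical usage, sketched with the generic kernel/initrd keys from
+ * "standard-headers/linux/qemu_fw_cfg.h" (the keys actually used are
+ * machine specific):
+ *
+ *   load_image_to_fw_cfg(fw_cfg, FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
+ *                        machine->kernel_filename, false);
+ *   load_image_to_fw_cfg(fw_cfg, FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA,
+ *                        machine->initrd_filename, false);
+ */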
+
#endif
diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h
new file mode 100644
index 0000000000..b39eb0490f
--- /dev/null
+++ b/include/hw/nvram/fw_cfg_acpi.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ACPI support for fw_cfg
+ *
+ */
+
+#ifndef FW_CFG_ACPI_H
+#define FW_CFG_ACPI_H
+
+#include "exec/hwaddr.h"
+
+void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap);
+
+#endif
diff --git a/include/hw/nvram/mac_nvram.h b/include/hw/nvram/mac_nvram.h
new file mode 100644
index 0000000000..0c4dfaeff6
--- /dev/null
+++ b/include/hw/nvram/mac_nvram.h
@@ -0,0 +1,52 @@
+/*
+ * PowerMac NVRAM emulation
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef MAC_NVRAM_H
+#define MAC_NVRAM_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+#define MACIO_NVRAM_SIZE 0x2000
+
+#define TYPE_MACIO_NVRAM "macio-nvram"
+OBJECT_DECLARE_SIMPLE_TYPE(MacIONVRAMState, MACIO_NVRAM)
+
+struct MacIONVRAMState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ uint32_t size;
+ uint32_t it_shift;
+
+ MemoryRegion mem;
+ uint8_t *data;
+ BlockBackend *blk;
+};
+
+void pmac_format_nvram_partition(MacIONVRAMState *nvr, int len);
+
+#endif /* MAC_NVRAM_H */
diff --git a/include/hw/nvram/npcm7xx_otp.h b/include/hw/nvram/npcm7xx_otp.h
new file mode 100644
index 0000000000..ea4b5d0731
--- /dev/null
+++ b/include/hw/nvram/npcm7xx_otp.h
@@ -0,0 +1,79 @@
+/*
+ * Nuvoton NPCM7xx OTP (Fuse Array) Interface
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_OTP_H
+#define NPCM7XX_OTP_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+
+/* Each OTP module holds 8192 bits of one-time programmable storage */
+#define NPCM7XX_OTP_ARRAY_BITS (8192)
+#define NPCM7XX_OTP_ARRAY_BYTES (NPCM7XX_OTP_ARRAY_BITS / BITS_PER_BYTE)
+
+/* Fuse array offsets */
+#define NPCM7XX_FUSE_FUSTRAP (0)
+#define NPCM7XX_FUSE_CP_FUSTRAP (12)
+#define NPCM7XX_FUSE_DAC_CALIB (16)
+#define NPCM7XX_FUSE_ADC_CALIB (24)
+#define NPCM7XX_FUSE_DERIVATIVE (64)
+#define NPCM7XX_FUSE_TEST_SIG (72)
+#define NPCM7XX_FUSE_DIE_LOCATION (74)
+#define NPCM7XX_FUSE_GP1 (80)
+#define NPCM7XX_FUSE_GP2 (128)
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_OTP_NR_REGS (0x18 / sizeof(uint32_t))
+
+/**
+ * struct NPCM7xxOTPState - Device state for one OTP module.
+ * @parent: System bus device.
+ * @mmio: Memory region through which registers are accessed.
+ * @regs: Register contents.
+ * @array: OTP storage array.
+ */
+typedef struct NPCM7xxOTPState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+ uint32_t regs[NPCM7XX_OTP_NR_REGS];
+ uint8_t array[NPCM7XX_OTP_ARRAY_BYTES];
+} NPCM7xxOTPState;
+
+#define TYPE_NPCM7XX_OTP "npcm7xx-otp"
+#define NPCM7XX_OTP(obj) OBJECT_CHECK(NPCM7xxOTPState, (obj), TYPE_NPCM7XX_OTP)
+
+#define TYPE_NPCM7XX_KEY_STORAGE "npcm7xx-key-storage"
+#define TYPE_NPCM7XX_FUSE_ARRAY "npcm7xx-fuse-array"
+
+typedef struct NPCM7xxOTPClass NPCM7xxOTPClass;
+
+/**
+ * npcm7xx_otp_array_write - ECC encode and write data to OTP array.
+ * @s: OTP module.
+ * @data: Data to be encoded and written.
+ * @offset: Offset of first byte to be written in the OTP array.
+ * @len: Number of bytes before ECC encoding.
+ *
+ * Each nibble of data is encoded into a byte, so the number of bytes written
+ * to the array will be @len * 2.
+ */
+void npcm7xx_otp_array_write(NPCM7xxOTPState *s, const void *data,
+ unsigned int offset, unsigned int len);
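+
+/*
+ * Worked example (illustrative value only): writing the 4-byte word below
+ * at offset 0 consumes array[0] through array[7], since its 8 nibbles are
+ * expanded to 8 ECC-encoded bytes.
+ *
+ *   const uint32_t fustrap = 0x00000012;
+ *   npcm7xx_otp_array_write(s, &fustrap, NPCM7XX_FUSE_FUSTRAP,
+ *                           sizeof(fustrap));
+ */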
+
+#endif /* NPCM7XX_OTP_H */
diff --git a/include/hw/nvram/nrf51_nvm.h b/include/hw/nvram/nrf51_nvm.h
index 3792e4a9fe..d85e788df5 100644
--- a/include/hw/nvram/nrf51_nvm.h
+++ b/include/hw/nvram/nrf51_nvm.h
@@ -23,8 +23,9 @@
#define NRF51_NVM_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_NRF51_NVM "nrf51_soc.nvm"
-#define NRF51_NVM(obj) OBJECT_CHECK(NRF51NVMState, (obj), TYPE_NRF51_NVM)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51NVMState, NRF51_NVM)
#define NRF51_UICR_FIXTURE_SIZE 64
@@ -44,7 +45,7 @@
#define NRF51_UICR_SIZE 0x100
-typedef struct NRF51NVMState {
+struct NRF51NVMState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -58,7 +59,7 @@ typedef struct NRF51NVMState {
uint32_t config;
-} NRF51NVMState;
+};
#endif
diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h
new file mode 100644
index 0000000000..6fc13f8cc1
--- /dev/null
+++ b/include/hw/nvram/xlnx-bbram.h
@@ -0,0 +1,54 @@
+/*
+ * QEMU model of the Xilinx BBRAM Battery Backed RAM
+ *
+ * Copyright (c) 2015-2021 Xilinx Inc.
+ *
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_BBRAM_H
+#define XLNX_BBRAM_H
+
+#include "sysemu/block-backend.h"
+#include "hw/qdev-core.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+
+#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1)
+
+#define TYPE_XLNX_BBRAM "xlnx.bbram-ctrl"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM);
+
+struct XlnxBBRam {
+ SysBusDevice parent_obj;
+ qemu_irq irq_bbram;
+
+ BlockBackend *blk;
+
+ uint32_t crc_zpads;
+ bool bbram8_wo;
+ bool blk_ro;
+
+ uint32_t regs[RMAX_XLNX_BBRAM];
+ RegisterInfo regs_info[RMAX_XLNX_BBRAM];
+};
+
+#endif
diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h
new file mode 100644
index 0000000000..cff7924106
--- /dev/null
+++ b/include/hw/nvram/xlnx-efuse.h
@@ -0,0 +1,132 @@
+/*
+ * QEMU model of the Xilinx eFuse core
+ *
+ * Copyright (c) 2015 Xilinx Inc.
+ *
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_EFUSE_H
+#define XLNX_EFUSE_H
+
+#include "sysemu/block-backend.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_XLNX_EFUSE "xlnx-efuse"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE);
+
+struct XlnxEFuse {
+ DeviceState parent_obj;
+ BlockBackend *blk;
+ bool blk_ro;
+ uint32_t *fuse32;
+
+ DeviceState *dev;
+
+ bool init_tbits;
+
+ uint8_t efuse_nr;
+ uint32_t efuse_size;
+
+ uint32_t *ro_bits;
+ uint32_t ro_bits_cnt;
+};
+
+/**
+ * xlnx_efuse_calc_crc:
+ * @data: an array of 32-bit words for which the CRC should be computed
+ * @u32_cnt: the array size in number of 32-bit words
+ * @zpads: the number of 32-bit zeros prepended to @data before computation
+ *
+ * This function is used to compute the CRC for an array of 32-bit words,
+ * using a Xilinx-specific data padding.
+ *
+ * Returns: the computed 32-bit CRC
+ */
+uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt,
+ unsigned zpads);
+
+/**
+ * xlnx_efuse_get_bit:
+ * @s: the efuse object
+ * @bit: the efuse bit-address to read the data
+ *
+ * Returns: the bit, 0 or 1, at @bit of object @s
+ */
+bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit);
+
+/**
+ * xlnx_efuse_set_bit:
+ * @s: the efuse object
+ * @bit: the efuse bit-address to be written a value of 1
+ *
+ * Returns: true on success, false on failure
+ */
+bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit);
+
+/**
+ * xlnx_efuse_k256_check:
+ * @s: the efuse object
+ * @crc: the 32-bit CRC to be compared with
+ * @start: the efuse bit-address (which must be a multiple of 32) of the
+ * start of a 256-bit array
+ *
+ * This function computes the CRC of a 256-bit array starting at @start
+ * and then compares it to the given @crc.
+ *
+ * Returns: true if @crc == computed, false otherwise
+ */
+bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start);
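+
+/*
+ * Sketch of how the two CRC helpers above can be combined (hypothetical
+ * variables: @key_u32 is assumed to hold a 256-bit key as 8 words and
+ * @key_start is a bit address that is a multiple of 32):
+ *
+ *   uint32_t crc = xlnx_efuse_calc_crc(key_u32, 8, 0);
+ *   if (!xlnx_efuse_k256_check(s, crc, key_start)) {
+ *       qemu_log_mask(LOG_GUEST_ERROR, "eFuse key CRC mismatch\n");
+ *   }
+ */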
+
+/**
+ * xlnx_efuse_tbits_check:
+ * @s: the efuse object
+ *
+ * This function inspects a number of efuse bits at specific addresses
+ * to see if they match a validation pattern. Each pattern is a group
+ * of 4 bits, and there are 3 groups.
+ *
+ * Returns: a 3-bit mask, where a bit of '1' means the corresponding
+ * group has a valid pattern.
+ */
+uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s);
+
+/**
+ * xlnx_efuse_get_row:
+ * @s: the efuse object
+ * @bit: the efuse bit address for which a 32-bit value is read
+ *
+ * Returns: the entire 32 bits of the efuse, starting at a bit
+ * address that is a multiple of 32 and contains the bit at @bit
+ */
+static inline uint32_t xlnx_efuse_get_row(XlnxEFuse *s, unsigned int bit)
+{
+ if (!(s->fuse32)) {
+ return 0;
+ } else {
+ unsigned int row_idx = bit / 32;
+
+ assert(row_idx < (s->efuse_size * s->efuse_nr / 32));
+ return s->fuse32[row_idx];
+ }
+}
+
+#endif
diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h
new file mode 100644
index 0000000000..86e2261b9a
--- /dev/null
+++ b/include/hw/nvram/xlnx-versal-efuse.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_VERSAL_EFUSE_H
+#define XLNX_VERSAL_EFUSE_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/nvram/xlnx-efuse.h"
+
+#define XLNX_VERSAL_EFUSE_CTRL_R_MAX ((0x100 / 4) + 1)
+
+#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx-versal-efuse"
+#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx-pmc-efuse-cache"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCtrl, XLNX_VERSAL_EFUSE_CTRL);
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCache, XLNX_VERSAL_EFUSE_CACHE);
+
+struct XlnxVersalEFuseCtrl {
+ SysBusDevice parent_obj;
+ qemu_irq irq_efuse_imr;
+
+ XlnxEFuse *efuse;
+
+ void *extra_pg0_lock_spec; /* Opaque property */
+ uint32_t extra_pg0_lock_n16;
+
+ uint32_t regs[XLNX_VERSAL_EFUSE_CTRL_R_MAX];
+ RegisterInfo regs_info[XLNX_VERSAL_EFUSE_CTRL_R_MAX];
+};
+
+struct XlnxVersalEFuseCache {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+
+ XlnxEFuse *efuse;
+};
+
+/**
+ * xlnx_versal_efuse_read_row:
+ * @s: the efuse object
+ * @bit: the bit-address within the 32-bit row to be read
+ * @denied: if non-NULL, to receive true if the row is write-only
+ *
+ * Returns: the 32-bit word containing address @bit; 0 if @denied is true
+ */
+uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *s, uint32_t bit, bool *denied);
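+
+/*
+ * Example (illustrative, with hypothetical caller state): read the cached
+ * word that holds @bit and skip rows the guest is not allowed to observe:
+ *
+ *   bool denied;
+ *   uint32_t row = xlnx_versal_efuse_read_row(s->efuse, bit, &denied);
+ *   if (!denied) {
+ *       val = row;
+ *   }
+ */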
+
+#endif
diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h
new file mode 100644
index 0000000000..f5beacc2e6
--- /dev/null
+++ b/include/hw/nvram/xlnx-zynqmp-efuse.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Xilinx Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef XLNX_ZYNQMP_EFUSE_H
+#define XLNX_ZYNQMP_EFUSE_H
+
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/nvram/xlnx-efuse.h"
+
+#define XLNX_ZYNQMP_EFUSE_R_MAX ((0x10fc / 4) + 1)
+
+#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx-zynqmp-efuse"
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPEFuse, XLNX_ZYNQMP_EFUSE);
+
+struct XlnxZynqMPEFuse {
+ SysBusDevice parent_obj;
+ qemu_irq irq;
+
+ XlnxEFuse *efuse;
+ uint32_t regs[XLNX_ZYNQMP_EFUSE_R_MAX];
+ RegisterInfo regs_info[XLNX_ZYNQMP_EFUSE_R_MAX];
+};
+
+#endif
diff --git a/include/hw/openrisc/boot.h b/include/hw/openrisc/boot.h
new file mode 100644
index 0000000000..25a313d63a
--- /dev/null
+++ b/include/hw/openrisc/boot.h
@@ -0,0 +1,34 @@
+/*
+ * QEMU OpenRISC boot helpers.
+ *
+ * Copyright (c) 2022 Stafford Horne <shorne@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENRISC_BOOT_H
+#define OPENRISC_BOOT_H
+
+#include "exec/cpu-defs.h"
+
+hwaddr openrisc_load_kernel(ram_addr_t ram_size,
+ const char *kernel_filename,
+ uint32_t *bootstrap_pc);
+
+hwaddr openrisc_load_initrd(void *fdt, const char *filename,
+ hwaddr load_start, uint64_t mem_size);
+
+uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start,
+ uint64_t mem_size);
+
+#endif /* OPENRISC_BOOT_H */
diff --git a/include/hw/or-irq.h b/include/hw/or-irq.h
index 0038bfbe3d..c0a42f3711 100644
--- a/include/hw/or-irq.h
+++ b/include/hw/or-irq.h
@@ -35,9 +35,7 @@
*/
#define MAX_OR_LINES 48
-typedef struct OrIRQState qemu_or_irq;
-
-#define OR_IRQ(obj) OBJECT_CHECK(qemu_or_irq, (obj), TYPE_OR_IRQ)
+OBJECT_DECLARE_SIMPLE_TYPE(OrIRQState, OR_IRQ)
struct OrIRQState {
DeviceState parent_obj;
diff --git a/include/hw/pci-bridge/cxl_upstream_port.h b/include/hw/pci-bridge/cxl_upstream_port.h
new file mode 100644
index 0000000000..12635139f6
--- /dev/null
+++ b/include/hw/pci-bridge/cxl_upstream_port.h
@@ -0,0 +1,19 @@
+
+#ifndef CXL_USP_H
+#define CXL_USP_H
+#include "hw/pci/pcie.h"
+#include "hw/pci/pcie_port.h"
+#include "hw/cxl/cxl.h"
+
+typedef struct CXLUpstreamPort {
+ /*< private >*/
+ PCIEPort parent_obj;
+
+ /*< public >*/
+ CXLComponentState cxl_cstate;
+ CXLCCI swcci;
+ DOECap doe_cdat;
+ uint64_t sn;
+} CXLUpstreamPort;
+
+#endif /* CXL_USP_H */
diff --git a/include/hw/pci-bridge/pci_expander_bridge.h b/include/hw/pci-bridge/pci_expander_bridge.h
new file mode 100644
index 0000000000..0b3856d615
--- /dev/null
+++ b/include/hw/pci-bridge/pci_expander_bridge.h
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PCI_EXPANDER_BRIDGE_H
+#define PCI_EXPANDER_BRIDGE_H
+
+#include "hw/cxl/cxl.h"
+
+void pxb_cxl_hook_up_registers(CXLState *state, PCIBus *bus, Error **errp);
+
+#endif /* PCI_EXPANDER_BRIDGE_H */
diff --git a/include/hw/pci-bridge/simba.h b/include/hw/pci-bridge/simba.h
index d8649973ee..979cb17435 100644
--- a/include/hw/pci-bridge/simba.h
+++ b/include/hw/pci-bridge/simba.h
@@ -28,15 +28,15 @@
#define HW_PCI_BRIDGE_SIMBA_H
#include "hw/pci/pci_bridge.h"
+#include "qom/object.h"
-typedef struct SimbaPCIBridge {
+struct SimbaPCIBridge {
/*< private >*/
PCIBridge parent_obj;
-} SimbaPCIBridge;
+};
#define TYPE_SIMBA_PCI_BRIDGE "pbm-bridge"
-#define SIMBA_PCI_BRIDGE(obj) \
- OBJECT_CHECK(SimbaPCIBridge, (obj), TYPE_SIMBA_PCI_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(SimbaPCIBridge, SIMBA_PCI_BRIDGE)
#endif
diff --git a/include/hw/pci-bridge/xio3130_downstream.h b/include/hw/pci-bridge/xio3130_downstream.h
new file mode 100644
index 0000000000..1d10139aea
--- /dev/null
+++ b/include/hw/pci-bridge/xio3130_downstream.h
@@ -0,0 +1,15 @@
+/*
+ * TI X3130 pci express downstream port switch
+ *
+ * Copyright (C) 2022 Igor Mammedov <imammedo@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H
+#define HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H
+
+#define TYPE_XIO3130_DOWNSTREAM "xio3130-downstream"
+
+#endif
+
diff --git a/include/hw/pci-host/articia.h b/include/hw/pci-host/articia.h
new file mode 100644
index 0000000000..529c240274
--- /dev/null
+++ b/include/hw/pci-host/articia.h
@@ -0,0 +1,17 @@
+/*
+ * Mai Logic Articia S emulation
+ *
+ * Copyright (c) 2023 BALATON Zoltan
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef ARTICIA_H
+#define ARTICIA_H
+
+#define TYPE_ARTICIA "articia"
+#define TYPE_ARTICIA_PCI_HOST "articia-pci-host"
+#define TYPE_ARTICIA_PCI_BRIDGE "articia-pci-bridge"
+
+#endif
diff --git a/include/hw/pci-host/astro.h b/include/hw/pci-host/astro.h
new file mode 100644
index 0000000000..e2966917cd
--- /dev/null
+++ b/include/hw/pci-host/astro.h
@@ -0,0 +1,94 @@
+/*
+ * HP-PARISC Astro Bus connector with Elroy PCI host bridges
+ */
+
+#ifndef ASTRO_H
+#define ASTRO_H
+
+#include "hw/pci/pci_host.h"
+
+#define ASTRO_HPA 0xfed00000
+
+#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
+
+#define TYPE_ASTRO_CHIP "astro-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(AstroState, ASTRO_CHIP)
+
+#define TYPE_ELROY_PCI_HOST_BRIDGE "elroy-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(ElroyState, ELROY_PCI_HOST_BRIDGE)
+
+#define ELROY_NUM 4 /* # of Elroys */
+#define ELROY_IRQS 8 /* IOSAPIC IRQs */
+
+/* ASTRO Memory and I/O regions */
+#define LMMIO_DIST_BASE_ADDR 0xf4000000ULL
+#define LMMIO_DIST_BASE_SIZE 0x4000000ULL
+
+#define IOS_DIST_BASE_ADDR 0xfffee00000ULL
+#define IOS_DIST_BASE_SIZE 0x10000ULL
+
+#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
+
+struct AstroState;
+
+struct ElroyState {
+ PCIHostState parent_obj;
+
+ /* parent Astro device */
+ struct AstroState *astro;
+
+ /* HPA of this Elroy */
+ hwaddr hpa;
+
+ /* PCI bus number (Elroy number) */
+ unsigned int pci_bus_num;
+
+ uint64_t config_address;
+ uint64_t config_reg_elroy;
+
+ uint64_t status_control;
+ uint64_t arb_mask;
+ uint64_t mmio_base[(0x0250 - 0x200) / 8];
+ uint64_t error_config;
+
+ uint32_t iosapic_reg_select;
+ uint64_t iosapic_reg[0x20];
+
+ uint32_t ilr;
+
+ MemoryRegion this_mem;
+
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_mmio_alias;
+ MemoryRegion pci_hole;
+ MemoryRegion pci_io;
+};
+
+struct AstroState {
+ PCIHostState parent_obj;
+
+ uint64_t ioc_ctrl;
+ uint64_t ioc_status_ctrl;
+ uint64_t ioc_ranges[(0x03d8 - 0x300) / 8];
+ uint64_t ioc_rope_config;
+ uint64_t ioc_status_control;
+ uint64_t ioc_flush_control;
+ uint64_t ioc_rope_control[8];
+ uint64_t tlb_ibase;
+ uint64_t tlb_imask;
+ uint64_t tlb_pcom;
+ uint64_t tlb_tcnfg;
+ uint64_t tlb_pdir_base;
+
+ struct ElroyState *elroy[ELROY_NUM];
+
+ MemoryRegion this_mem;
+
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_io;
+
+ IOMMUMemoryRegion iommu;
+ AddressSpace iommu_as;
+};
+
+#endif
diff --git a/include/hw/pci-host/bonito.h b/include/hw/pci-host/bonito.h
new file mode 100644
index 0000000000..b8ecf7870a
--- /dev/null
+++ b/include/hw/pci-host/bonito.h
@@ -0,0 +1,18 @@
+/*
+ * QEMU Bonito64 north bridge support
+ *
+ * Copyright (c) 2008 yajin (yajin@vm-kernel.org)
+ * Copyright (c) 2010 Huacai Chen (zltjiangshi@gmail.com)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCI_HOST_BONITO_H
+#define HW_PCI_HOST_BONITO_H
+
+#include "qom/object.h"
+
+#define TYPE_BONITO_PCI_HOST_BRIDGE "Bonito-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(BonitoState, BONITO_PCI_HOST_BRIDGE)
+
+#endif
diff --git a/include/hw/pci-host/designware.h b/include/hw/pci-host/designware.h
index 31c41231b1..908f3d946b 100644
--- a/include/hw/pci-host/designware.h
+++ b/include/hw/pci-host/designware.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,21 +22,16 @@
#define DESIGNWARE_H
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
-#include "hw/pci/pci_bus.h"
-#include "hw/pci/pcie_host.h"
#include "hw/pci/pci_bridge.h"
+#include "qom/object.h"
#define TYPE_DESIGNWARE_PCIE_HOST "designware-pcie-host"
-#define DESIGNWARE_PCIE_HOST(obj) \
- OBJECT_CHECK(DesignwarePCIEHost, (obj), TYPE_DESIGNWARE_PCIE_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIEHost, DESIGNWARE_PCIE_HOST)
#define TYPE_DESIGNWARE_PCIE_ROOT "designware-pcie-root"
-#define DESIGNWARE_PCIE_ROOT(obj) \
- OBJECT_CHECK(DesignwarePCIERoot, (obj), TYPE_DESIGNWARE_PCIE_ROOT)
+OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIERoot, DESIGNWARE_PCIE_ROOT)
struct DesignwarePCIERoot;
-typedef struct DesignwarePCIERoot DesignwarePCIERoot;
typedef struct DesignwarePCIEViewport {
DesignwarePCIERoot *root;
@@ -80,7 +75,7 @@ struct DesignwarePCIERoot {
DesignwarePCIEMSI msi;
};
-typedef struct DesignwarePCIEHost {
+struct DesignwarePCIEHost {
PCIHostState parent_obj;
DesignwarePCIERoot root;
@@ -96,6 +91,6 @@ typedef struct DesignwarePCIEHost {
} pci;
MemoryRegion mmio;
-} DesignwarePCIEHost;
+};
#endif /* DESIGNWARE_H */
diff --git a/include/hw/pci-host/dino.h b/include/hw/pci-host/dino.h
new file mode 100644
index 0000000000..fd7975c798
--- /dev/null
+++ b/include/hw/pci-host/dino.h
@@ -0,0 +1,146 @@
+/*
+ * HP-PARISC Dino PCI chipset emulation, as in B160L and similar machines
+ *
+ * (C) 2017-2019 by Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * Documentation available at:
+ * https://parisc.wiki.kernel.org/images-parisc/9/91/Dino_ers.pdf
+ * https://parisc.wiki.kernel.org/images-parisc/7/70/Dino_3_1_Errata.pdf
+ */
+
+#ifndef DINO_H
+#define DINO_H
+
+#include "hw/pci/pci_host.h"
+
+#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(DinoState, DINO_PCI_HOST_BRIDGE)
+
+#define DINO_IAR0 0x004
+#define DINO_IODC 0x008
+#define DINO_IRR0 0x00C /* RO */
+#define DINO_IAR1 0x010
+#define DINO_IRR1 0x014 /* RO */
+#define DINO_IMR 0x018
+#define DINO_IPR 0x01C
+#define DINO_TOC_ADDR 0x020
+#define DINO_ICR 0x024
+#define DINO_ILR 0x028 /* RO */
+#define DINO_IO_COMMAND 0x030 /* WO */
+#define DINO_IO_STATUS 0x034 /* RO */
+#define DINO_IO_CONTROL 0x038
+#define DINO_IO_GSC_ERR_RESP 0x040 /* RO */
+#define DINO_IO_ERR_INFO 0x044 /* RO */
+#define DINO_IO_PCI_ERR_RESP 0x048 /* RO */
+#define DINO_IO_FBB_EN 0x05c
+#define DINO_IO_ADDR_EN 0x060
+#define DINO_PCI_CONFIG_ADDR 0x064
+#define DINO_PCI_CONFIG_DATA 0x068
+#define DINO_PCI_IO_DATA 0x06c
+#define DINO_PCI_MEM_DATA 0x070 /* Dino 3.x only */
+#define DINO_GSC2X_CONFIG 0x7b4 /* RO */
+#define DINO_GMASK 0x800
+#define DINO_PAMR 0x804
+#define DINO_PAPR 0x808
+#define DINO_DAMODE 0x80c
+#define DINO_PCICMD 0x810
+#define DINO_PCISTS 0x814 /* R/WC */
+#define DINO_MLTIM 0x81c
+#define DINO_BRDG_FEAT 0x820
+#define DINO_PCIROR 0x824
+#define DINO_PCIWOR 0x828
+#define DINO_TLTIM 0x830
+
+#define DINO_IRQS 11 /* bits 0-10 are architected */
+#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
+#define DINO_LOCAL_IRQS (DINO_IRQS + 1)
+#define DINO_MASK_IRQ(x) (1 << (x))
+
+#define DINO_IRQ_PCIINTA 0
+#define DINO_IRQ_PCIINTB 1
+#define DINO_IRQ_PCIINTC 2
+#define DINO_IRQ_PCIINTD 3
+#define DINO_IRQ_PCIINTE 4
+#define DINO_IRQ_PCIINTF 5
+#define DINO_IRQ_GSCEXTINT 6
+#define DINO_IRQ_BUSERRINT 7
+#define DINO_IRQ_PS2INT 8
+#define DINO_IRQ_UNUSED 9
+#define DINO_IRQ_RS232INT 10
+
+#define PCIINTA 0x001
+#define PCIINTB 0x002
+#define PCIINTC 0x004
+#define PCIINTD 0x008
+#define PCIINTE 0x010
+#define PCIINTF 0x020
+#define GSCEXTINT 0x040
+/* #define xxx 0x080 - bit 7 is "default" */
+/* #define xxx 0x100 - bit 8 not used */
+/* #define xxx 0x200 - bit 9 not used */
+#define RS232INT 0x400
+
+#define DINO_MEM_CHUNK_SIZE (8 * MiB)
+
+#define DINO800_REGS (1 + (DINO_TLTIM - DINO_GMASK) / 4)
+static const uint32_t reg800_keep_bits[DINO800_REGS] = {
+ MAKE_64BIT_MASK(0, 1), /* GMASK */
+ MAKE_64BIT_MASK(0, 7), /* PAMR */
+ MAKE_64BIT_MASK(0, 7), /* PAPR */
+ MAKE_64BIT_MASK(0, 8), /* DAMODE */
+ MAKE_64BIT_MASK(0, 7), /* PCICMD */
+ MAKE_64BIT_MASK(0, 9), /* PCISTS */
+ MAKE_64BIT_MASK(0, 32), /* Undefined */
+ MAKE_64BIT_MASK(0, 8), /* MLTIM */
+ MAKE_64BIT_MASK(0, 30), /* BRDG_FEAT */
+ MAKE_64BIT_MASK(0, 24), /* PCIROR */
+ MAKE_64BIT_MASK(0, 22), /* PCIWOR */
+ MAKE_64BIT_MASK(0, 32), /* Undocumented */
+ MAKE_64BIT_MASK(0, 9), /* TLTIM */
+};
+
+/* offsets to DINO HPA: */
+#define DINO_PCI_ADDR 0x064
+#define DINO_CONFIG_DATA 0x068
+#define DINO_IO_DATA 0x06c
+
+struct DinoState {
+ PCIHostState parent_obj;
+
+ /*
+ * PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops,
+ * so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops.
+ */
+ uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */
+
+ uint32_t iar0;
+ uint32_t iar1;
+ uint32_t imr;
+ uint32_t ipr;
+ uint32_t icr;
+ uint32_t ilr;
+ uint32_t io_fbb_en;
+ uint32_t io_addr_en;
+ uint32_t io_control;
+ uint32_t toc_addr;
+
+ uint32_t reg800[DINO800_REGS];
+
+ MemoryRegion this_mem;
+ MemoryRegion pci_mem;
+ MemoryRegion pci_mem_alias[32];
+
+ MemoryRegion *memory_as;
+
+ AddressSpace bm_as;
+ MemoryRegion bm;
+ MemoryRegion bm_ram_alias;
+ MemoryRegion bm_pci_alias;
+ MemoryRegion bm_cpu_alias;
+
+ qemu_irq irqs[DINO_IRQS];
+};
+
+#endif
diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index faea040a93..dce883573b 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -20,27 +20,36 @@
#ifndef HW_GPEX_H
#define HW_GPEX_H
+#include "exec/hwaddr.h"
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pcie_host.h"
+#include "qom/object.h"
#define TYPE_GPEX_HOST "gpex-pcihost"
-#define GPEX_HOST(obj) \
- OBJECT_CHECK(GPEXHost, (obj), TYPE_GPEX_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST)
#define TYPE_GPEX_ROOT_DEVICE "gpex-root"
-#define MCH_PCI_DEVICE(obj) \
- OBJECT_CHECK(GPEXRootState, (obj), TYPE_GPEX_ROOT_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE)
#define GPEX_NUM_IRQS 4
-typedef struct GPEXRootState {
+struct GPEXRootState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
-} GPEXRootState;
+};
-typedef struct GPEXHost {
+struct GPEXConfig {
+ MemMapEntry ecam;
+ MemMapEntry mmio32;
+ MemMapEntry mmio64;
+ MemMapEntry pio;
+ int irq;
+ PCIBus *bus;
+};
+
+struct GPEXHost {
/*< private >*/
PCIExpressHost parent_obj;
/*< public >*/
@@ -49,10 +58,28 @@ typedef struct GPEXHost {
MemoryRegion io_ioport;
MemoryRegion io_mmio;
+ MemoryRegion io_ioport_window;
+ MemoryRegion io_mmio_window;
qemu_irq irq[GPEX_NUM_IRQS];
int irq_num[GPEX_NUM_IRQS];
-} GPEXHost;
+
+ bool allow_unmapped_accesses;
+
+ struct GPEXConfig gpex_cfg;
+};
int gpex_set_irq_num(GPEXHost *s, int index, int gsi);
+void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg);
+void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq);
+
+#define PCI_HOST_PIO_BASE "x-pio-base"
+#define PCI_HOST_PIO_SIZE "x-pio-size"
+#define PCI_HOST_ECAM_BASE "x-ecam-base"
+#define PCI_HOST_ECAM_SIZE "x-ecam-size"
+#define PCI_HOST_BELOW_4G_MMIO_BASE "x-below-4g-mmio-base"
+#define PCI_HOST_BELOW_4G_MMIO_SIZE "x-below-4g-mmio-size"
+#define PCI_HOST_ABOVE_4G_MMIO_BASE "x-above-4g-mmio-base"
+#define PCI_HOST_ABOVE_4G_MMIO_SIZE "x-above-4g-mmio-size"
+
#endif /* HW_GPEX_H */
diff --git a/include/hw/pci-host/grackle.h b/include/hw/pci-host/grackle.h
new file mode 100644
index 0000000000..7ad3a779f0
--- /dev/null
+++ b/include/hw/pci-host/grackle.h
@@ -0,0 +1,44 @@
+/*
+ * QEMU Grackle PCI host (heathrow OldWorld PowerMac)
+ *
+ * Copyright (c) 2006-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRACKLE_H
+#define GRACKLE_H
+
+#include "hw/pci/pci_host.h"
+
+#define TYPE_GRACKLE_PCI_HOST_BRIDGE "grackle-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(GrackleState, GRACKLE_PCI_HOST_BRIDGE)
+
+struct GrackleState {
+ PCIHostState parent_obj;
+
+ uint32_t ofw_addr;
+ qemu_irq irqs[4];
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_hole;
+ MemoryRegion pci_io;
+};
+
+#endif
diff --git a/include/hw/pci-host/i440fx.h b/include/hw/pci-host/i440fx.h
index cc58d82ed4..c988f70890 100644
--- a/include/hw/pci-host/i440fx.h
+++ b/include/hw/pci-host/i440fx.h
@@ -11,41 +11,27 @@
#ifndef HW_PCI_I440FX_H
#define HW_PCI_I440FX_H
-#include "hw/hw.h"
-#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci-host/pam.h"
+#include "qom/object.h"
+
+#define I440FX_HOST_PROP_PCI_TYPE "pci-type"
#define TYPE_I440FX_PCI_HOST_BRIDGE "i440FX-pcihost"
#define TYPE_I440FX_PCI_DEVICE "i440FX"
-#define I440FX_PCI_DEVICE(obj) \
- OBJECT_CHECK(PCII440FXState, (obj), TYPE_I440FX_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCII440FXState, I440FX_PCI_DEVICE)
-typedef struct PCII440FXState {
+struct PCII440FXState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
- MemoryRegion *system_memory;
- MemoryRegion *pci_address_space;
- MemoryRegion *ram_memory;
- PAMMemoryRegion pam_regions[13];
+ PAMMemoryRegion pam_regions[PAM_REGIONS_COUNT];
MemoryRegion smram_region;
MemoryRegion smram, low_smram;
-} PCII440FXState;
+};
#define TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE "igd-passthrough-i440FX"
-PCIBus *i440fx_init(const char *host_type, const char *pci_type,
- PCII440FXState **pi440fx_state,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
- ram_addr_t ram_size,
- ram_addr_t below_4g_mem_size,
- ram_addr_t above_4g_mem_size,
- MemoryRegion *pci_memory,
- MemoryRegion *ram_memory);
-
-PCIBus *find_i440fx(void);
-
#endif
diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h
new file mode 100644
index 0000000000..e753449593
--- /dev/null
+++ b/include/hw/pci-host/ls7a.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU LoongArch LS7A PCI host and PCH definitions
+ *
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ */
+
+#ifndef HW_LS7A_H
+#define HW_LS7A_H
+
+#include "hw/pci-host/pam.h"
+#include "qemu/units.h"
+#include "qemu/range.h"
+#include "qom/object.h"
+
+#define VIRT_PCI_MEM_BASE 0x40000000UL
+#define VIRT_PCI_MEM_SIZE 0x40000000UL
+#define VIRT_PCI_IO_OFFSET 0x4000
+#define VIRT_PCI_CFG_BASE 0x20000000
+#define VIRT_PCI_CFG_SIZE 0x08000000
+#define VIRT_PCI_IO_BASE 0x18004000UL
+#define VIRT_PCI_IO_SIZE 0xC000
+
+#define VIRT_PCH_REG_BASE 0x10000000UL
+#define VIRT_IOAPIC_REG_BASE (VIRT_PCH_REG_BASE)
+#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL
+
+/*
+ * GSI_BASE is hard-coded to 64 in the Linux kernel; using a different value
+ * here makes the kernel fail to boot:
+ * 0 - 15  GSI for ISA devices, even if there are no ISA devices
+ * 16 - 63 GSI for CPU devices such as timers/perf monitors etc.
+ * 64 -    GSI for external devices
+ */
+#define VIRT_PCH_PIC_IRQ_NUM 32
+#define VIRT_GSI_BASE 64
+#define VIRT_DEVICE_IRQS 16
+#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2)
+#define VIRT_UART_BASE 0x1fe001e0
+#define VIRT_UART_SIZE 0x100
+#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 3)
+#define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000)
+#define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100)
+#define VIRT_RTC_LEN 0x100
+#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 4)
+
+#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000
+#define VIRT_PLATFORM_BUS_SIZE 0x2000000
+#define VIRT_PLATFORM_BUS_NUM_IRQS 2
+#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5)
+#endif
diff --git a/include/hw/pci-host/mv64361.h b/include/hw/pci-host/mv64361.h
new file mode 100644
index 0000000000..9cdb35cb3c
--- /dev/null
+++ b/include/hw/pci-host/mv64361.h
@@ -0,0 +1,8 @@
+#ifndef MV64361_H
+#define MV64361_H
+
+#define TYPE_MV64361 "mv64361"
+
+PCIBus *mv64361_get_pci_bus(DeviceState *dev, int n);
+
+#endif
diff --git a/include/hw/pci-host/pam.h b/include/hw/pci-host/pam.h
index fec5cd35d6..005916f826 100644
--- a/include/hw/pci-host/pam.h
+++ b/include/hw/pci-host/pam.h
@@ -80,13 +80,16 @@
#define SMRAM_C_BASE_SEG_MASK ((uint8_t)0x7)
#define SMRAM_C_BASE_SEG ((uint8_t)0x2) /* hardwired to b010 */
+#define PAM_REGIONS_COUNT 13
+
typedef struct PAMMemoryRegion {
MemoryRegion alias[4]; /* index = PAM value */
unsigned current;
} PAMMemoryRegion;
-void init_pam(DeviceState *dev, MemoryRegion *ram, MemoryRegion *system,
- MemoryRegion *pci, PAMMemoryRegion *mem, uint32_t start, uint32_t size);
+void init_pam(PAMMemoryRegion *mem, Object *owner, MemoryRegion *ram,
+ MemoryRegion *system, MemoryRegion *pci,
+ uint32_t start, uint32_t size);
void pam_update(PAMMemoryRegion *mem, int idx, uint8_t val);
#endif /* QEMU_PAM_H */
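With the signature change above, init_pam() now takes the PAM region to initialise as its first argument and a generic Object owner instead of a DeviceState, and the magic number 13 becomes PAM_REGIONS_COUNT. A hedged sketch of a caller such as a north-bridge realize function, assuming the PAM_BIOS_*/PAM_EXPAN_* constants defined earlier in this header and a device "d" that embeds the pam_regions array:

    init_pam(&d->pam_regions[0], OBJECT(d), ram_memory, system_memory,
             pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE);

    for (i = 0; i < PAM_REGIONS_COUNT - 1; i++) {
        init_pam(&d->pam_regions[i + 1], OBJECT(d), ram_memory, system_memory,
                 pci_address_space, PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE,
                 PAM_EXPAN_SIZE);
    }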
diff --git a/include/hw/pci-host/pnv_phb3.h b/include/hw/pci-host/pnv_phb3.h
index 75b787867a..d62b3091ac 100644
--- a/include/hw/pci-host/pnv_phb3.h
+++ b/include/hw/pci-host/pnv_phb3.h
@@ -10,9 +10,9 @@
#ifndef PCI_HOST_PNV_PHB3_H
#define PCI_HOST_PNV_PHB3_H
-#include "hw/pci/pcie_host.h"
-#include "hw/pci/pcie_port.h"
#include "hw/ppc/xics.h"
+#include "qom/object.h"
+#include "hw/pci-host/pnv_phb.h"
typedef struct PnvPHB3 PnvPHB3;
@@ -20,18 +20,20 @@ typedef struct PnvPHB3 PnvPHB3;
* PHB3 XICS Source for MSIs
*/
#define TYPE_PHB3_MSI "phb3-msi"
-#define PHB3_MSI(obj) OBJECT_CHECK(Phb3MsiState, (obj), TYPE_PHB3_MSI)
+typedef struct Phb3MsiState Phb3MsiState;
+DECLARE_INSTANCE_CHECKER(Phb3MsiState, PHB3_MSI,
+ TYPE_PHB3_MSI)
#define PHB3_MAX_MSI 2048
-typedef struct Phb3MsiState {
+struct Phb3MsiState {
ICSState ics;
qemu_irq *qirqs;
PnvPHB3 *phb;
uint64_t rba[PHB3_MAX_MSI / 64];
uint32_t rba_sum;
-} Phb3MsiState;
+};
void pnv_phb3_msi_update_config(Phb3MsiState *msis, uint32_t base,
uint32_t count);
@@ -69,9 +71,9 @@ typedef struct PnvPhb3DMASpace {
* PHB3 Power Bus Common Queue
*/
#define TYPE_PNV_PBCQ "pnv-pbcq"
-#define PNV_PBCQ(obj) OBJECT_CHECK(PnvPBCQState, (obj), TYPE_PNV_PBCQ)
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPBCQState, PNV_PBCQ)
-typedef struct PnvPBCQState {
+struct PnvPBCQState {
DeviceState parent;
uint32_t nest_xbase;
@@ -96,24 +98,25 @@ typedef struct PnvPBCQState {
MemoryRegion xscom_nest_regs;
MemoryRegion xscom_pci_regs;
MemoryRegion xscom_spci_regs;
-} PnvPBCQState;
+};
/*
- * PHB3 PCIe Root port
+ * PHB3 PCIe Root Bus
*/
-#define TYPE_PNV_PHB3_ROOT_BUS "pnv-phb3-root-bus"
-
-#define TYPE_PNV_PHB3_ROOT_PORT "pnv-phb3-root-port"
+#define TYPE_PNV_PHB3_ROOT_BUS "pnv-phb3-root"
+struct PnvPHB3RootBus {
+ PCIBus parent;
-typedef struct PnvPHB3RootPort {
- PCIESlot parent_obj;
-} PnvPHB3RootPort;
+ uint32_t chip_id;
+ uint32_t phb_id;
+};
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB3RootBus, PNV_PHB3_ROOT_BUS)
/*
* PHB3 PCIe Host Bridge for PowerNV machines (POWER8)
*/
#define TYPE_PNV_PHB3 "pnv-phb3"
-#define PNV_PHB3(obj) OBJECT_CHECK(PnvPHB3, (obj), TYPE_PNV_PHB3)
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB3, PNV_PHB3)
#define PNV_PHB3_NUM_M64 16
#define PNV_PHB3_NUM_REGS (0x1000 >> 3)
@@ -123,7 +126,9 @@ typedef struct PnvPHB3RootPort {
#define PCI_MMIO_TOTAL_SIZE (0x1ull << 60)
struct PnvPHB3 {
- PCIExpressHost parent_obj;
+ DeviceState parent;
+
+ PnvPHB *phb_base;
uint32_t chip_id;
uint32_t phb_id;
@@ -151,14 +156,15 @@ struct PnvPHB3 {
PnvPBCQState pbcq;
- PnvPHB3RootPort root;
-
QLIST_HEAD(, PnvPhb3DMASpace) dma_spaces;
+
+ PnvChip *chip;
};
uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size);
void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size);
void pnv_phb3_update_regions(PnvPHB3 *phb);
void pnv_phb3_remap_irqs(PnvPHB3 *phb);
+void pnv_phb3_bus_init(DeviceState *dev, PnvPHB3 *phb);
#endif /* PCI_HOST_PNV_PHB3_H */
diff --git a/include/hw/pci-host/pnv_phb3_regs.h b/include/hw/pci-host/pnv_phb3_regs.h
index a174ef1f70..38f8ce9d74 100644
--- a/include/hw/pci-host/pnv_phb3_regs.h
+++ b/include/hw/pci-host/pnv_phb3_regs.h
@@ -13,22 +13,6 @@
#include "qemu/host-utils.h"
/*
- * QEMU version of the GETFIELD/SETFIELD macros
- *
- * These are common with the PnvXive model.
- */
-static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
-{
- return (word & mask) >> ctz64(mask);
-}
-
-static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
- uint64_t value)
-{
- return (word & ~mask) | ((value << ctz64(mask)) & mask);
-}
-
-/*
* PBCQ XSCOM registers
*/
diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h
index c882bfd0aa..3212e68160 100644
--- a/include/hw/pci-host/pnv_phb4.h
+++ b/include/hw/pci-host/pnv_phb4.h
@@ -10,14 +10,14 @@
#ifndef PCI_HOST_PNV_PHB4_H
#define PCI_HOST_PNV_PHB4_H
-#include "hw/pci/pcie_host.h"
-#include "hw/pci/pcie_port.h"
+#include "hw/pci-host/pnv_phb.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/ppc/pnv.h"
#include "hw/ppc/xive.h"
+#include "qom/object.h"
-typedef struct PnvPhb4PecState PnvPhb4PecState;
typedef struct PnvPhb4PecStack PnvPhb4PecStack;
typedef struct PnvPHB4 PnvPHB4;
-typedef struct PnvChip PnvChip;
/*
* We have one such address space wrapper per possible device under
@@ -44,20 +44,22 @@ typedef struct PnvPhb4DMASpace {
} PnvPhb4DMASpace;
/*
- * PHB4 PCIe Root port
+ * PHB4 PCIe Root Bus
*/
-#define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root-bus"
-#define TYPE_PNV_PHB4_ROOT_PORT "pnv-phb4-root-port"
+#define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root"
+struct PnvPHB4RootBus {
+ PCIBus parent;
-typedef struct PnvPHB4RootPort {
- PCIESlot parent_obj;
-} PnvPHB4RootPort;
+ uint32_t chip_id;
+ uint32_t phb_id;
+};
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB4RootBus, PNV_PHB4_ROOT_BUS)
/*
* PHB4 PCIe Host Bridge for PowerNV machines (POWER9)
*/
#define TYPE_PNV_PHB4 "pnv-phb4"
-#define PNV_PHB4(obj) OBJECT_CHECK(PnvPHB4, (obj), TYPE_PNV_PHB4)
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPHB4, PNV_PHB4)
#define PNV_PHB4_MAX_LSIs 8
#define PNV_PHB4_MAX_INTs 4096
@@ -76,15 +78,15 @@ typedef struct PnvPHB4RootPort {
#define PCI_MMIO_TOTAL_SIZE (0x1ull << 60)
struct PnvPHB4 {
- PCIExpressHost parent_obj;
+ DeviceState parent;
- PnvPHB4RootPort root;
+ PnvPHB *phb_base;
uint32_t chip_id;
uint32_t phb_id;
- uint64_t version;
- uint16_t device_id;
+ /* The owner PEC */
+ PnvPhb4PecState *pec;
char bus_path[8];
@@ -109,6 +111,29 @@ struct PnvPHB4 {
MemoryRegion pci_mmio;
MemoryRegion pci_io;
+ /* PCI registers (excluding pass-through) */
+#define PHB4_PEC_PCI_STK_REGS_COUNT 0xf
+ uint64_t pci_regs[PHB4_PEC_PCI_STK_REGS_COUNT];
+ MemoryRegion pci_regs_mr;
+
+ /* Nest registers */
+#define PHB4_PEC_NEST_STK_REGS_COUNT 0x18
+ uint64_t nest_regs[PHB4_PEC_NEST_STK_REGS_COUNT];
+ MemoryRegion nest_regs_mr;
+
+ /* PHB pass-through XSCOM */
+ MemoryRegion phb_regs_mr;
+
+ /* Memory windows from PowerBus to PHB */
+ MemoryRegion phbbar;
+ MemoryRegion intbar;
+ MemoryRegion mmbar0;
+ MemoryRegion mmbar1;
+ uint64_t mmio0_base;
+ uint64_t mmio0_size;
+ uint64_t mmio1_base;
+ uint64_t mmio1_size;
+
/* On-chip IODA tables */
uint64_t ioda_LIST[PNV_PHB4_MAX_LSIs];
uint64_t ioda_MIST[PNV_PHB4_MAX_MIST];
@@ -127,62 +152,20 @@ struct PnvPHB4 {
XiveSource xsrc;
qemu_irq *qirqs;
- PnvPhb4PecStack *stack;
-
QLIST_HEAD(, PnvPhb4DMASpace) dma_spaces;
};
void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon);
-void pnv_phb4_update_regions(PnvPhb4PecStack *stack);
+int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index);
+PnvPhb4PecState *pnv_pec_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp);
+void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb);
extern const MemoryRegionOps pnv_phb4_xscom_ops;
/*
* PHB4 PEC (PCI Express Controller)
*/
#define TYPE_PNV_PHB4_PEC "pnv-phb4-pec"
-#define PNV_PHB4_PEC(obj) \
- OBJECT_CHECK(PnvPhb4PecState, (obj), TYPE_PNV_PHB4_PEC)
-
-#define TYPE_PNV_PHB4_PEC_STACK "pnv-phb4-pec-stack"
-#define PNV_PHB4_PEC_STACK(obj) \
- OBJECT_CHECK(PnvPhb4PecStack, (obj), TYPE_PNV_PHB4_PEC_STACK)
-
-/* Per-stack data */
-struct PnvPhb4PecStack {
- DeviceState parent;
-
- /* My own stack number */
- uint32_t stack_no;
-
- /* Nest registers */
-#define PHB4_PEC_NEST_STK_REGS_COUNT 0x17
- uint64_t nest_regs[PHB4_PEC_NEST_STK_REGS_COUNT];
- MemoryRegion nest_regs_mr;
-
- /* PCI registers (excluding pass-through) */
-#define PHB4_PEC_PCI_STK_REGS_COUNT 0xf
- uint64_t pci_regs[PHB4_PEC_PCI_STK_REGS_COUNT];
- MemoryRegion pci_regs_mr;
-
- /* PHB pass-through XSCOM */
- MemoryRegion phb_regs_mr;
-
- /* Memory windows from PowerBus to PHB */
- MemoryRegion mmbar0;
- MemoryRegion mmbar1;
- MemoryRegion phbbar;
- MemoryRegion intbar;
- uint64_t mmio0_base;
- uint64_t mmio0_size;
- uint64_t mmio1_base;
- uint64_t mmio1_size;
-
- /* The owner PEC */
- PnvPhb4PecState *pec;
-
- /* The actual PHB */
- PnvPHB4 phb;
-};
+OBJECT_DECLARE_TYPE(PnvPhb4PecState, PnvPhb4PecClass, PNV_PHB4_PEC)
struct PnvPhb4PecState {
DeviceState parent;
@@ -191,30 +174,26 @@ struct PnvPhb4PecState {
uint32_t index;
uint32_t chip_id;
- MemoryRegion *system_memory;
-
 /* Nest registers, excluding per-stack */
#define PHB4_PEC_NEST_REGS_COUNT 0xf
uint64_t nest_regs[PHB4_PEC_NEST_REGS_COUNT];
MemoryRegion nest_regs_mr;
/* PCI registers, excluding per-stack */
-#define PHB4_PEC_PCI_REGS_COUNT 0x2
+#define PHB4_PEC_PCI_REGS_COUNT 0x3
uint64_t pci_regs[PHB4_PEC_PCI_REGS_COUNT];
MemoryRegion pci_regs_mr;
- /* Stacks */
- #define PHB4_PEC_MAX_STACKS 3
- uint32_t num_stacks;
- PnvPhb4PecStack stacks[PHB4_PEC_MAX_STACKS];
+ /* PHBs */
+ uint32_t num_phbs;
+#define MAX_PHBS_PER_PEC 3
+ PnvPHB *phbs[MAX_PHBS_PER_PEC];
+
+ PnvChip *chip;
};
-#define PNV_PHB4_PEC_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvPhb4PecClass, (klass), TYPE_PNV_PHB4_PEC)
-#define PNV_PHB4_PEC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvPhb4PecClass, (obj), TYPE_PNV_PHB4_PEC)
-typedef struct PnvPhb4PecClass {
+struct PnvPhb4PecClass {
DeviceClass parent_class;
uint32_t (*xscom_nest_base)(PnvPhb4PecState *pec);
@@ -225,6 +204,23 @@ typedef struct PnvPhb4PecClass {
int compat_size;
const char *stk_compat;
int stk_compat_size;
-} PnvPhb4PecClass;
+ uint64_t version;
+ const char *phb_type;
+ const uint32_t *num_phbs;
+};
+
+/*
+ * POWER10 definitions
+ */
+
+#define TYPE_PNV_PHB5 "pnv-phb5"
+#define PNV_PHB5(obj) \
+ OBJECT_CHECK(PnvPHB4, (obj), TYPE_PNV_PHB5)
+
+#define PNV_PHB5_VERSION 0x000000a500000002ull
+
+#define TYPE_PNV_PHB5_PEC "pnv-phb5-pec"
+#define PNV_PHB5_PEC(obj) \
+ OBJECT_CHECK(PnvPhb4PecState, (obj), TYPE_PNV_PHB5_PEC)
#endif /* PCI_HOST_PNV_PHB4_H */
diff --git a/include/hw/pci-host/pnv_phb4_regs.h b/include/hw/pci-host/pnv_phb4_regs.h
index 55df2c3e5e..bea96f4d91 100644
--- a/include/hw/pci-host/pnv_phb4_regs.h
+++ b/include/hw/pci-host/pnv_phb4_regs.h
@@ -77,10 +77,12 @@
#define PEC_NEST_STK_BAR_EN_PHB PPC_BIT(2)
#define PEC_NEST_STK_BAR_EN_INT PPC_BIT(3)
#define PEC_NEST_STK_DATA_FRZ_TYPE 0x15
-#define PEC_NEST_STK_PBCQ_TUN_BAR 0x16
+#define PEC_NEST_STK_PBCQ_SPARSE_PAGE 0x16 /* P10 */
+#define PEC_NEST_STK_PBCQ_CACHE_INJ 0x17 /* P10 */
/* XSCOM PCI global registers */
#define PEC_PCI_PBAIB_HW_CONFIG 0x00
+#define PEC_PCI_PBAIB_HW_OVR 0x01
#define PEC_PCI_PBAIB_READ_STK_OVR 0x02
/* XSCOM PCI per-stack registers */
@@ -220,11 +222,14 @@
#define PHB_PAPR_ERR_INJ_MASK_MMIO PPC_BITMASK(16, 63)
#define PHB_ETU_ERR_SUMMARY 0x2c8
#define PHB_INT_NOTIFY_ADDR 0x300
+#define PHB_INT_NOTIFY_ADDR_64K PPC_BIT(1) /* P10 */
#define PHB_INT_NOTIFY_INDEX 0x308
/* Fundamental register set B */
#define PHB_VERSION 0x800
#define PHB_CTRLR 0x810
+#define PHB_CTRLR_IRQ_PQ_DISABLE PPC_BIT(9) /* P10 */
+#define PHB_CTRLR_IRQ_ABT_MODE PPC_BIT(10) /* P10 */
#define PHB_CTRLR_IRQ_PGSZ_64K PPC_BIT(11)
#define PHB_CTRLR_IRQ_STORE_EOI PPC_BIT(12)
#define PHB_CTRLR_MMIO_RD_STRICT PPC_BIT(13)
diff --git a/include/hw/pci-host/ppc4xx.h b/include/hw/pci-host/ppc4xx.h
new file mode 100644
index 0000000000..32396417fc
--- /dev/null
+++ b/include/hw/pci-host/ppc4xx.h
@@ -0,0 +1,17 @@
+/*
+ * QEMU PowerPC 4xx PCI-host definitions
+ *
+ * Copyright (c) 2018-2023 BALATON Zoltan
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_PCIHOST_PPC4XX_H
+#define HW_PCIHOST_PPC4XX_H
+
+#define TYPE_PPC4xx_HOST_BRIDGE "ppc4xx-host-bridge"
+#define TYPE_PPC4xx_PCI_HOST "ppc4xx-pci-host"
+#define TYPE_PPC440_PCIX_HOST "ppc440-pcix-host"
+#define TYPE_PPC460EX_PCIE_HOST "ppc460ex-pcie-host"
+
+#endif
diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index 070305f83d..22fadfa3ed 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -22,21 +22,20 @@
#ifndef HW_Q35_H
#define HW_Q35_H
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci-host/pam.h"
#include "qemu/units.h"
#include "qemu/range.h"
+#include "qom/object.h"
#define TYPE_Q35_HOST_DEVICE "q35-pcihost"
-#define Q35_HOST_DEVICE(obj) \
- OBJECT_CHECK(Q35PCIHost, (obj), TYPE_Q35_HOST_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(Q35PCIHost, Q35_HOST_DEVICE)
#define TYPE_MCH_PCI_DEVICE "mch"
-#define MCH_PCI_DEVICE(obj) \
- OBJECT_CHECK(MCHPCIState, (obj), TYPE_MCH_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(MCHPCIState, MCH_PCI_DEVICE)
-typedef struct MCHPCIState {
+struct MCHPCIState {
/*< private >*/
PCIDevice parent_obj;
/*< public >*/
@@ -45,28 +44,28 @@ typedef struct MCHPCIState {
MemoryRegion *pci_address_space;
MemoryRegion *system_memory;
MemoryRegion *address_space_io;
- PAMMemoryRegion pam_regions[13];
+ PAMMemoryRegion pam_regions[PAM_REGIONS_COUNT];
MemoryRegion smram_region, open_high_smram;
MemoryRegion smram, low_smram, high_smram;
MemoryRegion tseg_blackhole, tseg_window;
MemoryRegion smbase_blackhole, smbase_window;
bool has_smram_at_smbase;
+ bool has_smm_ranges;
Range pci_hole;
uint64_t below_4g_mem_size;
uint64_t above_4g_mem_size;
uint64_t pci_hole64_size;
- uint32_t short_root_bus;
uint16_t ext_tseg_mbytes;
-} MCHPCIState;
+};
-typedef struct Q35PCIHost {
+struct Q35PCIHost {
/*< private >*/
PCIExpressHost parent_obj;
/*< public >*/
bool pci_hole64_fix;
MCHPCIState mch;
-} Q35PCIHost;
+};
#define Q35_MASK(bit, ms_bit, ls_bit) \
((uint##bit##_t)(((1ULL << ((ms_bit) + 1)) - 1) & ~((1ULL << ls_bit) - 1)))
@@ -75,11 +74,6 @@ typedef struct Q35PCIHost {
* gmch part
*/
-#define MCH_HOST_PROP_RAM_MEM "ram-mem"
-#define MCH_HOST_PROP_PCI_MEM "pci-mem"
-#define MCH_HOST_PROP_SYSTEM_MEM "system-mem"
-#define MCH_HOST_PROP_IO_MEM "io-mem"
-
/* PCI configuration */
#define MCH_HOST_BRIDGE "MCH"
diff --git a/include/hw/pci-host/remote.h b/include/hw/pci-host/remote.h
new file mode 100644
index 0000000000..690a01f0fe
--- /dev/null
+++ b/include/hw/pci-host/remote.h
@@ -0,0 +1,30 @@
+/*
+ * PCI Host for remote device
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PCI_HOST_REMOTE_H
+#define PCI_HOST_REMOTE_H
+
+#include "exec/memory.h"
+#include "hw/pci/pcie_host.h"
+
+#define TYPE_REMOTE_PCIHOST "remote-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(RemotePCIHost, REMOTE_PCIHOST)
+
+struct RemotePCIHost {
+ /*< private >*/
+ PCIExpressHost parent_obj;
+ /*< public >*/
+
+ MemoryRegion *mr_pci_mem;
+ MemoryRegion *mr_sys_io;
+ MemoryRegion *mr_sys_mem;
+};
+
+#endif
diff --git a/include/hw/pci-host/sabre.h b/include/hw/pci-host/sabre.h
index 99b5aefbec..d12de84ea2 100644
--- a/include/hw/pci-host/sabre.h
+++ b/include/hw/pci-host/sabre.h
@@ -1,9 +1,10 @@
#ifndef HW_PCI_HOST_SABRE_H
#define HW_PCI_HOST_SABRE_H
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pci_host.h"
#include "hw/sparc/sun4u_iommu.h"
+#include "qom/object.h"
#define MAX_IVEC 0x40
@@ -16,15 +17,14 @@
#define OBIO_MSE_IRQ 0x2a
#define OBIO_SER_IRQ 0x2b
-typedef struct SabrePCIState {
+struct SabrePCIState {
PCIDevice parent_obj;
-} SabrePCIState;
+};
#define TYPE_SABRE_PCI_DEVICE "sabre-pci"
-#define SABRE_PCI_DEVICE(obj) \
- OBJECT_CHECK(SabrePCIState, (obj), TYPE_SABRE_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(SabrePCIState, SABRE_PCI_DEVICE)
-typedef struct SabreState {
+struct SabreState {
PCIHostState parent_obj;
hwaddr special_base;
@@ -45,10 +45,9 @@ typedef struct SabreState {
unsigned int irq_request;
uint32_t reset_control;
unsigned int nr_resets;
-} SabreState;
+};
#define TYPE_SABRE "sabre"
-#define SABRE_DEVICE(obj) \
- OBJECT_CHECK(SabreState, (obj), TYPE_SABRE)
+OBJECT_DECLARE_SIMPLE_TYPE(SabreState, SABRE)
#endif
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 600eb55c34..3778aac27b 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,15 +24,14 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/ppc/xics.h"
+#include "qom/object.h"
#define TYPE_SPAPR_PCI_HOST_BRIDGE "spapr-pci-host-bridge"
-#define SPAPR_PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(SpaprPhbState, (obj), TYPE_SPAPR_PCI_HOST_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprPhbState, SPAPR_PCI_HOST_BRIDGE)
#define SPAPR_PCI_DMA_MAX_WINDOWS 2
-typedef struct SpaprPhbState SpaprPhbState;
typedef struct SpaprPciMsi {
uint32_t first_irq;
@@ -48,8 +47,6 @@ typedef struct SpaprPciLsi {
uint32_t irq;
} SpaprPciLsi;
-typedef struct SpaprPhbPciNvGpuConfig SpaprPhbPciNvGpuConfig;
-
struct SpaprPhbState {
PCIHostState parent_obj;
@@ -91,9 +88,6 @@ struct SpaprPhbState {
uint32_t mig_liobn;
hwaddr mig_mem_win_addr, mig_mem_win_size;
hwaddr mig_io_win_addr, mig_io_win_size;
- hwaddr nv2_gpa_win_addr;
- hwaddr nv2_atsd_win_addr;
- SpaprPhbPciNvGpuConfig *nvgpus;
bool pre_5_1_assoc;
};
@@ -113,22 +107,6 @@ struct SpaprPhbState {
#define SPAPR_PCI_MSI_WINDOW 0x40000000000ULL
-#define SPAPR_PCI_NV2RAM64_WIN_BASE SPAPR_PCI_LIMIT
-#define SPAPR_PCI_NV2RAM64_WIN_SIZE (2 * TiB) /* For up to 6 GPUs 256GB each */
-
-/* Max number of these GPUsper a physical box */
-#define NVGPU_MAX_NUM 6
-/* Max number of NVLinks per GPU in any physical box */
-#define NVGPU_MAX_LINKS 3
-
-/*
- * GPU RAM starts at 64TiB so huge DMA window to cover it all ends at 128TiB
- * which is enough. We do not need DMA for ATSD so we put them at 128TiB.
- */
-#define SPAPR_PCI_NV2ATSD_WIN_BASE (128 * TiB)
-#define SPAPR_PCI_NV2ATSD_WIN_SIZE (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * \
- 64 * KiB)
-
int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb,
uint32_t intc_phandle, void *fdt, int *node_offset);
@@ -152,13 +130,6 @@ int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state);
int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option);
int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb);
void spapr_phb_vfio_reset(DeviceState *qdev);
-void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp);
-void spapr_phb_nvgpu_free(SpaprPhbState *sphb);
-void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
- Error **errp);
-void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt);
-void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
- SpaprPhbState *sphb);
#else
static inline bool spapr_phb_eeh_available(SpaprPhbState *sphb)
{
@@ -185,25 +156,6 @@ static inline int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb)
static inline void spapr_phb_vfio_reset(DeviceState *qdev)
{
}
-static inline void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
-{
-}
-static inline void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
-{
-}
-static inline void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt,
- int bus_off, Error **errp)
-{
-}
-static inline void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb,
- void *fdt)
-{
-}
-static inline void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt,
- int offset,
- SpaprPhbState *sphb)
-{
-}
#endif
void spapr_phb_dma_reset(SpaprPhbState *sphb);
@@ -213,4 +165,6 @@ static inline unsigned spapr_phb_windows_supported(SpaprPhbState *sphb)
return sphb->ddw_enabled ? SPAPR_PCI_DMA_MAX_WINDOWS : 1;
}
+char *spapr_pci_fw_dev_name(PCIDevice *dev);
+
#endif /* PCI_HOST_SPAPR_H */
diff --git a/include/hw/pci-host/uninorth.h b/include/hw/pci-host/uninorth.h
index 72d2a97355..62bd81e721 100644
--- a/include/hw/pci-host/uninorth.h
+++ b/include/hw/pci-host/uninorth.h
@@ -26,7 +26,7 @@
#define UNINORTH_H
#include "hw/pci/pci_host.h"
-#include "hw/ppc/openpic.h"
+#include "qom/object.h"
/* UniNorth version */
#define UNINORTH_VERSION_10A 0x7
@@ -36,34 +36,33 @@
#define TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE "uni-north-internal-pci-pcihost"
#define TYPE_U3_AGP_HOST_BRIDGE "u3-agp-pcihost"
-#define UNI_NORTH_PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_UNI_NORTH_PCI_HOST_BRIDGE)
-#define UNI_NORTH_AGP_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_UNI_NORTH_AGP_HOST_BRIDGE)
-#define UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE)
-#define U3_AGP_HOST_BRIDGE(obj) \
- OBJECT_CHECK(UNINHostState, (obj), TYPE_U3_AGP_HOST_BRIDGE)
+typedef struct UNINHostState UNINHostState;
+DECLARE_INSTANCE_CHECKER(UNINHostState, UNI_NORTH_PCI_HOST_BRIDGE,
+ TYPE_UNI_NORTH_PCI_HOST_BRIDGE)
+DECLARE_INSTANCE_CHECKER(UNINHostState, UNI_NORTH_AGP_HOST_BRIDGE,
+ TYPE_UNI_NORTH_AGP_HOST_BRIDGE)
+DECLARE_INSTANCE_CHECKER(UNINHostState, UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE,
+ TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE)
+DECLARE_INSTANCE_CHECKER(UNINHostState, U3_AGP_HOST_BRIDGE,
+ TYPE_U3_AGP_HOST_BRIDGE)
-typedef struct UNINHostState {
+struct UNINHostState {
PCIHostState parent_obj;
uint32_t ofw_addr;
- OpenPICState *pic;
qemu_irq irqs[4];
MemoryRegion pci_mmio;
MemoryRegion pci_hole;
MemoryRegion pci_io;
-} UNINHostState;
+};
-typedef struct UNINState {
+struct UNINState {
SysBusDevice parent_obj;
MemoryRegion mem;
-} UNINState;
+};
#define TYPE_UNI_NORTH "uni-north"
-#define UNI_NORTH(obj) \
- OBJECT_CHECK(UNINState, (obj), TYPE_UNI_NORTH)
+OBJECT_DECLARE_SIMPLE_TYPE(UNINState, UNI_NORTH)
#endif /* UNINORTH_H */
diff --git a/include/hw/pci-host/xilinx-pcie.h b/include/hw/pci-host/xilinx-pcie.h
index c0f15314be..e1b3c1c280 100644
--- a/include/hw/pci-host/xilinx-pcie.h
+++ b/include/hw/pci-host/xilinx-pcie.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,28 +21,26 @@
#define HW_XILINX_PCIE_H
#include "hw/sysbus.h"
-#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pcie_host.h"
+#include "qom/object.h"
#define TYPE_XILINX_PCIE_HOST "xilinx-pcie-host"
-#define XILINX_PCIE_HOST(obj) \
- OBJECT_CHECK(XilinxPCIEHost, (obj), TYPE_XILINX_PCIE_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxPCIEHost, XILINX_PCIE_HOST)
#define TYPE_XILINX_PCIE_ROOT "xilinx-pcie-root"
-#define XILINX_PCIE_ROOT(obj) \
- OBJECT_CHECK(XilinxPCIERoot, (obj), TYPE_XILINX_PCIE_ROOT)
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxPCIERoot, XILINX_PCIE_ROOT)
-typedef struct XilinxPCIERoot {
+struct XilinxPCIERoot {
PCIBridge parent_obj;
-} XilinxPCIERoot;
+};
typedef struct XilinxPCIEInt {
uint32_t fifo_reg1;
uint32_t fifo_reg2;
} XilinxPCIEInt;
-typedef struct XilinxPCIEHost {
+struct XilinxPCIEHost {
PCIExpressHost parent_obj;
char name[16];
@@ -62,6 +60,6 @@ typedef struct XilinxPCIEHost {
XilinxPCIEInt intr_fifo[16];
unsigned int intr_fifo_r, intr_fifo_w;
uint32_t rpscr;
-} XilinxPCIEHost;
+};
#endif /* HW_XILINX_PCIE_H */
diff --git a/include/hw/pci/msi.h b/include/hw/pci/msi.h
index 4087688486..abcfd13925 100644
--- a/include/hw/pci/msi.h
+++ b/include/hw/pci/msi.h
@@ -21,7 +21,7 @@
#ifndef QEMU_MSI_H
#define QEMU_MSI_H
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
struct MSIMessage {
uint64_t address;
@@ -33,6 +33,7 @@ extern bool msi_nonbroken;
void msi_set_message(PCIDevice *dev, MSIMessage msg);
MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector);
bool msi_enabled(const PCIDevice *dev);
+void msi_set_enabled(PCIDevice *dev);
int msi_init(struct PCIDevice *dev, uint8_t offset,
unsigned int nr_vectors, bool msi64bit,
bool msi_per_vector_mask, Error **errp);
@@ -43,6 +44,7 @@ void msi_notify(PCIDevice *dev, unsigned int vector);
void msi_send_message(PCIDevice *dev, MSIMessage msg);
void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len);
unsigned int msi_nr_vectors_allocated(const PCIDevice *dev);
+void msi_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp);
static inline bool msi_present(const PCIDevice *dev)
{
diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h
index 4c4a60c739..0e6f257e45 100644
--- a/include/hw/pci/msix.h
+++ b/include/hw/pci/msix.h
@@ -33,9 +33,10 @@ bool msix_is_masked(PCIDevice *dev, unsigned vector);
void msix_set_pending(PCIDevice *dev, unsigned vector);
void msix_clr_pending(PCIDevice *dev, int vector);
-int msix_vector_use(PCIDevice *dev, unsigned vector);
+void msix_vector_use(PCIDevice *dev, unsigned vector);
void msix_vector_unuse(PCIDevice *dev, unsigned vector);
void msix_unuse_all_vectors(PCIDevice *dev);
+void msix_set_mask(PCIDevice *dev, int vector, bool mask);
void msix_notify(PCIDevice *dev, unsigned vector);
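The msi.h/msix.h hunks above add explicit per-vector mask control (msi_set_mask() with an Error out-parameter, msix_set_mask() without) and make msix_vector_use() return void. A small, hedged usage sketch; the surrounding device code and the choice of &error_abort are illustrative only:

    if (msix_enabled(pdev)) {
        msix_set_mask(pdev, vector, true);          /* mask one MSI-X vector */
    } else if (msi_enabled(pdev)) {
        msi_set_mask(pdev, vector, true, &error_abort);
    }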
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index c1bf7d5356..eaa3fc99d8 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -7,8 +7,6 @@
/* PCI includes legacy ISA access. */
#include "hw/isa/isa.h"
-#include "hw/pci/pcie.h"
-
extern bool pci_available;
/* PCI bus */
@@ -18,6 +16,7 @@ extern bool pci_available;
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn))
+#define PCI_BDF_TO_DEVFN(x) ((x) & 0xff)
#define PCI_BUS_MAX 256
#define PCI_DEVFN_MAX 256
#define PCI_SLOT_MAX 32
@@ -77,6 +76,7 @@ extern bool pci_available;
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100
+/* legacy virtio-pci devices */
#define PCI_DEVICE_ID_VIRTIO_NET 0x1000
#define PCI_DEVICE_ID_VIRTIO_BLOCK 0x1001
#define PCI_DEVICE_ID_VIRTIO_BALLOON 0x1002
@@ -85,9 +85,15 @@ extern bool pci_available;
#define PCI_DEVICE_ID_VIRTIO_RNG 0x1005
#define PCI_DEVICE_ID_VIRTIO_9P 0x1009
#define PCI_DEVICE_ID_VIRTIO_VSOCK 0x1012
-#define PCI_DEVICE_ID_VIRTIO_PMEM 0x1013
-#define PCI_DEVICE_ID_VIRTIO_IOMMU 0x1014
-#define PCI_DEVICE_ID_VIRTIO_MEM 0x1015
+
+/*
+ * Modern virtio-pci devices get their ID assigned automatically;
+ * there is no need to add #defines here. It is calculated as
+ *
+ * PCI_DEVICE_ID = PCI_DEVICE_ID_VIRTIO_10_BASE +
+ * virtio_bus_get_vdev_id(bus)
+ */
+#define PCI_DEVICE_ID_VIRTIO_10_BASE 0x1040
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_BRIDGE 0x0001
@@ -105,6 +111,10 @@ extern bool pci_available;
#define PCI_DEVICE_ID_REDHAT_XHCI 0x000d
#define PCI_DEVICE_ID_REDHAT_PCIE_BRIDGE 0x000e
#define PCI_DEVICE_ID_REDHAT_MDPY 0x000f
+#define PCI_DEVICE_ID_REDHAT_NVME 0x0010
+#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
+#define PCI_DEVICE_ID_REDHAT_ACPI_ERST 0x0012
+#define PCI_DEVICE_ID_REDHAT_UFS 0x0013
#define PCI_DEVICE_ID_REDHAT_QXL 0x0100
#define FMT_PCIBUS PRIx64
@@ -126,6 +136,10 @@ typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num,
pcibus_t addr, pcibus_t size, int type);
typedef void PCIUnregisterFunc(PCIDevice *pci_dev);
+typedef void MSITriggerFunc(PCIDevice *dev, MSIMessage msg);
+typedef MSIMessage MSIPrepareMessageFunc(PCIDevice *dev, unsigned vector);
+typedef MSIMessage MSIxPrepareMessageFunc(PCIDevice *dev, unsigned vector);
+
typedef struct PCIIORegion {
pcibus_t addr; /* current PCI mapping address. -1 means not mapped */
#define PCI_BAR_UNMAPPED (~(pcibus_t)0)
@@ -192,22 +206,14 @@ enum {
QEMU_PCIE_LNKSTA_DLLLA = (1 << QEMU_PCIE_LNKSTA_DLLLA_BITNR),
#define QEMU_PCIE_EXTCAP_INIT_BITNR 9
QEMU_PCIE_EXTCAP_INIT = (1 << QEMU_PCIE_EXTCAP_INIT_BITNR),
+#define QEMU_PCIE_CXL_BITNR 10
+ QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR),
+#define QEMU_PCIE_ERR_UNC_MASK_BITNR 11
+ QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR),
+#define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12
+ QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR),
};
-#define TYPE_PCI_DEVICE "pci-device"
-#define PCI_DEVICE(obj) \
- OBJECT_CHECK(PCIDevice, (obj), TYPE_PCI_DEVICE)
-#define PCI_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCIDeviceClass, (klass), TYPE_PCI_DEVICE)
-#define PCI_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCIDeviceClass, (obj), TYPE_PCI_DEVICE)
-
-/* Implemented by devices that can be plugged on PCI Express buses */
-#define INTERFACE_PCIE_DEVICE "pci-express-device"
-
-/* Implemented by devices that can be plugged on Conventional PCI buses */
-#define INTERFACE_CONVENTIONAL_PCI_DEVICE "conventional-pci-device"
-
typedef struct PCIINTxRoute {
enum {
PCI_INTX_ENABLED,
@@ -217,32 +223,6 @@ typedef struct PCIINTxRoute {
int irq;
} PCIINTxRoute;
-typedef struct PCIDeviceClass {
- DeviceClass parent_class;
-
- void (*realize)(PCIDevice *dev, Error **errp);
- PCIUnregisterFunc *exit;
- PCIConfigReadFunc *config_read;
- PCIConfigWriteFunc *config_write;
-
- uint16_t vendor_id;
- uint16_t device_id;
- uint8_t revision;
- uint16_t class_id;
- uint16_t subsystem_vendor_id; /* only for header type = 0 */
- uint16_t subsystem_id; /* only for header type = 0 */
-
- /*
- * pci-to-pci bridge or normal device.
- * This doesn't mean pci host switch.
- * When card bus bridge is supported, this would be enhanced.
- */
- bool is_bridge;
-
- /* rom bar */
- const char *romfile;
-} PCIDeviceClass;
-
typedef void (*PCIINTxRoutingNotifier)(PCIDevice *dev);
typedef int (*MSIVectorUseNotifier)(PCIDevice *dev, unsigned int vector,
MSIMessage msg);
@@ -251,115 +231,6 @@ typedef void (*MSIVectorPollNotifier)(PCIDevice *dev,
unsigned int vector_start,
unsigned int vector_end);
-enum PCIReqIDType {
- PCI_REQ_ID_INVALID = 0,
- PCI_REQ_ID_BDF,
- PCI_REQ_ID_SECONDARY_BUS,
- PCI_REQ_ID_MAX,
-};
-typedef enum PCIReqIDType PCIReqIDType;
-
-struct PCIReqIDCache {
- PCIDevice *dev;
- PCIReqIDType type;
-};
-typedef struct PCIReqIDCache PCIReqIDCache;
-
-struct PCIDevice {
- DeviceState qdev;
- bool partially_hotplugged;
-
- /* PCI config space */
- uint8_t *config;
-
- /* Used to enable config checks on load. Note that writable bits are
- * never checked even if set in cmask. */
- uint8_t *cmask;
-
- /* Used to implement R/W bytes */
- uint8_t *wmask;
-
- /* Used to implement RW1C(Write 1 to Clear) bytes */
- uint8_t *w1cmask;
-
- /* Used to allocate config space for capabilities. */
- uint8_t *used;
-
- /* the following fields are read only */
- int32_t devfn;
- /* Cached device to fetch requester ID from, to avoid the PCI
- * tree walking every time we invoke PCI request (e.g.,
- * MSI). For conventional PCI root complex, this field is
- * meaningless. */
- PCIReqIDCache requester_id_cache;
- char name[64];
- PCIIORegion io_regions[PCI_NUM_REGIONS];
- AddressSpace bus_master_as;
- MemoryRegion bus_master_container_region;
- MemoryRegion bus_master_enable_region;
-
- /* do not access the following fields */
- PCIConfigReadFunc *config_read;
- PCIConfigWriteFunc *config_write;
-
- /* Legacy PCI VGA regions */
- MemoryRegion *vga_regions[QEMU_PCI_VGA_NUM_REGIONS];
- bool has_vga;
-
- /* Current IRQ levels. Used internally by the generic PCI code. */
- uint8_t irq_state;
-
- /* Capability bits */
- uint32_t cap_present;
-
- /* Offset of MSI-X capability in config space */
- uint8_t msix_cap;
-
- /* MSI-X entries */
- int msix_entries_nr;
-
- /* Space to store MSIX table & pending bit array */
- uint8_t *msix_table;
- uint8_t *msix_pba;
- /* MemoryRegion container for msix exclusive BAR setup */
- MemoryRegion msix_exclusive_bar;
- /* Memory Regions for MSIX table and pending bit entries. */
- MemoryRegion msix_table_mmio;
- MemoryRegion msix_pba_mmio;
- /* Reference-count for entries actually in use by driver. */
- unsigned *msix_entry_used;
- /* MSIX function mask set or MSIX disabled */
- bool msix_function_masked;
- /* Version id needed for VMState */
- int32_t version_id;
-
- /* Offset of MSI capability in config space */
- uint8_t msi_cap;
-
- /* PCI Express */
- PCIExpressDevice exp;
-
- /* SHPC */
- SHPCDevice *shpc;
-
- /* Location of option rom */
- char *romfile;
- bool has_rom;
- MemoryRegion rom;
- uint32_t rom_bar;
-
- /* INTx routing notifier */
- PCIINTxRoutingNotifier intx_routing_notifier;
-
- /* MSI-X notifiers */
- MSIVectorUseNotifier msix_vector_use_notifier;
- MSIVectorReleaseNotifier msix_vector_release_notifier;
- MSIVectorPollNotifier msix_vector_poll_notifier;
-
- /* ID of standby device in net_failover pair */
- char *failover_pair_id;
-};
-
void pci_register_bar(PCIDevice *pci_dev, int region_num,
uint8_t attr, MemoryRegion *memory);
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
@@ -396,27 +267,32 @@ typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num);
typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin);
#define TYPE_PCI_BUS "PCI"
-#define PCI_BUS(obj) OBJECT_CHECK(PCIBus, (obj), TYPE_PCI_BUS)
-#define PCI_BUS_CLASS(klass) OBJECT_CLASS_CHECK(PCIBusClass, (klass), TYPE_PCI_BUS)
-#define PCI_BUS_GET_CLASS(obj) OBJECT_GET_CLASS(PCIBusClass, (obj), TYPE_PCI_BUS)
+OBJECT_DECLARE_TYPE(PCIBus, PCIBusClass, PCI_BUS)
#define TYPE_PCIE_BUS "PCIE"
+#define TYPE_CXL_BUS "CXL"
+
+typedef void (*pci_bus_dev_fn)(PCIBus *b, PCIDevice *d, void *opaque);
+typedef void (*pci_bus_fn)(PCIBus *b, void *opaque);
+typedef void *(*pci_bus_ret_fn)(PCIBus *b, void *opaque);
-bool pci_bus_is_express(PCIBus *bus);
+bool pci_bus_is_express(const PCIBus *bus);
-void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent,
- const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
- uint8_t devfn_min, const char *typename);
+void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
+ const char *name,
+ MemoryRegion *mem, MemoryRegion *io,
+ uint8_t devfn_min, const char *typename);
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, const char *typename);
void pci_root_bus_cleanup(PCIBus *bus);
-void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
+void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
void *irq_opaque, int nirq);
+void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq);
void pci_bus_irqs_cleanup(PCIBus *bus);
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
+uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus);
+void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask);
+void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask);
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
static inline int pci_swizzle(int slot, int pin)
{
@@ -426,8 +302,7 @@ int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
void *irq_opaque,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, int nirq,
const char *typename);
void pci_unregister_root_bus(PCIBus *bus);
@@ -439,10 +314,9 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev,
PCIINTxRoutingNotifier notifier);
void pci_device_reset(PCIDevice *dev);
-PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
- const char *default_model,
- const char *default_devaddr);
-
+void pci_init_nic_devices(PCIBus *bus, const char *default_model);
+bool pci_init_nic_in_slot(PCIBus *rootbus, const char *default_model,
+ const char *alias, const char *devaddr);
PCIDevice *pci_vga_init(PCIBus *bus);
static inline PCIBus *pci_get_bus(const PCIDevice *dev)
@@ -450,6 +324,7 @@ static inline PCIBus *pci_get_bus(const PCIDevice *dev)
return PCI_BUS(qdev_get_parent_bus(DEVICE(dev)));
}
int pci_bus_num(PCIBus *s);
+void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus);
static inline int pci_dev_bus_num(const PCIDevice *dev)
{
return pci_bus_num(pci_get_bus(dev));
@@ -457,39 +332,75 @@ static inline int pci_dev_bus_num(const PCIDevice *dev)
int pci_bus_numa_node(PCIBus *bus);
void pci_for_each_device(PCIBus *bus, int bus_num,
- void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
+ pci_bus_dev_fn fn,
void *opaque);
void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
- void (*fn)(PCIBus *bus, PCIDevice *d,
- void *opaque),
+ pci_bus_dev_fn fn,
void *opaque);
-void pci_for_each_bus_depth_first(PCIBus *bus,
- void *(*begin)(PCIBus *bus, void *parent_state),
- void (*end)(PCIBus *bus, void *state),
- void *parent_state);
+void pci_for_each_device_under_bus(PCIBus *bus,
+ pci_bus_dev_fn fn, void *opaque);
+void pci_for_each_device_under_bus_reverse(PCIBus *bus,
+ pci_bus_dev_fn fn,
+ void *opaque);
+void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
+ pci_bus_fn end, void *parent_state);
PCIDevice *pci_get_function_0(PCIDevice *pci_dev);
/* Use this wrapper when specific scan order is not required. */
static inline
-void pci_for_each_bus(PCIBus *bus,
- void (*fn)(PCIBus *bus, void *opaque),
- void *opaque)
+void pci_for_each_bus(PCIBus *bus, pci_bus_fn fn, void *opaque)
{
pci_for_each_bus_depth_first(bus, NULL, fn, opaque);
}
PCIBus *pci_device_root_bus(const PCIDevice *d);
const char *pci_root_bus_path(PCIDevice *dev);
+bool pci_bus_bypass_iommu(PCIBus *bus);
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn);
int pci_qdev_find_device(const char *id, PCIDevice **pdev);
void pci_bus_get_w64_range(PCIBus *bus, Range *range);
void pci_device_deassert_intx(PCIDevice *dev);
-typedef AddressSpace *(*PCIIOMMUFunc)(PCIBus *, void *, int);
+
+/**
+ * struct PCIIOMMUOps: callbacks structure for specific IOMMU handlers
+ * of a PCIBus
+ *
+ * Allows modifying the behavior of some IOMMU operations of the PCI
+ * framework for a set of devices on a PCI bus.
+ */
+typedef struct PCIIOMMUOps {
+ /**
+ * @get_address_space: get the address space for a set of devices
+ * on a PCI bus.
+ *
+ * Mandatory callback which returns a pointer to an #AddressSpace
+ *
+ * @bus: the #PCIBus being accessed.
+ *
+ * @opaque: the data passed to pci_setup_iommu().
+ *
+ * @devfn: device and function number
+ */
+ AddressSpace * (*get_address_space)(PCIBus *bus, void *opaque, int devfn);
+} PCIIOMMUOps;
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev);
-void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque);
+
+/**
+ * pci_setup_iommu: Initialize specific IOMMU handlers for a PCIBus
+ *
+ * Let PCI host bridges define specific operations.
+ *
+ * @bus: the #PCIBus being updated.
+ * @ops: the #PCIIOMMUOps
+ * @opaque: passed to callbacks of the @ops structure.
+ */
+void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque);
+
+pcibus_t pci_bar_address(PCIDevice *d,
+ int reg, uint8_t type, pcibus_t size);
static inline void
pci_set_byte(uint8_t *config, uint8_t val)
@@ -657,69 +568,51 @@ static inline void
pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg)
{
uint8_t val = pci_get_byte(config);
- uint8_t rval = reg << ctz32(mask);
- pci_set_byte(config, (~mask & val) | (mask & rval));
-}
+ uint8_t rval;
-static inline uint8_t
-pci_get_byte_by_mask(uint8_t *config, uint8_t mask)
-{
- uint8_t val = pci_get_byte(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_byte(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg)
{
uint16_t val = pci_get_word(config);
- uint16_t rval = reg << ctz32(mask);
- pci_set_word(config, (~mask & val) | (mask & rval));
-}
+ uint16_t rval;
-static inline uint16_t
-pci_get_word_by_mask(uint8_t *config, uint16_t mask)
-{
- uint16_t val = pci_get_word(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_word(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg)
{
uint32_t val = pci_get_long(config);
- uint32_t rval = reg << ctz32(mask);
- pci_set_long(config, (~mask & val) | (mask & rval));
-}
+ uint32_t rval;
-static inline uint32_t
-pci_get_long_by_mask(uint8_t *config, uint32_t mask)
-{
- uint32_t val = pci_get_long(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_long(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg)
{
uint64_t val = pci_get_quad(config);
- uint64_t rval = reg << ctz32(mask);
- pci_set_quad(config, (~mask & val) | (mask & rval));
-}
+ uint64_t rval;
-static inline uint64_t
-pci_get_quad_by_mask(uint8_t *config, uint64_t mask)
-{
- uint64_t val = pci_get_quad(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_quad(config, (~mask & val) | (mask & rval));
}
-PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
- const char *name);
+PCIDevice *pci_new_multifunction(int devfn, const char *name);
PCIDevice *pci_new(int devfn, const char *name);
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp);
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
- bool multifunction,
const char *name);
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name);
@@ -748,122 +641,7 @@ static inline void pci_irq_pulse(PCIDevice *pci_dev)
pci_irq_deassert(pci_dev);
}
-static inline int pci_is_express(const PCIDevice *d)
-{
- return d->cap_present & QEMU_PCI_CAP_EXPRESS;
-}
-
-static inline int pci_is_express_downstream_port(const PCIDevice *d)
-{
- uint8_t type;
-
- if (!pci_is_express(d) || !d->exp.exp_cap) {
- return 0;
- }
-
- type = pcie_cap_get_type(d);
-
- return type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_ROOT_PORT;
-}
-
-static inline uint32_t pci_config_size(const PCIDevice *d)
-{
- return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
-}
-
-static inline uint16_t pci_get_bdf(PCIDevice *dev)
-{
- return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
-}
-
-uint16_t pci_requester_id(PCIDevice *dev);
-
-/* DMA access functions */
-static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
-{
- return &dev->bus_master_as;
-}
-
-static inline int pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
- void *buf, dma_addr_t len, DMADirection dir)
-{
- dma_memory_rw(pci_get_address_space(dev), addr, buf, len, dir);
- return 0;
-}
-
-static inline int pci_dma_read(PCIDevice *dev, dma_addr_t addr,
- void *buf, dma_addr_t len)
-{
- return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
-}
-
-static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr,
- const void *buf, dma_addr_t len)
-{
- return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE);
-}
-
-#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
- static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
- dma_addr_t addr) \
- { \
- return ld##_l##_dma(pci_get_address_space(dev), addr); \
- } \
- static inline void st##_s##_pci_dma(PCIDevice *dev, \
- dma_addr_t addr, uint##_bits##_t val) \
- { \
- st##_s##_dma(pci_get_address_space(dev), addr, val); \
- }
-
-PCI_DMA_DEFINE_LDST(ub, b, 8);
-PCI_DMA_DEFINE_LDST(uw_le, w_le, 16)
-PCI_DMA_DEFINE_LDST(l_le, l_le, 32);
-PCI_DMA_DEFINE_LDST(q_le, q_le, 64);
-PCI_DMA_DEFINE_LDST(uw_be, w_be, 16)
-PCI_DMA_DEFINE_LDST(l_be, l_be, 32);
-PCI_DMA_DEFINE_LDST(q_be, q_be, 64);
-
-#undef PCI_DMA_DEFINE_LDST
-
-static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
- dma_addr_t *plen, DMADirection dir)
-{
- void *buf;
-
- buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir);
- return buf;
-}
-
-static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
- DMADirection dir, dma_addr_t access_len)
-{
- dma_memory_unmap(pci_get_address_space(dev), buffer, len, dir, access_len);
-}
-
-static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
- int alloc_hint)
-{
- qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
-}
-
-extern const VMStateDescription vmstate_pci_device;
-
-#define VMSTATE_PCI_DEVICE(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(PCIDevice), \
- .vmsd = &vmstate_pci_device, \
- .flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, PCIDevice), \
-}
-
-#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(PCIDevice), \
- .vmsd = &vmstate_pci_device, \
- .flags = VMS_STRUCT|VMS_POINTER, \
- .offset = vmstate_offset_pointer(_state, _field, PCIDevice), \
-}
-
MSIMessage pci_get_msi_message(PCIDevice *dev, int vector);
+void pci_set_power(PCIDevice *pci_dev, bool state);
#endif
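The pci.h hunk above replaces the bare PCIIOMMUFunc callback with a PCIIOMMUOps structure passed to pci_setup_iommu(). A hedged sketch of the new registration style; "MyHostState" and its per-slot address-space array are hypothetical, only PCIIOMMUOps, pci_setup_iommu() and PCI_SLOT() come from the declarations above:

    static AddressSpace *my_host_get_address_space(PCIBus *bus, void *opaque,
                                                   int devfn)
    {
        MyHostState *s = opaque;

        return &s->iommu_as[PCI_SLOT(devfn)];
    }

    static const PCIIOMMUOps my_host_iommu_ops = {
        .get_address_space = my_host_get_address_space,
    };

    /* in the host bridge realize function: */
    pci_setup_iommu(bus, &my_host_iommu_ops, s);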
diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h
index 99c674e949..5cd452115a 100644
--- a/include/hw/pci/pci_bridge.h
+++ b/include/hw/pci/pci_bridge.h
@@ -26,8 +26,10 @@
#ifndef QEMU_PCI_BRIDGE_H
#define QEMU_PCI_BRIDGE_H
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/pci/pci_bus.h"
+#include "hw/cxl/cxl.h"
+#include "qom/object.h"
typedef struct PCIBridgeWindows PCIBridgeWindows;
@@ -50,7 +52,8 @@ struct PCIBridgeWindows {
};
#define TYPE_PCI_BRIDGE "base-pci-bridge"
-#define PCI_BRIDGE(obj) OBJECT_CHECK(PCIBridge, (obj), TYPE_PCI_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIBridge, PCI_BRIDGE)
+#define IS_PCI_BRIDGE(dev) object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)
struct PCIBridge {
/*< private >*/
@@ -70,15 +73,49 @@ struct PCIBridge {
MemoryRegion address_space_mem;
MemoryRegion address_space_io;
- PCIBridgeWindows *windows;
+ PCIBridgeWindows windows;
pci_map_irq_fn map_irq;
const char *bus_name;
+
+ /* SLT is RO for PCIE to PCIE bridges, but old QEMU versions had it RW */
+ bool pcie_writeable_slt_bug;
};
#define PCI_BRIDGE_DEV_PROP_CHASSIS_NR "chassis_nr"
#define PCI_BRIDGE_DEV_PROP_MSI "msi"
#define PCI_BRIDGE_DEV_PROP_SHPC "shpc"
+typedef struct CXLHost CXLHost;
+
+typedef struct PXBDev {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ uint8_t bus_nr;
+ uint16_t numa_node;
+ bool bypass_iommu;
+} PXBDev;
+
+typedef struct PXBPCIEDev {
+ /*< private >*/
+ PXBDev parent_obj;
+} PXBPCIEDev;
+
+#define TYPE_PXB_DEV "pxb"
+OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV)
+
+typedef struct PXBCXLDev {
+ /*< private >*/
+ PXBPCIEDev parent_obj;
+ /*< public >*/
+
+ bool hdm_for_passthrough;
+ CXLHost *cxl_host_bridge; /* Pointer to a CXLHost */
+} PXBCXLDev;
+
+#define TYPE_PXB_CXL_DEV "pxb-cxl"
+OBJECT_DECLARE_SIMPLE_TYPE(PXBCXLDev, PXB_CXL_DEV)
int pci_bridge_ssvid_init(PCIDevice *dev, uint8_t offset,
uint16_t svid, uint16_t ssid,
@@ -115,11 +152,11 @@ void pci_bridge_map_irq(PCIBridge *br, const char* bus_name,
pci_map_irq_fn map_irq);
/* TODO: add this define to pci_regs.h in linux and then in qemu. */
-#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */
-#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */
-#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */
-#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */
-#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */
+#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */
+#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */
+#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */
+#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */
+#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */
typedef struct PCIBridgeQemuCap {
uint8_t id; /* Standard PCI capability header field */
@@ -137,6 +174,7 @@ typedef struct PCIBridgeQemuCap {
uint64_t mem_pref_64; /* Prefetchable memory to reserve (64-bit MMIO) */
} PCIBridgeQemuCap;
+#define REDHAT_PCI_CAP_TYPE_OFFSET 3
#define REDHAT_PCI_CAP_RESOURCE_RESERVE 1
/*
@@ -151,6 +189,13 @@ typedef struct PCIResReserve {
uint64_t mem_pref_64;
} PCIResReserve;
+#define REDHAT_PCI_CAP_RES_RESERVE_BUS_RES 4
+#define REDHAT_PCI_CAP_RES_RESERVE_IO 8
+#define REDHAT_PCI_CAP_RES_RESERVE_MEM 16
+#define REDHAT_PCI_CAP_RES_RESERVE_PREF_MEM_32 20
+#define REDHAT_PCI_CAP_RES_RESERVE_PREF_MEM_64 24
+#define REDHAT_PCI_CAP_RES_RESERVE_CAP_SIZE 32
+
int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset,
PCIResReserve res_reserve, Error **errp);
diff --git a/include/hw/pci/pci_bus.h b/include/hw/pci/pci_bus.h
index 0714f578af..2261312546 100644
--- a/include/hw/pci/pci_bus.h
+++ b/include/hw/pci/pci_bus.h
@@ -10,26 +10,30 @@
* use accessor functions in pci.h
*/
-typedef struct PCIBusClass {
+struct PCIBusClass {
/*< private >*/
BusClass parent_class;
/*< public >*/
int (*bus_num)(PCIBus *bus);
uint16_t (*numa_node)(PCIBus *bus);
-} PCIBusClass;
+};
enum PCIBusFlags {
/* This bus is the root of a PCI domain */
PCI_BUS_IS_ROOT = 0x0001,
/* PCIe extended configuration space is accessible on this bus */
PCI_BUS_EXTENDED_CONFIG_SPACE = 0x0002,
+ /* This is a CXL Type BUS */
+ PCI_BUS_CXL = 0x0004,
};
+#define PCI_NO_PASID UINT32_MAX
+
struct PCIBus {
BusState qbus;
enum PCIBusFlags flags;
- PCIIOMMUFunc iommu_fn;
+ const PCIIOMMUOps *iommu_ops;
void *iommu_opaque;
uint8_t devfn_min;
uint32_t slot_reserved_mask;
@@ -53,6 +57,11 @@ struct PCIBus {
Notifier machine_done;
};
+static inline bool pci_bus_is_cxl(PCIBus *bus)
+{
+ return !!(bus->flags & PCI_BUS_CXL);
+}
+
static inline bool pci_bus_is_root(PCIBus *bus)
{
return !!(bus->flags & PCI_BUS_IS_ROOT);
diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h
new file mode 100644
index 0000000000..d3dd0f64b2
--- /dev/null
+++ b/include/hw/pci/pci_device.h
@@ -0,0 +1,350 @@
+#ifndef QEMU_PCI_DEVICE_H
+#define QEMU_PCI_DEVICE_H
+
+#include "hw/pci/pci.h"
+#include "hw/pci/pcie.h"
+
+#define TYPE_PCI_DEVICE "pci-device"
+typedef struct PCIDeviceClass PCIDeviceClass;
+DECLARE_OBJ_CHECKERS(PCIDevice, PCIDeviceClass,
+ PCI_DEVICE, TYPE_PCI_DEVICE)
+
+/*
+ * Implemented by devices that can be plugged on CXL buses. In the spec, this is
+ * actually a "CXL Component", but we name it a device to match the PCI naming.
+ */
+#define INTERFACE_CXL_DEVICE "cxl-device"
+
+/* Implemented by devices that can be plugged on PCI Express buses */
+#define INTERFACE_PCIE_DEVICE "pci-express-device"
+
+/* Implemented by devices that can be plugged on Conventional PCI buses */
+#define INTERFACE_CONVENTIONAL_PCI_DEVICE "conventional-pci-device"
+
+struct PCIDeviceClass {
+ DeviceClass parent_class;
+
+ void (*realize)(PCIDevice *dev, Error **errp);
+ PCIUnregisterFunc *exit;
+ PCIConfigReadFunc *config_read;
+ PCIConfigWriteFunc *config_write;
+
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint8_t revision;
+ uint16_t class_id;
+ uint16_t subsystem_vendor_id; /* only for header type = 0 */
+ uint16_t subsystem_id; /* only for header type = 0 */
+
+ const char *romfile; /* rom bar */
+};
+
+enum PCIReqIDType {
+ PCI_REQ_ID_INVALID = 0,
+ PCI_REQ_ID_BDF,
+ PCI_REQ_ID_SECONDARY_BUS,
+ PCI_REQ_ID_MAX,
+};
+typedef enum PCIReqIDType PCIReqIDType;
+
+struct PCIReqIDCache {
+ PCIDevice *dev;
+ PCIReqIDType type;
+};
+typedef struct PCIReqIDCache PCIReqIDCache;
+
+struct PCIDevice {
+ DeviceState qdev;
+ bool partially_hotplugged;
+ bool has_power;
+
+ /* PCI config space */
+ uint8_t *config;
+
+ /*
+ * Used to enable config checks on load. Note that writable bits are
+ * never checked even if set in cmask.
+ */
+ uint8_t *cmask;
+
+ /* Used to implement R/W bytes */
+ uint8_t *wmask;
+
+ /* Used to implement RW1C(Write 1 to Clear) bytes */
+ uint8_t *w1cmask;
+
+ /* Used to allocate config space for capabilities. */
+ uint8_t *used;
+
+ /* the following fields are read only */
+ int32_t devfn;
+ /*
+ * Cached device to fetch requester ID from, to avoid the PCI tree
+ * walking every time we invoke PCI request (e.g., MSI). For
+ * conventional PCI root complex, this field is meaningless.
+ */
+ PCIReqIDCache requester_id_cache;
+ char name[64];
+ PCIIORegion io_regions[PCI_NUM_REGIONS];
+ AddressSpace bus_master_as;
+ MemoryRegion bus_master_container_region;
+ MemoryRegion bus_master_enable_region;
+
+ /* do not access the following fields */
+ PCIConfigReadFunc *config_read;
+ PCIConfigWriteFunc *config_write;
+
+ /* Legacy PCI VGA regions */
+ MemoryRegion *vga_regions[QEMU_PCI_VGA_NUM_REGIONS];
+ bool has_vga;
+
+ /* Current IRQ levels. Used internally by the generic PCI code. */
+ uint8_t irq_state;
+
+ /* Capability bits */
+ uint32_t cap_present;
+
+ /* Offset of MSI-X capability in config space */
+ uint8_t msix_cap;
+
+ /* MSI-X entries */
+ int msix_entries_nr;
+
+ /* Space to store MSIX table & pending bit array */
+ uint8_t *msix_table;
+ uint8_t *msix_pba;
+
+ /* May be used by INTx or MSI during interrupt notification */
+ void *irq_opaque;
+
+ MSITriggerFunc *msi_trigger;
+ MSIPrepareMessageFunc *msi_prepare_message;
+ MSIxPrepareMessageFunc *msix_prepare_message;
+
+ /* MemoryRegion container for msix exclusive BAR setup */
+ MemoryRegion msix_exclusive_bar;
+ /* Memory Regions for MSIX table and pending bit entries. */
+ MemoryRegion msix_table_mmio;
+ MemoryRegion msix_pba_mmio;
+ /* Reference-count for entries actually in use by driver. */
+ unsigned *msix_entry_used;
+ /* MSIX function mask set or MSIX disabled */
+ bool msix_function_masked;
+ /* Version id needed for VMState */
+ int32_t version_id;
+
+ /* Offset of MSI capability in config space */
+ uint8_t msi_cap;
+
+ /* PCI Express */
+ PCIExpressDevice exp;
+
+ /* SHPC */
+ SHPCDevice *shpc;
+
+ /* Location of option rom */
+ char *romfile;
+ uint32_t romsize;
+ bool has_rom;
+ MemoryRegion rom;
+ uint32_t rom_bar;
+
+ /* INTx routing notifier */
+ PCIINTxRoutingNotifier intx_routing_notifier;
+
+ /* MSI-X notifiers */
+ MSIVectorUseNotifier msix_vector_use_notifier;
+ MSIVectorReleaseNotifier msix_vector_release_notifier;
+ MSIVectorPollNotifier msix_vector_poll_notifier;
+
+ /* ID of standby device in net_failover pair */
+ char *failover_pair_id;
+ uint32_t acpi_index;
+};
+
+static inline int pci_intx(PCIDevice *pci_dev)
+{
+ return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
+}
+
+static inline int pci_is_cxl(const PCIDevice *d)
+{
+ return d->cap_present & QEMU_PCIE_CAP_CXL;
+}
+
+static inline int pci_is_express(const PCIDevice *d)
+{
+ return d->cap_present & QEMU_PCI_CAP_EXPRESS;
+}
+
+static inline int pci_is_express_downstream_port(const PCIDevice *d)
+{
+ uint8_t type;
+
+ if (!pci_is_express(d) || !d->exp.exp_cap) {
+ return 0;
+ }
+
+ type = pcie_cap_get_type(d);
+
+ return type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_ROOT_PORT;
+}
+
+static inline int pci_is_vf(const PCIDevice *d)
+{
+ return d->exp.sriov_vf.pf != NULL;
+}
+
+static inline uint32_t pci_config_size(const PCIDevice *d)
+{
+ return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
+}
+
+static inline uint16_t pci_get_bdf(PCIDevice *dev)
+{
+ return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
+}
+
+uint16_t pci_requester_id(PCIDevice *dev);
+
+/* DMA access functions */
+static inline AddressSpace *pci_get_address_space(PCIDevice *dev)
+{
+ return &dev->bus_master_as;
+}
+
+/**
+ * pci_dma_rw: Read from or write to an address space from PCI device.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @dev: #PCIDevice doing the memory access
+ * @addr: address within the #PCIDevice address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @dir: indicates the transfer direction
+ */
+static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir, MemTxAttrs attrs)
+{
+ return dma_memory_rw(pci_get_address_space(dev), addr, buf, len,
+ dir, attrs);
+}
+
+/**
+ * pci_dma_read: Read from an address space from PCI device.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @dev: #PCIDevice doing the memory access
+ * @addr: address within the #PCIDevice address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return pci_dma_rw(dev, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
+}
+
+/**
+ * pci_dma_write: Write to address space from PCI device.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @dev: #PCIDevice doing the memory access
+ * @addr: address within the #PCIDevice address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ return pci_dma_rw(dev, addr, (void *) buf, len,
+ DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED);
+}
+
+#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \
+ static inline MemTxResult ld##_l##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, \
+ uint##_bits##_t *val, \
+ MemTxAttrs attrs) \
+ { \
+ return ld##_l##_dma(pci_get_address_space(dev), addr, val, attrs); \
+ } \
+ static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+ MemTxAttrs attrs) \
+ { \
+ return st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \
+ }
+
+PCI_DMA_DEFINE_LDST(ub, b, 8);
+PCI_DMA_DEFINE_LDST(uw_le, w_le, 16)
+PCI_DMA_DEFINE_LDST(l_le, l_le, 32);
+PCI_DMA_DEFINE_LDST(q_le, q_le, 64);
+PCI_DMA_DEFINE_LDST(uw_be, w_be, 16)
+PCI_DMA_DEFINE_LDST(l_be, l_be, 32);
+PCI_DMA_DEFINE_LDST(q_be, q_be, 64);
+
+#undef PCI_DMA_DEFINE_LDST
+
+/**
+ * pci_dma_map: Map device PCI address space range into host virtual address
+ * @dev: #PCIDevice to be accessed
+ * @addr: address within that device's address space
+ * @plen: pointer to length of buffer; updated on return to indicate
+ * if only a subset of the requested range has been mapped
+ * @dir: indicates the transfer direction
+ *
+ * Return: A host pointer, or %NULL if the resources needed to
+ * perform the mapping are exhausted (in that case *@plen
+ * is set to zero).
+ */
+static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
+ dma_addr_t *plen, DMADirection dir)
+{
+ return dma_memory_map(pci_get_address_space(dev), addr, plen, dir,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len)
+{
+ dma_memory_unmap(pci_get_address_space(dev), buffer, len, dir, access_len);
+}
+
+static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
+ int alloc_hint)
+{
+ qemu_sglist_init(qsg, DEVICE(dev), alloc_hint, pci_get_address_space(dev));
+}
+
+extern const VMStateDescription vmstate_pci_device;
+
+#define VMSTATE_PCI_DEVICE(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(PCIDevice), \
+ .vmsd = &vmstate_pci_device, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PCIDevice), \
+}
+
+#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(PCIDevice), \
+ .vmsd = &vmstate_pci_device, \
+ .flags = VMS_STRUCT | VMS_POINTER, \
+ .offset = vmstate_offset_pointer(_state, _field, PCIDevice), \
+}
+
+#endif
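The DMA helpers above all route accesses through the device's bus-master address space returned by pci_get_address_space(). A minimal, purely illustrative sketch of how a device model might use them (the device name and descriptor layout are hypothetical, not part of this header):

    /* Hypothetical example, not part of the patch: read a 16-byte descriptor
     * from guest memory and write back a little-endian status word. */
    static void mydev_process_descriptor(PCIDevice *pdev, dma_addr_t desc_addr)
    {
        uint8_t desc[16];

        /* Bulk copy through the bus-master address space */
        if (pci_dma_read(pdev, desc_addr, desc, sizeof(desc)) != MEMTX_OK) {
            return;
        }

        /* ... interpret desc ... */

        /* Single little-endian 32-bit store, generated by PCI_DMA_DEFINE_LDST */
        stl_le_pci_dma(pdev, desc_addr + 12, 0x1 /* done */,
                       MEMTXATTRS_UNSPECIFIED);
    }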
diff --git a/include/hw/pci/pci_host.h b/include/hw/pci/pci_host.h
index 6210a7e14d..e52d8ec2cd 100644
--- a/include/hw/pci/pci_host.h
+++ b/include/hw/pci/pci_host.h
@@ -29,14 +29,12 @@
#define PCI_HOST_H
#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define PCI_HOST_BYPASS_IOMMU "bypass-iommu"
#define TYPE_PCI_HOST_BRIDGE "pci-host-bridge"
-#define PCI_HOST_BRIDGE(obj) \
- OBJECT_CHECK(PCIHostState, (obj), TYPE_PCI_HOST_BRIDGE)
-#define PCI_HOST_BRIDGE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCIHostBridgeClass, (klass), TYPE_PCI_HOST_BRIDGE)
-#define PCI_HOST_BRIDGE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCIHostBridgeClass, (obj), TYPE_PCI_HOST_BRIDGE)
+OBJECT_DECLARE_TYPE(PCIHostState, PCIHostBridgeClass, PCI_HOST_BRIDGE)
struct PCIHostState {
SysBusDevice busdev;
@@ -47,15 +45,16 @@ struct PCIHostState {
uint32_t config_reg;
bool mig_enabled;
PCIBus *bus;
+ bool bypass_iommu;
QLIST_ENTRY(PCIHostState) next;
};
-typedef struct PCIHostBridgeClass {
+struct PCIHostBridgeClass {
SysBusDeviceClass parent_class;
const char *(*root_bus_path)(PCIHostState *, PCIBus *);
-} PCIHostBridgeClass;
+};
/* common internal helpers for PCI/PCIe hosts, cut off overflows */
void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
index 11f8ab7149..f1a53fea8d 100644
--- a/include/hw/pci/pci_ids.h
+++ b/include/hw/pci/pci_ids.h
@@ -26,6 +26,7 @@
#define PCI_CLASS_STORAGE_SATA 0x0106
#define PCI_CLASS_STORAGE_SAS 0x0107
#define PCI_CLASS_STORAGE_EXPRESS 0x0108
+#define PCI_CLASS_STORAGE_UFS 0x0109
#define PCI_CLASS_STORAGE_OTHER 0x0180
#define PCI_BASE_CLASS_NETWORK 0x02
@@ -53,6 +54,7 @@
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -156,6 +158,9 @@
/* Vendors and devices. Sort key: vendor first, device next. */
+/* Ref: PCIe r6.0 Table 6-32 */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
+
#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
#define PCI_DEVICE_ID_LSI_53C810 0x0001
#define PCI_DEVICE_ID_LSI_53C895A 0x0012
@@ -165,7 +170,6 @@
#define PCI_VENDOR_ID_DEC 0x1011
#define PCI_DEVICE_ID_DEC_21143 0x0019
-#define PCI_DEVICE_ID_DEC_21154 0x0026
#define PCI_VENDOR_ID_CIRRUS 0x1013
@@ -175,6 +179,8 @@
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#define PCI_VENDOR_ID_HP 0x103c
+
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_VENDOR_ID_MOTOROLA 0x1057
@@ -192,6 +198,9 @@
#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+#define PCI_VENDOR_ID_ORACLE 0x108e
+#define PCI_DEVICE_ID_REMOTE_IOHUB 0xb000
+
#define PCI_VENDOR_ID_CMD 0x1095
#define PCI_DEVICE_ID_CMD_646 0x0646
@@ -201,14 +210,17 @@
#define PCI_VENDOR_ID_XILINX 0x10ee
#define PCI_VENDOR_ID_VIA 0x1106
-#define PCI_DEVICE_ID_VIA_ISA_BRIDGE 0x0686
+#define PCI_DEVICE_ID_VIA_82C686B_ISA 0x0686
#define PCI_DEVICE_ID_VIA_IDE 0x0571
#define PCI_DEVICE_ID_VIA_UHCI 0x3038
-#define PCI_DEVICE_ID_VIA_ACPI 0x3057
+#define PCI_DEVICE_ID_VIA_82C686B_PM 0x3057
#define PCI_DEVICE_ID_VIA_AC97 0x3058
#define PCI_DEVICE_ID_VIA_MC97 0x3068
+#define PCI_DEVICE_ID_VIA_8231_ISA 0x8231
+#define PCI_DEVICE_ID_VIA_8231_PM 0x8235
#define PCI_VENDOR_ID_MARVELL 0x11ab
+#define PCI_DEVICE_ID_MARVELL_MV6436X 0x6460
#define PCI_VENDOR_ID_SILICON_MOTION 0x126f
#define PCI_DEVICE_ID_SM501 0x0501
@@ -221,6 +233,9 @@
#define PCI_VENDOR_ID_FREESCALE 0x1957
#define PCI_DEVICE_ID_MPC8533E 0x0030
+#define PCI_VENDOR_ID_BAIDU 0x1d22
+#define PCI_DEVICE_ID_KUNLUN_VF 0x3685
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82378 0x0484
#define PCI_DEVICE_ID_INTEL_82441 0x1237
@@ -228,6 +243,7 @@
#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
#define PCI_DEVICE_ID_INTEL_82801D 0x24CD
#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_NVME 0x5845
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
diff --git a/include/hw/pci/pci_regs.h b/include/hw/pci/pci_regs.h
index 77ba64b931..a590140962 100644
--- a/include/hw/pci/pci_regs.h
+++ b/include/hw/pci/pci_regs.h
@@ -4,5 +4,6 @@
#include "standard-headers/linux/pci_regs.h"
#define PCI_PM_CAP_VER_1_1 0x0002 /* PCI PM spec ver. 1.1 */
+#define PCI_PM_CAP_VER_1_2 0x0003 /* PCI PM spec ver. 1.2 */
#endif
diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h
index 14c58ebdb6..11f5a91bbb 100644
--- a/include/hw/pci/pcie.h
+++ b/include/hw/pci/pcie.h
@@ -24,17 +24,10 @@
#include "hw/pci/pci_regs.h"
#include "hw/pci/pcie_regs.h"
#include "hw/pci/pcie_aer.h"
+#include "hw/pci/pcie_sriov.h"
#include "hw/hotplug.h"
typedef enum {
- /* for attention and power indicator */
- PCI_EXP_HP_IND_RESERVED = PCI_EXP_SLTCTL_IND_RESERVED,
- PCI_EXP_HP_IND_ON = PCI_EXP_SLTCTL_IND_ON,
- PCI_EXP_HP_IND_BLINK = PCI_EXP_SLTCTL_IND_BLINK,
- PCI_EXP_HP_IND_OFF = PCI_EXP_SLTCTL_IND_OFF,
-} PCIExpressIndicator;
-
-typedef enum {
/* these bits must match the bits in Slot Control/Status registers.
* PCI_EXP_HP_EV_xxx = PCI_EXP_SLTCTL_xxxE = PCI_EXP_SLTSTA_xxx
*
@@ -81,6 +74,11 @@ struct PCIExpressDevice {
/* ACS */
uint16_t acs_cap;
+
+ /* SR/IOV */
+ uint16_t sriov_cap;
+ PCIESriovPF sriov_pf;
+ PCIESriovVF sriov_vf;
};
#define COMPAT_PROP_PCP "power_controller_present"
@@ -95,6 +93,7 @@ void pcie_cap_exit(PCIDevice *dev);
int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset);
void pcie_cap_v1_exit(PCIDevice *dev);
uint8_t pcie_cap_get_type(const PCIDevice *dev);
+uint8_t pcie_cap_get_version(const PCIDevice *dev);
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector);
uint8_t pcie_cap_flags_get_vector(PCIDevice *dev);
@@ -112,6 +111,7 @@ void pcie_cap_slot_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len);
int pcie_cap_slot_post_load(void *opaque, int version_id);
void pcie_cap_slot_push_attention_button(PCIDevice *dev);
+void pcie_cap_slot_enable_power(PCIDevice *dev);
void pcie_cap_root_init(PCIDevice *dev);
void pcie_cap_root_reset(PCIDevice *dev);
@@ -135,9 +135,9 @@ void pcie_sync_bridge_lnk(PCIDevice *dev);
void pcie_acs_init(PCIDevice *dev, uint16_t offset);
void pcie_acs_reset(PCIDevice *dev);
-void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn);
+void pcie_ari_init(PCIDevice *dev, uint16_t offset);
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
-void pcie_ats_init(PCIDevice *dev, uint16_t offset);
+void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned);
void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
diff --git a/include/hw/pci/pcie_aer.h b/include/hw/pci/pcie_aer.h
index 65e71d98fe..4a9f0ea69d 100644
--- a/include/hw/pci/pcie_aer.h
+++ b/include/hw/pci/pcie_aer.h
@@ -40,7 +40,7 @@ struct PCIEAERLog {
* The specified value will be clipped down to PCIE_AER_LOG_MAX_LIMIT
* to avoid unreasonable memory usage.
* I bet that 128 log size would be big enough, otherwise too many errors
- * for system to function normaly. But could consecutive errors occur?
+ * for system to function normally. But could consecutive errors occur?
*/
#define PCIE_AER_LOG_MAX_DEFAULT 8
#define PCIE_AER_LOG_MAX_LIMIT 128
@@ -100,4 +100,5 @@ void pcie_aer_root_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len,
uint32_t root_cmd_prev);
+int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err);
#endif /* QEMU_PCIE_AER_H */
diff --git a/include/hw/pci/pcie_doe.h b/include/hw/pci/pcie_doe.h
new file mode 100644
index 0000000000..87dc17dcef
--- /dev/null
+++ b/include/hw/pci/pcie_doe.h
@@ -0,0 +1,122 @@
+/*
+ * PCIe Data Object Exchange
+ *
+ * Copyright (C) 2021 Avery Design Systems, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef PCIE_DOE_H
+#define PCIE_DOE_H
+
+#include "qemu/range.h"
+#include "hw/register.h"
+
+/*
+ * Reference:
+ * PCIe r6.0 - 7.9.24 Data Object Exchange Extended Capability
+ */
+/* Capabilities Register - r6.0 7.9.24.2 */
+#define PCI_EXP_DOE_CAP 0x04
+REG32(PCI_DOE_CAP_REG, 0)
+ FIELD(PCI_DOE_CAP_REG, INTR_SUPP, 0, 1)
+ FIELD(PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM, 1, 11)
+
+/* Control Register - r6.0 7.9.24.3 */
+#define PCI_EXP_DOE_CTRL 0x08
+REG32(PCI_DOE_CAP_CONTROL, 0)
+ FIELD(PCI_DOE_CAP_CONTROL, DOE_ABORT, 0, 1)
+ FIELD(PCI_DOE_CAP_CONTROL, DOE_INTR_EN, 1, 1)
+ FIELD(PCI_DOE_CAP_CONTROL, DOE_GO, 31, 1)
+
+/* Status Register - r6.0 7.9.24.4 */
+#define PCI_EXP_DOE_STATUS 0x0c
+REG32(PCI_DOE_CAP_STATUS, 0)
+ FIELD(PCI_DOE_CAP_STATUS, DOE_BUSY, 0, 1)
+ FIELD(PCI_DOE_CAP_STATUS, DOE_INTR_STATUS, 1, 1)
+ FIELD(PCI_DOE_CAP_STATUS, DOE_ERROR, 2, 1)
+ FIELD(PCI_DOE_CAP_STATUS, DATA_OBJ_RDY, 31, 1)
+
+/* Write Data Mailbox Register - r6.0 7.9.24.5 */
+#define PCI_EXP_DOE_WR_DATA_MBOX 0x10
+
+/* Read Data Mailbox Register - r6.0 7.9.24.6 */
+#define PCI_EXP_DOE_RD_DATA_MBOX 0x14
+
+/* PCI-SIG defined Data Object Types - r6.0 Table 6-32 */
+#define PCI_SIG_DOE_DISCOVERY 0x00
+
+#define PCI_DOE_DW_SIZE_MAX (1 << 18)
+#define PCI_DOE_PROTOCOL_NUM_MAX 256
+
+#define DATA_OBJ_BUILD_HEADER1(v, p) (((p) << 16) | (v))
+#define DATA_OBJ_LEN_MASK(len) ((len) & (PCI_DOE_DW_SIZE_MAX - 1))
+
+typedef struct DOEHeader DOEHeader;
+typedef struct DOEProtocol DOEProtocol;
+typedef struct DOECap DOECap;
+
+struct DOEHeader {
+ uint16_t vendor_id;
+ uint8_t data_obj_type;
+ uint8_t reserved;
+ uint32_t length;
+} QEMU_PACKED;
+
+/* Protocol information and response handler callback */
+struct DOEProtocol {
+ uint16_t vendor_id;
+ uint8_t data_obj_type;
+ bool (*handle_request)(DOECap *);
+};
+
+struct DOECap {
+ /* Owner */
+ PCIDevice *pdev;
+
+ uint16_t offset;
+
+ struct {
+ bool intr;
+ uint16_t vec;
+ } cap;
+
+ struct {
+ bool abort;
+ bool intr;
+ bool go;
+ } ctrl;
+
+ struct {
+ bool busy;
+ bool intr;
+ bool error;
+ bool ready;
+ } status;
+
+ uint32_t *write_mbox;
+ uint32_t *read_mbox;
+
+ /* Mailbox position indicator */
+ uint32_t read_mbox_idx;
+ uint32_t read_mbox_len;
+ uint32_t write_mbox_len;
+
+ /* Supported protocols and their response callbacks */
+ DOEProtocol *protocols;
+ uint16_t protocol_num;
+};
+
+void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset,
+ DOEProtocol *protocols, bool intr, uint16_t vec);
+void pcie_doe_fini(DOECap *doe_cap);
+bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size,
+ uint32_t *buf);
+void pcie_doe_write_config(DOECap *doe_cap, uint32_t addr,
+ uint32_t val, int size);
+uint32_t pcie_doe_build_protocol(DOEProtocol *p);
+void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap);
+void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp);
+uint32_t pcie_doe_get_obj_len(void *obj);
+#endif /* PCIE_DOE_H */
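As a rough usage sketch (names, offsets and the vendor protocol are illustrative, not from this patch), a device exposing a DOE mailbox declares a protocol table with a handle_request callback, initializes the capability at realize time, and forwards config accesses to pcie_doe_read_config()/pcie_doe_write_config():

    /* Illustrative only */
    static bool mydev_doe_handle_request(DOECap *doe_cap)
    {
        /* Parse the object in doe_cap->write_mbox, build a response and hand
         * it over with pcie_doe_set_rsp(); return true once it is queued. */
        return false;
    }

    static DOEProtocol mydev_doe_protocols[] = {
        { 0x1234 /* hypothetical vendor */, 0x01 /* object type */,
          mydev_doe_handle_request },
        { }
    };

    static void mydev_realize(PCIDevice *pdev, Error **errp)
    {
        MyDevState *s = MYDEV(pdev);   /* hypothetical device state */

        pcie_doe_init(pdev, &s->doe_cap, 0x160 /* hypothetical cap offset */,
                      mydev_doe_protocols, false /* no interrupt */, 0);
    }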
diff --git a/include/hw/pci/pcie_host.h b/include/hw/pci/pcie_host.h
index 3f7b9886d1..82d92177da 100644
--- a/include/hw/pci/pcie_host.h
+++ b/include/hw/pci/pcie_host.h
@@ -23,10 +23,10 @@
#include "hw/pci/pci_host.h"
#include "exec/memory.h"
+#include "qom/object.h"
#define TYPE_PCIE_HOST_BRIDGE "pcie-host-bridge"
-#define PCIE_HOST_BRIDGE(obj) \
- OBJECT_CHECK(PCIExpressHost, (obj), TYPE_PCIE_HOST_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIExpressHost, PCIE_HOST_BRIDGE)
#define PCIE_HOST_MCFG_BASE "MCFG"
#define PCIE_HOST_MCFG_SIZE "mcfg_size"
@@ -60,15 +60,15 @@ void pcie_host_mmcfg_update(PCIExpressHost *e,
/*
* PCI express ECAM (Enhanced Configuration Address Mapping) format.
* AKA mmcfg address
- * bit 20 - 28: bus number
+ * bit 20 - 27: bus number
* bit 15 - 19: device number
* bit 12 - 14: function number
* bit 0 - 11: offset in configuration space of a given device
*/
-#define PCIE_MMCFG_SIZE_MAX (1ULL << 29)
+#define PCIE_MMCFG_SIZE_MAX (1ULL << 28)
#define PCIE_MMCFG_SIZE_MIN (1ULL << 20)
#define PCIE_MMCFG_BUS_BIT 20
-#define PCIE_MMCFG_BUS_MASK 0x1ff
+#define PCIE_MMCFG_BUS_MASK 0xff
#define PCIE_MMCFG_DEVFN_BIT 12
#define PCIE_MMCFG_DEVFN_MASK 0xff
#define PCIE_MMCFG_CONFOFFSET_MASK 0xfff
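With the bus field narrowed to bits 20-27 (256 bus numbers per ECAM region), decoding an mmcfg offset with the masks above looks like this (illustrative helper, not part of the header):

    /* Illustrative decode of an ECAM (mmcfg) offset */
    static inline void mmcfg_decode(hwaddr mmcfg_addr, uint8_t *bus,
                                    uint8_t *devfn, uint16_t *cfg_off)
    {
        *bus     = (mmcfg_addr >> PCIE_MMCFG_BUS_BIT) & PCIE_MMCFG_BUS_MASK;
        *devfn   = (mmcfg_addr >> PCIE_MMCFG_DEVFN_BIT) & PCIE_MMCFG_DEVFN_MASK;
        *cfg_off = mmcfg_addr & PCIE_MMCFG_CONFOFFSET_MASK;
        /* e.g. 0x0ff23010 -> bus 0xff, devfn 0x23 (device 4, function 3),
         * config offset 0x10 */
    }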
diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h
index caae57573b..90e6cf45b8 100644
--- a/include/hw/pci/pcie_port.h
+++ b/include/hw/pci/pcie_port.h
@@ -23,9 +23,11 @@
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci_device.h"
+#include "qom/object.h"
#define TYPE_PCIE_PORT "pcie-port"
-#define PCIE_PORT(obj) OBJECT_CHECK(PCIEPort, (obj), TYPE_PCIE_PORT)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIEPort, PCIE_PORT)
struct PCIEPort {
/*< private >*/
@@ -38,8 +40,12 @@ struct PCIEPort {
void pcie_port_init_reg(PCIDevice *d);
+PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn);
+PCIDevice *pcie_find_port_first(PCIBus *bus);
+int pcie_count_ds_ports(PCIBus *bus);
+
#define TYPE_PCIE_SLOT "pcie-slot"
-#define PCIE_SLOT(obj) OBJECT_CHECK(PCIESlot, (obj), TYPE_PCIE_SLOT)
+OBJECT_DECLARE_SIMPLE_TYPE(PCIESlot, PCIE_SLOT)
struct PCIESlot {
/*< private >*/
@@ -56,8 +62,12 @@ struct PCIESlot {
/* Disable ACS (really for a pcie_root_port) */
bool disable_acs;
- /* Indicates whether hot-plug is enabled on the slot */
+ /* Indicates whether any type of hot-plug is allowed on the slot */
bool hotplug;
+
+ /* broken ACPI hotplug compat knob to preserve 6.1 ABI intact */
+ bool hide_native_hotplug_cap;
+
QLIST_ENTRY(PCIESlot) next;
};
@@ -67,15 +77,14 @@ int pcie_chassis_add_slot(struct PCIESlot *slot);
void pcie_chassis_del_slot(PCIESlot *s);
#define TYPE_PCIE_ROOT_PORT "pcie-root-port-base"
-#define PCIE_ROOT_PORT_CLASS(klass) \
- OBJECT_CLASS_CHECK(PCIERootPortClass, (klass), TYPE_PCIE_ROOT_PORT)
-#define PCIE_ROOT_PORT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCIERootPortClass, (obj), TYPE_PCIE_ROOT_PORT)
+typedef struct PCIERootPortClass PCIERootPortClass;
+DECLARE_CLASS_CHECKERS(PCIERootPortClass, PCIE_ROOT_PORT,
+ TYPE_PCIE_ROOT_PORT)
-typedef struct PCIERootPortClass {
+struct PCIERootPortClass {
PCIDeviceClass parent_class;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
+ ResettablePhases parent_phases;
uint8_t (*aer_vector)(const PCIDevice *dev);
int (*interrupts_init)(PCIDevice *dev, Error **errp);
@@ -86,6 +95,6 @@ typedef struct PCIERootPortClass {
int ssvid_offset;
int acs_offset; /* If nonzero, optional ACS capability offset */
int ssid;
-} PCIERootPortClass;
+};
#endif /* QEMU_PCIE_PORT_H */
diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h
index 1db86b0ec4..9d3b6868dc 100644
--- a/include/hw/pci/pcie_regs.h
+++ b/include/hw/pci/pcie_regs.h
@@ -39,6 +39,8 @@ typedef enum PCIExpLinkSpeed {
QEMU_PCI_EXP_LNK_5GT,
QEMU_PCI_EXP_LNK_8GT,
QEMU_PCI_EXP_LNK_16GT,
+ QEMU_PCI_EXP_LNK_32GT,
+ QEMU_PCI_EXP_LNK_64GT,
} PCIExpLinkSpeed;
#define QEMU_PCI_EXP_LNKCAP_MLS(speed) (speed)
@@ -66,20 +68,6 @@ typedef enum PCIExpLinkWidth {
#define PCI_EXP_SLTCAP_PSN_SHIFT ctz32(PCI_EXP_SLTCAP_PSN)
-#define PCI_EXP_SLTCTL_IND_RESERVED 0x0
-#define PCI_EXP_SLTCTL_IND_ON 0x1
-#define PCI_EXP_SLTCTL_IND_BLINK 0x2
-#define PCI_EXP_SLTCTL_IND_OFF 0x3
-#define PCI_EXP_SLTCTL_AIC_SHIFT ctz32(PCI_EXP_SLTCTL_AIC)
-#define PCI_EXP_SLTCTL_AIC_OFF \
- (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_AIC_SHIFT)
-
-#define PCI_EXP_SLTCTL_PIC_SHIFT ctz32(PCI_EXP_SLTCTL_PIC)
-#define PCI_EXP_SLTCTL_PIC_OFF \
- (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT)
-#define PCI_EXP_SLTCTL_PIC_ON \
- (PCI_EXP_SLTCTL_IND_ON << PCI_EXP_SLTCTL_PIC_SHIFT)
-
#define PCI_EXP_SLTCTL_SUPPORTED \
(PCI_EXP_SLTCTL_ABPE | \
PCI_EXP_SLTCTL_PDCE | \
@@ -155,6 +143,9 @@ typedef enum PCIExpLinkWidth {
PCI_ERR_UNC_ATOP_EBLOCKED | \
PCI_ERR_UNC_TLP_PRF_BLOCKED)
+#define PCI_ERR_UNC_MASK_DEFAULT (PCI_ERR_UNC_INTN | \
+ PCI_ERR_UNC_TLP_PRF_BLOCKED)
+
#define PCI_ERR_UNC_SEVERITY_DEFAULT (PCI_ERR_UNC_DLP | \
PCI_ERR_UNC_SDN | \
PCI_ERR_UNC_FCP | \
@@ -179,4 +170,8 @@ typedef enum PCIExpLinkWidth {
#define PCI_ACS_VER 0x1
#define PCI_ACS_SIZEOF 8
+/* DOE Capability Register Fields */
+#define PCI_DOE_VER 0x1
+#define PCI_DOE_SIZEOF 24
+
#endif /* QEMU_PCIE_REGS_H */
diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h
new file mode 100644
index 0000000000..b77eb7bf58
--- /dev/null
+++ b/include/hw/pci/pcie_sriov.h
@@ -0,0 +1,82 @@
+/*
+ * pcie_sriov.h:
+ *
+ * Implementation of SR/IOV emulation support.
+ *
+ * Copyright (c) 2015 Knut Omang <knut.omang@oracle.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_PCIE_SRIOV_H
+#define QEMU_PCIE_SRIOV_H
+
+#include "hw/pci/pci.h"
+
+struct PCIESriovPF {
+ uint16_t num_vfs; /* Number of virtual functions created */
+ uint8_t vf_bar_type[PCI_NUM_REGIONS]; /* Store type for each VF bar */
+ const char *vfname; /* Reference to the device type used for the VFs */
+ PCIDevice **vf; /* Pointer to an array of num_vfs VF devices */
+};
+
+struct PCIESriovVF {
+ PCIDevice *pf; /* Pointer back to owner physical function */
+ uint16_t vf_number; /* Logical VF number of this function */
+};
+
+void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
+ const char *vfname, uint16_t vf_dev_id,
+ uint16_t init_vfs, uint16_t total_vfs,
+ uint16_t vf_offset, uint16_t vf_stride);
+void pcie_sriov_pf_exit(PCIDevice *dev);
+
+/* Set up a VF bar in the SR/IOV bar area */
+void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
+ uint8_t type, dma_addr_t size);
+
+/* Instantiate a bar for a VF */
+void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num,
+ MemoryRegion *memory);
+
+/*
+ * Default (minimal) page size support values
+ * as required by the SR/IOV standard:
+ * 0x553 << 12 = 0x553000 = 4K + 8K + 64K + 256K + 1M + 4M
+ */
+#define SRIOV_SUP_PGSIZE_MINREQ 0x553
+
+/*
+ * Optionally add supported page sizes to the mask of supported page sizes
+ * Page size values are interpreted as opt_sup_pgsize << 12.
+ */
+void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize);
+
+/* SR/IOV capability config write handler */
+void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
+ uint32_t val, int len);
+
+/* Reset SR/IOV */
+void pcie_sriov_pf_reset(PCIDevice *dev);
+
+/* Get logical VF number of a VF - only valid for VFs */
+uint16_t pcie_sriov_vf_number(PCIDevice *dev);
+
+/*
+ * Get the physical function that owns this VF.
+ * Returns NULL if dev is not a virtual function
+ */
+PCIDevice *pcie_sriov_get_pf(PCIDevice *dev);
+
+/*
+ * Get the n-th VF of this physical function - only valid for PF.
+ * Returns NULL if index is invalid
+ */
+PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n);
+
+/* Returns the current number of virtual functions. */
+uint16_t pcie_sriov_num_vfs(PCIDevice *dev);
+
+#endif /* QEMU_PCIE_SRIOV_H */
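A minimal sketch of PF-side setup, assuming a hypothetical "my-vf" VF device type and arbitrary capability offset and IDs (none of these values come from this patch):

    /* Illustrative only */
    static void mypf_realize(PCIDevice *pdev, Error **errp)
    {
        /* SR-IOV extended capability at a hypothetical offset, up to 8 VFs */
        pcie_sriov_pf_init(pdev, 0x160, "my-vf",
                           0xbeef /* VF device id */,
                           8 /* initial VFs */, 8 /* total VFs */,
                           0x80 /* first VF devfn offset */, 1 /* stride */);

        /* Every VF gets a 64 KiB 64-bit memory BAR 0; the VF's own realize
         * then calls pcie_sriov_vf_register_bar() to back it. */
        pcie_sriov_pf_init_vf_bar(pdev, 0,
                                  PCI_BASE_ADDRESS_SPACE_MEMORY |
                                  PCI_BASE_ADDRESS_MEM_TYPE_64,
                                  0x10000);
    }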
diff --git a/include/hw/pci/shpc.h b/include/hw/pci/shpc.h
index d5683b7399..a0789df153 100644
--- a/include/hw/pci/shpc.h
+++ b/include/hw/pci/shpc.h
@@ -3,7 +3,7 @@
#include "exec/memory.h"
#include "hw/hotplug.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "migration/vmstate.h"
struct SHPCDevice {
@@ -52,7 +52,7 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
-extern VMStateInfo shpc_vmstate_info;
+extern const VMStateInfo shpc_vmstate_info;
#define SHPC_VMSTATE(_field, _type, _test) \
VMSTATE_BUFFER_UNSAFE_INFO_TEST(_field, _type, _test, 0, \
shpc_vmstate_info, 0)
diff --git a/include/hw/pcmcia.h b/include/hw/pcmcia.h
index ebad7bc504..ab26802751 100644
--- a/include/hw/pcmcia.h
+++ b/include/hw/pcmcia.h
@@ -4,6 +4,7 @@
/* PCMCIA/Cardbus */
#include "hw/qdev-core.h"
+#include "qom/object.h"
typedef struct PCMCIASocket {
qemu_irq irq;
@@ -11,22 +12,17 @@ typedef struct PCMCIASocket {
} PCMCIASocket;
#define TYPE_PCMCIA_CARD "pcmcia-card"
-#define PCMCIA_CARD(obj) \
- OBJECT_CHECK(PCMCIACardState, (obj), TYPE_PCMCIA_CARD)
-#define PCMCIA_CARD_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PCMCIACardClass, obj, TYPE_PCMCIA_CARD)
-#define PCMCIA_CARD_CLASS(cls) \
- OBJECT_CLASS_CHECK(PCMCIACardClass, cls, TYPE_PCMCIA_CARD)
+OBJECT_DECLARE_TYPE(PCMCIACardState, PCMCIACardClass, PCMCIA_CARD)
-typedef struct PCMCIACardState {
+struct PCMCIACardState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
PCMCIASocket *slot;
-} PCMCIACardState;
+};
-typedef struct PCMCIACardClass {
+struct PCMCIACardClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
@@ -45,24 +41,24 @@ typedef struct PCMCIACardClass {
uint32_t address, uint16_t value);
uint16_t (*io_read)(PCMCIACardState *card, uint32_t address);
void (*io_write)(PCMCIACardState *card, uint32_t address, uint16_t value);
-} PCMCIACardClass;
+};
-#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
-#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
-#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
-#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
-#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
-#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
-#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
-#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
-#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
-#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
-#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
-#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */
-#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
-#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
-#define CISTPL_END 0xff /* Tuple End */
-#define CISTPL_ENDMARK 0xff
+#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
+#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
+#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
+#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
+#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
+#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
+#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
+#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
+#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
+#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
+#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
+#define CISTPL_MANFID 0x20 /* Manufacturer ID Tuple */
+#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
+#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
+#define CISTPL_END 0xff /* Tuple End */
+#define CISTPL_ENDMARK 0xff
/* dscm1xxxx.c */
PCMCIACardState *dscm1xxxx_init(DriveInfo *bdrv);
diff --git a/include/hw/platform-bus.h b/include/hw/platform-bus.h
index 19e20c57ce..44f30c5353 100644
--- a/include/hw/platform-bus.h
+++ b/include/hw/platform-bus.h
@@ -11,7 +11,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,16 +23,11 @@
*/
#include "hw/sysbus.h"
+#include "qom/object.h"
-typedef struct PlatformBusDevice PlatformBusDevice;
#define TYPE_PLATFORM_BUS_DEVICE "platform-bus-device"
-#define PLATFORM_BUS_DEVICE(obj) \
- OBJECT_CHECK(PlatformBusDevice, (obj), TYPE_PLATFORM_BUS_DEVICE)
-#define PLATFORM_BUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PlatformBusDeviceClass, (klass), TYPE_PLATFORM_BUS_DEVICE)
-#define PLATFORM_BUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PlatformBusDeviceClass, (obj), TYPE_PLATFORM_BUS_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(PlatformBusDevice, PLATFORM_BUS_DEVICE)
struct PlatformBusDevice {
/*< private >*/
diff --git a/include/hw/ppc/fdt.h b/include/hw/ppc/fdt.h
index a8cd85069f..b56ac2a8cb 100644
--- a/include/hw/ppc/fdt.h
+++ b/include/hw/ppc/fdt.h
@@ -15,10 +15,10 @@
#define _FDT(exp) \
do { \
- int ret = (exp); \
- if (ret < 0) { \
- error_report("error creating device tree: %s: %s", \
- #exp, fdt_strerror(ret)); \
+ int _ret = (exp); \
+ if (_ret < 0) { \
+ error_report("error creating device tree: %s: %s", \
+ #exp, fdt_strerror(_ret)); \
exit(1); \
} \
} while (0)
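Renaming the macro's temporary to _ret keeps it from shadowing a ret variable at the call site; a hypothetical caller:

    /* Illustrative call site, not part of the patch */
    static void mydev_populate_dt(void *fdt, int node)
    {
        int ret = fdt_setprop_string(fdt, node, "compatible", "acme,mydev");

        /* The old macro declared its own 'ret' here and shadowed the one
         * above; with '_ret' both can coexist without warnings. */
        _FDT(fdt_setprop_cell(fdt, node, "reg", 0x1000));

        if (ret < 0) {
            error_report("failed to set compatible: %s", fdt_strerror(ret));
        }
    }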
diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h
index 26cc469de4..4a3f644516 100644
--- a/include/hw/ppc/mac_dbdma.h
+++ b/include/hw/ppc/mac_dbdma.h
@@ -27,6 +27,7 @@
#include "qemu/iov.h"
#include "sysemu/dma.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
typedef struct DBDMA_io DBDMA_io;
@@ -160,13 +161,14 @@ typedef struct DBDMA_channel {
dbdma_cmd current;
} DBDMA_channel;
-typedef struct {
+struct DBDMAState {
SysBusDevice parent_obj;
MemoryRegion mem;
DBDMA_channel channels[DBDMA_CHANNELS];
QEMUBH *bh;
-} DBDMAState;
+};
+typedef struct DBDMAState DBDMAState;
/* Externally callable functions */
@@ -176,6 +178,6 @@ void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
void DBDMA_kick(DBDMAState *dbdma);
#define TYPE_MAC_DBDMA "mac-dbdma"
-#define MAC_DBDMA(obj) OBJECT_CHECK(DBDMAState, (obj), TYPE_MAC_DBDMA)
+OBJECT_DECLARE_SIMPLE_TYPE(DBDMAState, MAC_DBDMA)
#endif
diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h
index db0d29e6c2..9c6af8e207 100644
--- a/include/hw/ppc/openpic.h
+++ b/include/hw/ppc/openpic.h
@@ -3,6 +3,7 @@
#include "hw/sysbus.h"
#include "hw/core/cpu.h"
+#include "qom/object.h"
#define MAX_CPU 32
#define MAX_MSI 8
@@ -13,14 +14,13 @@ enum {
OPENPIC_OUTPUT_INT = 0, /* IRQ */
OPENPIC_OUTPUT_CINT, /* critical IRQ */
OPENPIC_OUTPUT_MCK, /* Machine check event */
- OPENPIC_OUTPUT_DEBUG, /* Inconditional debug event */
+ OPENPIC_OUTPUT_DEBUG, /* Unconditional debug event */
OPENPIC_OUTPUT_RESET, /* Core reset event */
OPENPIC_OUTPUT_NB,
};
typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines;
-#define OPENPIC_MODEL_RAVEN 0
#define OPENPIC_MODEL_FSL_MPIC_20 1
#define OPENPIC_MODEL_FSL_MPIC_42 2
#define OPENPIC_MODEL_KEYLARGO 3
@@ -31,13 +31,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines;
#define OPENPIC_MAX_IRQ (OPENPIC_MAX_SRC + OPENPIC_MAX_IPI + \
OPENPIC_MAX_TMR)
-/* Raven */
-#define RAVEN_MAX_CPU 2
-#define RAVEN_MAX_EXT 48
-#define RAVEN_MAX_IRQ 64
-#define RAVEN_MAX_TMR OPENPIC_MAX_TMR
-#define RAVEN_MAX_IPI OPENPIC_MAX_IPI
-
/* KeyLargo */
#define KEYLARGO_MAX_CPU 4
#define KEYLARGO_MAX_EXT 64
@@ -48,14 +41,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines;
/* Timers don't exist but this makes the code happy... */
#define KEYLARGO_TMR_IRQ (KEYLARGO_IPI_IRQ + KEYLARGO_MAX_IPI)
-/* Interrupt definitions */
-#define RAVEN_FE_IRQ (RAVEN_MAX_EXT) /* Internal functional IRQ */
-#define RAVEN_ERR_IRQ (RAVEN_MAX_EXT + 1) /* Error IRQ */
-#define RAVEN_TMR_IRQ (RAVEN_MAX_EXT + 2) /* First timer IRQ */
-#define RAVEN_IPI_IRQ (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */
-/* First doorbell IRQ */
-#define RAVEN_DBL_IRQ (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI))
-
typedef struct FslMpicInfo {
int max_ext;
} FslMpicInfo;
@@ -66,10 +51,11 @@ typedef enum IRQType {
IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */
} IRQType;
-/* Round up to the nearest 64 IRQs so that the queue length
+/*
+ * Round up to the nearest 64 IRQs so that the queue length
* won't change when moving between 32 and 64 bit hosts.
*/
-#define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63)
+#define IRQQUEUE_SIZE_BITS ROUND_UP(OPENPIC_MAX_IRQ, 64)
typedef struct IRQQueue {
unsigned long *queue;
@@ -116,8 +102,10 @@ typedef struct OpenPICTimer {
 bool qemu_timer_active; /* Is the qemu_timer running? */
struct QEMUTimer *qemu_timer;
struct OpenPICState *opp; /* Device timer is part of. */
- /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
- current_count written or read, only defined if qemu_timer_active. */
+ /*
+ * The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
+ * current_count written or read, only defined if qemu_timer_active.
+ */
uint64_t origin_time;
} OpenPICTimer;
@@ -136,9 +124,9 @@ typedef struct IRQDest {
} IRQDest;
#define TYPE_OPENPIC "openpic"
-#define OPENPIC(obj) OBJECT_CHECK(OpenPICState, (obj), TYPE_OPENPIC)
+OBJECT_DECLARE_SIMPLE_TYPE(OpenPICState, OPENPIC)
-typedef struct OpenPICState {
+struct OpenPICState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -183,6 +171,6 @@ typedef struct OpenPICState {
uint32_t irq_ipi0;
uint32_t irq_tim0;
uint32_t irq_msi;
-} OpenPICState;
+};
#endif /* OPENPIC_H */
diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h
index d4b0b0e2ff..476b136146 100644
--- a/include/hw/ppc/pnv.h
+++ b/include/hw/ppc/pnv.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,187 +20,56 @@
#ifndef PPC_PNV_H
#define PPC_PNV_H
+#include "cpu.h"
#include "hw/boards.h"
#include "hw/sysbus.h"
#include "hw/ipmi/ipmi.h"
-#include "hw/ppc/pnv_lpc.h"
#include "hw/ppc/pnv_pnor.h"
-#include "hw/ppc/pnv_psi.h"
-#include "hw/ppc/pnv_occ.h"
-#include "hw/ppc/pnv_homer.h"
-#include "hw/ppc/pnv_xive.h"
-#include "hw/ppc/pnv_core.h"
-#include "hw/pci-host/pnv_phb3.h"
-#include "hw/pci-host/pnv_phb4.h"
#define TYPE_PNV_CHIP "pnv-chip"
-#define PNV_CHIP(obj) OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP)
-#define PNV_CHIP_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvChipClass, (klass), TYPE_PNV_CHIP)
-#define PNV_CHIP_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvChipClass, (obj), TYPE_PNV_CHIP)
-typedef struct PnvChip {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- uint32_t chip_id;
- uint64_t ram_start;
- uint64_t ram_size;
-
- uint32_t nr_cores;
- uint32_t nr_threads;
- uint64_t cores_mask;
- PnvCore **cores;
-
- uint32_t num_phbs;
-
- MemoryRegion xscom_mmio;
- MemoryRegion xscom;
- AddressSpace xscom_as;
-
- gchar *dt_isa_nodename;
-} PnvChip;
-
-#define TYPE_PNV8_CHIP "pnv8-chip"
-#define PNV8_CHIP(obj) OBJECT_CHECK(Pnv8Chip, (obj), TYPE_PNV8_CHIP)
-
-typedef struct Pnv8Chip {
- /*< private >*/
- PnvChip parent_obj;
-
- /*< public >*/
- MemoryRegion icp_mmio;
-
- PnvLpcController lpc;
- Pnv8Psi psi;
- PnvOCC occ;
- PnvHomer homer;
-
-#define PNV8_CHIP_PHB3_MAX 4
- PnvPHB3 phbs[PNV8_CHIP_PHB3_MAX];
-
- XICSFabric *xics;
-} Pnv8Chip;
-
-#define TYPE_PNV9_CHIP "pnv9-chip"
-#define PNV9_CHIP(obj) OBJECT_CHECK(Pnv9Chip, (obj), TYPE_PNV9_CHIP)
-
-typedef struct Pnv9Chip {
- /*< private >*/
- PnvChip parent_obj;
-
- /*< public >*/
- PnvXive xive;
- Pnv9Psi psi;
- PnvLpcController lpc;
- PnvOCC occ;
- PnvHomer homer;
-
- uint32_t nr_quads;
- PnvQuad *quads;
-
-#define PNV9_CHIP_MAX_PEC 3
- PnvPhb4PecState pecs[PNV9_CHIP_MAX_PEC];
-} Pnv9Chip;
-
-/*
- * A SMT8 fused core is a pair of SMT4 cores.
- */
-#define PNV9_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
-#define PNV9_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
-
-#define TYPE_PNV10_CHIP "pnv10-chip"
-#define PNV10_CHIP(obj) OBJECT_CHECK(Pnv10Chip, (obj), TYPE_PNV10_CHIP)
-
-typedef struct Pnv10Chip {
- /*< private >*/
- PnvChip parent_obj;
-
- /*< public >*/
- Pnv9Psi psi;
- PnvLpcController lpc;
-} Pnv10Chip;
-
-typedef struct PnvChipClass {
- /*< private >*/
- SysBusDeviceClass parent_class;
-
- /*< public >*/
- uint64_t chip_cfam_id;
- uint64_t cores_mask;
- uint32_t num_phbs;
-
- DeviceRealize parent_realize;
-
- uint32_t (*core_pir)(PnvChip *chip, uint32_t core_id);
- void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
- void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu);
- void (*intc_destroy)(PnvChip *chip, PowerPCCPU *cpu);
- void (*intc_print_info)(PnvChip *chip, PowerPCCPU *cpu, Monitor *mon);
- ISABus *(*isa_create)(PnvChip *chip, Error **errp);
- void (*dt_populate)(PnvChip *chip, void *fdt);
- void (*pic_print_info)(PnvChip *chip, Monitor *mon);
- uint64_t (*xscom_core_base)(PnvChip *chip, uint32_t core_id);
- uint32_t (*xscom_pcba)(PnvChip *chip, uint64_t addr);
-} PnvChipClass;
+typedef struct PnvCore PnvCore;
+typedef struct PnvChip PnvChip;
+typedef struct Pnv8Chip Pnv8Chip;
+typedef struct Pnv9Chip Pnv9Chip;
+typedef struct Pnv10Chip Pnv10Chip;
#define PNV_CHIP_TYPE_SUFFIX "-" TYPE_PNV_CHIP
#define PNV_CHIP_TYPE_NAME(cpu_model) cpu_model PNV_CHIP_TYPE_SUFFIX
#define TYPE_PNV_CHIP_POWER8E PNV_CHIP_TYPE_NAME("power8e_v2.1")
-#define PNV_CHIP_POWER8E(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER8E)
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8E,
+ TYPE_PNV_CHIP_POWER8E)
#define TYPE_PNV_CHIP_POWER8 PNV_CHIP_TYPE_NAME("power8_v2.0")
-#define PNV_CHIP_POWER8(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER8)
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8,
+ TYPE_PNV_CHIP_POWER8)
#define TYPE_PNV_CHIP_POWER8NVL PNV_CHIP_TYPE_NAME("power8nvl_v1.0")
-#define PNV_CHIP_POWER8NVL(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER8NVL)
-
-#define TYPE_PNV_CHIP_POWER9 PNV_CHIP_TYPE_NAME("power9_v2.0")
-#define PNV_CHIP_POWER9(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER9)
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER8NVL,
+ TYPE_PNV_CHIP_POWER8NVL)
-#define TYPE_PNV_CHIP_POWER10 PNV_CHIP_TYPE_NAME("power10_v1.0")
-#define PNV_CHIP_POWER10(obj) \
- OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP_POWER10)
+#define TYPE_PNV_CHIP_POWER9 PNV_CHIP_TYPE_NAME("power9_v2.2")
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER9,
+ TYPE_PNV_CHIP_POWER9)
-/*
- * This generates a HW chip id depending on an index, as found on a
- * two socket system with dual chip modules :
- *
- * 0x0, 0x1, 0x10, 0x11
- *
- * 4 chips should be the maximum
- *
- * TODO: use a machine property to define the chip ids
- */
-#define PNV_CHIP_HWID(i) ((((i) & 0x3e) << 3) | ((i) & 0x1))
-
-/*
- * Converts back a HW chip id to an index. This is useful to calculate
- * the MMIO addresses of some controllers which depend on the chip id.
- */
-#define PNV_CHIP_INDEX(chip) \
- (((chip)->chip_id >> 2) * 2 + ((chip)->chip_id & 0x3))
+#define TYPE_PNV_CHIP_POWER10 PNV_CHIP_TYPE_NAME("power10_v2.0")
+DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10,
+ TYPE_PNV_CHIP_POWER10)
+PnvCore *pnv_chip_find_core(PnvChip *chip, uint32_t core_id);
PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir);
-#define TYPE_PNV_MACHINE MACHINE_TYPE_NAME("powernv")
-#define PNV_MACHINE(obj) \
- OBJECT_CHECK(PnvMachineState, (obj), TYPE_PNV_MACHINE)
-#define PNV_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvMachineClass, obj, TYPE_PNV_MACHINE)
-#define PNV_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvMachineClass, klass, TYPE_PNV_MACHINE)
+typedef struct PnvPHB PnvPHB;
+#define TYPE_PNV_MACHINE MACHINE_TYPE_NAME("powernv")
+typedef struct PnvMachineClass PnvMachineClass;
typedef struct PnvMachineState PnvMachineState;
+DECLARE_OBJ_CHECKERS(PnvMachineState, PnvMachineClass,
+ PNV_MACHINE, TYPE_PNV_MACHINE)
+
-typedef struct PnvMachineClass {
+struct PnvMachineClass {
/*< private >*/
MachineClass parent_class;
@@ -209,7 +78,8 @@ typedef struct PnvMachineClass {
int compat_size;
void (*dt_power_mgt)(PnvMachineState *pnv, void *fdt);
-} PnvMachineClass;
+ void (*i2c_init)(PnvMachineState *pnv);
+};
struct PnvMachineState {
/*< private >*/
@@ -232,6 +102,9 @@ struct PnvMachineState {
hwaddr fw_load_addr;
};
+PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id);
+PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb);
+
#define PNV_FDT_ADDR 0x01000000
#define PNV_TIMEBASE_FREQ 512000000ULL
@@ -254,11 +127,11 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
#define PNV_OCC_COMMON_AREA_BASE 0x7fff800000ull
#define PNV_OCC_SENSOR_BASE(chip) (PNV_OCC_COMMON_AREA_BASE + \
- PNV_OCC_SENSOR_DATA_BLOCK_BASE(PNV_CHIP_INDEX(chip)))
+ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
#define PNV_HOMER_SIZE 0x0000000000400000ull
#define PNV_HOMER_BASE(chip) \
- (0x7ffd800000ull + ((uint64_t)PNV_CHIP_INDEX(chip)) * PNV_HOMER_SIZE)
+ (0x7ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE)
/*
@@ -277,16 +150,16 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
*/
#define PNV_ICP_SIZE 0x0000000000100000ull
#define PNV_ICP_BASE(chip) \
- (0x0003ffff80000000ull + (uint64_t) PNV_CHIP_INDEX(chip) * PNV_ICP_SIZE)
+ (0x0003ffff80000000ull + (uint64_t) (chip)->chip_id * PNV_ICP_SIZE)
#define PNV_PSIHB_SIZE 0x0000000000100000ull
#define PNV_PSIHB_BASE(chip) \
- (0x0003fffe80000000ull + (uint64_t)PNV_CHIP_INDEX(chip) * PNV_PSIHB_SIZE)
+ (0x0003fffe80000000ull + (uint64_t)(chip)->chip_id * PNV_PSIHB_SIZE)
#define PNV_PSIHB_FSP_SIZE 0x0000000100000000ull
#define PNV_PSIHB_FSP_BASE(chip) \
- (0x0003ffe000000000ull + (uint64_t)PNV_CHIP_INDEX(chip) * \
+ (0x0003ffe000000000ull + (uint64_t)(chip)->chip_id * \
PNV_PSIHB_FSP_SIZE)
/*
@@ -322,11 +195,11 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV9_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
#define PNV9_OCC_COMMON_AREA_BASE 0x203fff800000ull
#define PNV9_OCC_SENSOR_BASE(chip) (PNV9_OCC_COMMON_AREA_BASE + \
- PNV_OCC_SENSOR_DATA_BLOCK_BASE(PNV_CHIP_INDEX(chip)))
+ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
#define PNV9_HOMER_SIZE 0x0000000000400000ull
#define PNV9_HOMER_BASE(chip) \
- (0x203ffd800000ull + ((uint64_t)PNV_CHIP_INDEX(chip)) * PNV9_HOMER_SIZE)
+ (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV9_HOMER_SIZE)
/*
* POWER10 MMIO base addresses - 16TB stride per chip
@@ -340,10 +213,37 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor);
#define PNV10_LPCM_SIZE 0x0000000100000000ull
#define PNV10_LPCM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030000000000ull)
+#define PNV10_XIVE2_IC_SIZE 0x0000000002000000ull
+#define PNV10_XIVE2_IC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030200000000ull)
+
#define PNV10_PSIHB_ESB_SIZE 0x0000000000100000ull
#define PNV10_PSIHB_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030202000000ull)
#define PNV10_PSIHB_SIZE 0x0000000000100000ull
#define PNV10_PSIHB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203000000ull)
+#define PNV10_XIVE2_TM_SIZE 0x0000000000040000ull
+#define PNV10_XIVE2_TM_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030203180000ull)
+
+#define PNV10_XIVE2_NVC_SIZE 0x0000000008000000ull
+#define PNV10_XIVE2_NVC_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006030208000000ull)
+
+#define PNV10_XIVE2_NVPG_SIZE 0x0000010000000000ull
+#define PNV10_XIVE2_NVPG_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006040000000000ull)
+
+#define PNV10_XIVE2_ESB_SIZE 0x0000010000000000ull
+#define PNV10_XIVE2_ESB_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006050000000000ull)
+
+#define PNV10_XIVE2_END_SIZE 0x0000020000000000ull
+#define PNV10_XIVE2_END_BASE(chip) PNV10_CHIP_BASE(chip, 0x0006060000000000ull)
+
+#define PNV10_OCC_COMMON_AREA_SIZE 0x0000000000800000ull
+#define PNV10_OCC_COMMON_AREA_BASE 0x300fff800000ull
+#define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \
+ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id))
+
+#define PNV10_HOMER_SIZE 0x0000000000400000ull
+#define PNV10_HOMER_BASE(chip) \
+ (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE)
+
#endif /* PPC_PNV_H */
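Dropping the PNV_CHIP_INDEX() indirection means the per-chip MMIO bases are now derived straight from chip_id; for instance, a POWER9 chip with chip_id 1 places its HOMER region at

    PNV9_HOMER_BASE = 0x203ffd800000 + 1 * 0x400000 = 0x203ffdc00000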
diff --git a/include/hw/ppc/pnv_chip.h b/include/hw/ppc/pnv_chip.h
new file mode 100644
index 0000000000..8589f3291e
--- /dev/null
+++ b/include/hw/ppc/pnv_chip.h
@@ -0,0 +1,162 @@
+#ifndef PPC_PNV_CHIP_H
+#define PPC_PNV_CHIP_H
+
+#include "hw/pci-host/pnv_phb4.h"
+#include "hw/ppc/pnv_chiptod.h"
+#include "hw/ppc/pnv_core.h"
+#include "hw/ppc/pnv_homer.h"
+#include "hw/ppc/pnv_n1_chiplet.h"
+#include "hw/ppc/pnv_lpc.h"
+#include "hw/ppc/pnv_occ.h"
+#include "hw/ppc/pnv_psi.h"
+#include "hw/ppc/pnv_sbe.h"
+#include "hw/ppc/pnv_xive.h"
+#include "hw/ppc/pnv_i2c.h"
+#include "hw/sysbus.h"
+
+OBJECT_DECLARE_TYPE(PnvChip, PnvChipClass,
+ PNV_CHIP)
+
+struct PnvChip {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ uint32_t chip_id;
+ uint64_t ram_start;
+ uint64_t ram_size;
+
+ uint32_t nr_cores;
+ uint32_t nr_threads;
+ uint64_t cores_mask;
+ PnvCore **cores;
+
+ uint32_t num_pecs;
+
+ MemoryRegion xscom_mmio;
+ MemoryRegion xscom;
+ AddressSpace xscom_as;
+
+ MemoryRegion *fw_mr;
+ gchar *dt_isa_nodename;
+};
+
+#define TYPE_PNV8_CHIP "pnv8-chip"
+DECLARE_INSTANCE_CHECKER(Pnv8Chip, PNV8_CHIP,
+ TYPE_PNV8_CHIP)
+
+struct Pnv8Chip {
+ /*< private >*/
+ PnvChip parent_obj;
+
+ /*< public >*/
+ MemoryRegion icp_mmio;
+
+ PnvLpcController lpc;
+ Pnv8Psi psi;
+ PnvOCC occ;
+ PnvHomer homer;
+
+#define PNV8_CHIP_PHB3_MAX 4
+ /*
+ * The array is used to allow quick access to the phbs by
+ * pnv_ics_get_child() and pnv_ics_resend_child().
+ */
+ PnvPHB *phbs[PNV8_CHIP_PHB3_MAX];
+ uint32_t num_phbs;
+
+ XICSFabric *xics;
+};
+
+#define TYPE_PNV9_CHIP "pnv9-chip"
+DECLARE_INSTANCE_CHECKER(Pnv9Chip, PNV9_CHIP,
+ TYPE_PNV9_CHIP)
+
+struct Pnv9Chip {
+ /*< private >*/
+ PnvChip parent_obj;
+
+ /*< public >*/
+ PnvXive xive;
+ Pnv9Psi psi;
+ PnvLpcController lpc;
+ PnvChipTOD chiptod;
+ PnvOCC occ;
+ PnvSBE sbe;
+ PnvHomer homer;
+
+ uint32_t nr_quads;
+ PnvQuad *quads;
+
+#define PNV9_CHIP_MAX_PEC 3
+ PnvPhb4PecState pecs[PNV9_CHIP_MAX_PEC];
+
+#define PNV9_CHIP_MAX_I2C 4
+ PnvI2C i2c[PNV9_CHIP_MAX_I2C];
+};
+
+/*
+ * A SMT8 fused core is a pair of SMT4 cores.
+ */
+#define PNV9_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
+#define PNV9_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
+
+#define TYPE_PNV10_CHIP "pnv10-chip"
+DECLARE_INSTANCE_CHECKER(Pnv10Chip, PNV10_CHIP,
+ TYPE_PNV10_CHIP)
+
+struct Pnv10Chip {
+ /*< private >*/
+ PnvChip parent_obj;
+
+ /*< public >*/
+ PnvXive2 xive;
+ Pnv9Psi psi;
+ PnvLpcController lpc;
+ PnvChipTOD chiptod;
+ PnvOCC occ;
+ PnvSBE sbe;
+ PnvHomer homer;
+ PnvN1Chiplet n1_chiplet;
+
+ uint32_t nr_quads;
+ PnvQuad *quads;
+
+#define PNV10_CHIP_MAX_PEC 2
+ PnvPhb4PecState pecs[PNV10_CHIP_MAX_PEC];
+
+#define PNV10_CHIP_MAX_I2C 4
+ PnvI2C i2c[PNV10_CHIP_MAX_I2C];
+};
+
+#define PNV10_PIR2FUSEDCORE(pir) (((pir) >> 3) & 0xf)
+#define PNV10_PIR2CHIP(pir) (((pir) >> 8) & 0x7f)
+
+struct PnvChipClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+
+ /*< public >*/
+ uint64_t chip_cfam_id;
+ uint64_t cores_mask;
+ uint32_t num_pecs;
+ uint32_t num_phbs;
+
+ uint32_t i2c_num_engines;
+ const int *i2c_ports_per_engine;
+
+ DeviceRealize parent_realize;
+
+ uint32_t (*chip_pir)(PnvChip *chip, uint32_t core_id, uint32_t thread_id);
+ void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
+ void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu);
+ void (*intc_destroy)(PnvChip *chip, PowerPCCPU *cpu);
+ void (*intc_print_info)(PnvChip *chip, PowerPCCPU *cpu, Monitor *mon);
+ ISABus *(*isa_create)(PnvChip *chip, Error **errp);
+ void (*dt_populate)(PnvChip *chip, void *fdt);
+ void (*pic_print_info)(PnvChip *chip, Monitor *mon);
+ uint64_t (*xscom_core_base)(PnvChip *chip, uint32_t core_id);
+ uint32_t (*xscom_pcba)(PnvChip *chip, uint64_t addr);
+};
+
+#endif
diff --git a/include/hw/ppc/pnv_chiptod.h b/include/hw/ppc/pnv_chiptod.h
new file mode 100644
index 0000000000..fde569bcbf
--- /dev/null
+++ b/include/hw/ppc/pnv_chiptod.h
@@ -0,0 +1,53 @@
+/*
+ * QEMU PowerPC PowerNV Emulation of some CHIPTOD behaviour
+ *
+ * Copyright (c) 2022-2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_CHIPTOD_H
+#define PPC_PNV_CHIPTOD_H
+
+#include "qom/object.h"
+
+#define TYPE_PNV_CHIPTOD "pnv-chiptod"
+OBJECT_DECLARE_TYPE(PnvChipTOD, PnvChipTODClass, PNV_CHIPTOD)
+#define TYPE_PNV9_CHIPTOD TYPE_PNV_CHIPTOD "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV9_CHIPTOD, TYPE_PNV9_CHIPTOD)
+#define TYPE_PNV10_CHIPTOD TYPE_PNV_CHIPTOD "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV10_CHIPTOD, TYPE_PNV10_CHIPTOD)
+
+enum tod_state {
+ tod_error = 0,
+ tod_not_set = 7,
+ tod_running = 2,
+ tod_stopped = 1,
+};
+
+typedef struct PnvCore PnvCore;
+
+struct PnvChipTOD {
+ DeviceState xd;
+
+ PnvChip *chip;
+ MemoryRegion xscom_regs;
+
+ bool primary;
+ bool secondary;
+ enum tod_state tod_state;
+ uint64_t tod_error;
+ uint64_t pss_mss_ctrl_reg;
+ PnvCore *slave_pc_target;
+};
+
+struct PnvChipTODClass {
+ DeviceClass parent_class;
+
+ void (*broadcast_ttype)(PnvChipTOD *sender, uint32_t trigger);
+ PnvCore *(*tx_ttype_target)(PnvChipTOD *chiptod, uint64_t val);
+
+ int xscom_size;
+};
+
+#endif /* PPC_PNV_CHIPTOD_H */
diff --git a/include/hw/ppc/pnv_core.h b/include/hw/ppc/pnv_core.h
index 113550eb7f..c6d62fd145 100644
--- a/include/hw/ppc/pnv_core.h
+++ b/include/hw/ppc/pnv_core.h
@@ -5,7 +5,7 @@
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2 of
+ * as published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful, but
@@ -22,35 +22,33 @@
#include "hw/cpu/core.h"
#include "target/ppc/cpu.h"
+#include "hw/ppc/pnv.h"
+#include "qom/object.h"
#define TYPE_PNV_CORE "powernv-cpu-core"
-#define PNV_CORE(obj) \
- OBJECT_CHECK(PnvCore, (obj), TYPE_PNV_CORE)
-#define PNV_CORE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvCoreClass, (klass), TYPE_PNV_CORE)
-#define PNV_CORE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvCoreClass, (obj), TYPE_PNV_CORE)
+OBJECT_DECLARE_TYPE(PnvCore, PnvCoreClass,
+ PNV_CORE)
-typedef struct PnvChip PnvChip;
-
-typedef struct PnvCore {
+struct PnvCore {
/*< private >*/
CPUCore parent_obj;
/*< public >*/
PowerPCCPU **threads;
uint32_t pir;
+ uint32_t hwid;
uint64_t hrmor;
PnvChip *chip;
MemoryRegion xscom_regs;
-} PnvCore;
+};
-typedef struct PnvCoreClass {
+struct PnvCoreClass {
DeviceClass parent_class;
const MemoryRegionOps *xscom_ops;
-} PnvCoreClass;
+ uint64_t xscom_size;
+};
#define PNV_CORE_TYPE_SUFFIX "-" TYPE_PNV_CORE
#define PNV_CORE_TYPE_NAME(cpu_model) cpu_model PNV_CORE_TYPE_SUFFIX
@@ -64,14 +62,28 @@ static inline PnvCPUState *pnv_cpu_state(PowerPCCPU *cpu)
return (PnvCPUState *)cpu->machine_data;
}
+struct PnvQuadClass {
+ DeviceClass parent_class;
+
+ const MemoryRegionOps *xscom_ops;
+ uint64_t xscom_size;
+
+ const MemoryRegionOps *xscom_qme_ops;
+ uint64_t xscom_qme_size;
+};
+
#define TYPE_PNV_QUAD "powernv-cpu-quad"
-#define PNV_QUAD(obj) \
- OBJECT_CHECK(PnvQuad, (obj), TYPE_PNV_QUAD)
-typedef struct PnvQuad {
+#define PNV_QUAD_TYPE_SUFFIX "-" TYPE_PNV_QUAD
+#define PNV_QUAD_TYPE_NAME(cpu_model) cpu_model PNV_QUAD_TYPE_SUFFIX
+
+OBJECT_DECLARE_TYPE(PnvQuad, PnvQuadClass, PNV_QUAD)
+
+struct PnvQuad {
DeviceState parent_obj;
- uint32_t id;
+ uint32_t quad_id;
MemoryRegion xscom_regs;
-} PnvQuad;
+ MemoryRegion xscom_qme_regs;
+};
#endif /* PPC_PNV_CORE_H */
diff --git a/include/hw/ppc/pnv_homer.h b/include/hw/ppc/pnv_homer.h
index 1e91c950f6..b1c5d498dc 100644
--- a/include/hw/ppc/pnv_homer.h
+++ b/include/hw/ppc/pnv_homer.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,28 +21,31 @@
#define PPC_PNV_HOMER_H
#include "hw/ppc/pnv.h"
+#include "qom/object.h"
#define TYPE_PNV_HOMER "pnv-homer"
-#define PNV_HOMER(obj) OBJECT_CHECK(PnvHomer, (obj), TYPE_PNV_HOMER)
+OBJECT_DECLARE_TYPE(PnvHomer, PnvHomerClass,
+ PNV_HOMER)
#define TYPE_PNV8_HOMER TYPE_PNV_HOMER "-POWER8"
-#define PNV8_HOMER(obj) OBJECT_CHECK(PnvHomer, (obj), TYPE_PNV8_HOMER)
+DECLARE_INSTANCE_CHECKER(PnvHomer, PNV8_HOMER,
+ TYPE_PNV8_HOMER)
#define TYPE_PNV9_HOMER TYPE_PNV_HOMER "-POWER9"
-#define PNV9_HOMER(obj) OBJECT_CHECK(PnvHomer, (obj), TYPE_PNV9_HOMER)
+DECLARE_INSTANCE_CHECKER(PnvHomer, PNV9_HOMER,
+ TYPE_PNV9_HOMER)
+#define TYPE_PNV10_HOMER TYPE_PNV_HOMER "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvHomer, PNV10_HOMER,
+ TYPE_PNV10_HOMER)
-typedef struct PnvHomer {
+struct PnvHomer {
DeviceState parent;
- struct PnvChip *chip;
+ PnvChip *chip;
MemoryRegion pba_regs;
MemoryRegion regs;
-} PnvHomer;
+};
-#define PNV_HOMER_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvHomerClass, (klass), TYPE_PNV_HOMER)
-#define PNV_HOMER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvHomerClass, (obj), TYPE_PNV_HOMER)
-typedef struct PnvHomerClass {
+struct PnvHomerClass {
DeviceClass parent_class;
int pba_size;
@@ -51,6 +54,6 @@ typedef struct PnvHomerClass {
const MemoryRegionOps *homer_ops;
hwaddr core_max_base;
-} PnvHomerClass;
+};
#endif /* PPC_PNV_HOMER_H */
diff --git a/include/hw/ppc/pnv_i2c.h b/include/hw/ppc/pnv_i2c.h
new file mode 100644
index 0000000000..1a37730f1e
--- /dev/null
+++ b/include/hw/ppc/pnv_i2c.h
@@ -0,0 +1,38 @@
+/*
+ * QEMU PowerPC PowerNV Processor I2C model
+ *
+ * Copyright (c) 2019-2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_I2C_H
+#define PPC_PNV_I2C_H
+
+#include "hw/ppc/pnv.h"
+#include "hw/i2c/i2c.h"
+#include "qemu/fifo8.h"
+
+#define TYPE_PNV_I2C "pnv-i2c"
+#define PNV_I2C(obj) OBJECT_CHECK(PnvI2C, (obj), TYPE_PNV_I2C)
+
+#define PNV_I2C_REGS 0x20
+
+typedef struct PnvI2C {
+ DeviceState parent;
+
+ struct PnvChip *chip;
+
+ qemu_irq psi_irq;
+
+ uint64_t regs[PNV_I2C_REGS];
+ uint32_t engine;
+ uint32_t num_busses;
+ I2CBus **busses;
+
+ MemoryRegion xscom_regs;
+
+ Fifo8 fifo;
+} PnvI2C;
+
+#endif /* PPC_PNV_I2C_H */
diff --git a/include/hw/ppc/pnv_lpc.h b/include/hw/ppc/pnv_lpc.h
index c1ec85d5e2..5d22c45570 100644
--- a/include/hw/ppc/pnv_lpc.h
+++ b/include/hw/ppc/pnv_lpc.h
@@ -1,12 +1,12 @@
/*
* QEMU PowerPC PowerNV LPC controller
*
- * Copyright (c) 2016, IBM Corporation.
+ * Copyright (c) 2016-2022, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,21 +20,28 @@
#ifndef PPC_PNV_LPC_H
#define PPC_PNV_LPC_H
-#include "hw/ppc/pnv_psi.h"
+#include "exec/memory.h"
+#include "hw/ppc/pnv.h"
+#include "hw/qdev-core.h"
#define TYPE_PNV_LPC "pnv-lpc"
-#define PNV_LPC(obj) \
- OBJECT_CHECK(PnvLpcController, (obj), TYPE_PNV_LPC)
+typedef struct PnvLpcClass PnvLpcClass;
+typedef struct PnvLpcController PnvLpcController;
+DECLARE_OBJ_CHECKERS(PnvLpcController, PnvLpcClass,
+ PNV_LPC, TYPE_PNV_LPC)
#define TYPE_PNV8_LPC TYPE_PNV_LPC "-POWER8"
-#define PNV8_LPC(obj) OBJECT_CHECK(PnvLpcController, (obj), TYPE_PNV8_LPC)
+DECLARE_INSTANCE_CHECKER(PnvLpcController, PNV8_LPC,
+ TYPE_PNV8_LPC)
#define TYPE_PNV9_LPC TYPE_PNV_LPC "-POWER9"
-#define PNV9_LPC(obj) OBJECT_CHECK(PnvLpcController, (obj), TYPE_PNV9_LPC)
+DECLARE_INSTANCE_CHECKER(PnvLpcController, PNV9_LPC,
+ TYPE_PNV9_LPC)
#define TYPE_PNV10_LPC TYPE_PNV_LPC "-POWER10"
-#define PNV10_LPC(obj) OBJECT_CHECK(PnvLpcController, (obj), TYPE_PNV10_LPC)
+DECLARE_INSTANCE_CHECKER(PnvLpcController, PNV10_LPC,
+ TYPE_PNV10_LPC)
-typedef struct PnvLpcController {
+struct PnvLpcController {
DeviceState parent;
uint64_t eccb_stat_reg;
@@ -78,29 +85,17 @@ typedef struct PnvLpcController {
MemoryRegion xscom_regs;
/* PSI to generate interrupts */
- PnvPsi *psi;
-} PnvLpcController;
+ qemu_irq psi_irq;
+};
-#define PNV_LPC_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvLpcClass, (klass), TYPE_PNV_LPC)
-#define PNV_LPC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvLpcClass, (obj), TYPE_PNV_LPC)
-
-typedef struct PnvLpcClass {
+struct PnvLpcClass {
DeviceClass parent_class;
- int psi_irq;
-
DeviceRealize parent_realize;
-} PnvLpcClass;
-
-/*
- * Old compilers error on typdef forward declarations. Keep them happy.
- */
-struct PnvChip;
+};
ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp);
-int pnv_dt_lpc(struct PnvChip *chip, void *fdt, int root_offset,
+int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset,
uint64_t lpcm_addr, uint64_t lpcm_size);
#endif /* PPC_PNV_LPC_H */
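Editor's note: replacing the PnvPsi back-pointer (and the per-class psi_irq number) with a plain qemu_irq follows the usual qdev GPIO pattern: the model exposes one output line and the chip wires it to the PSI controller's input. A sketch, where the function bodies and the PSI input index are illustrative and only qdev_init_gpio_out()/qdev_connect_gpio_out()/qdev_get_gpio_in() are the real APIs:

    /* In the LPC model's realize: expose the interrupt as one GPIO output */
    static void pnv_lpc_realize(DeviceState *dev, Error **errp)
    {
        PnvLpcController *lpc = PNV_LPC(dev);

        qdev_init_gpio_out(dev, &lpc->psi_irq, 1);
    }

    /*
     * Chip-side wiring (hypothetical helper; 'psi_input' is whatever PSI
     * source number the chip assigns to the LPC controller):
     */
    static void pnv_chip_wire_lpc_irq(DeviceState *lpc_dev, DeviceState *psi_dev,
                                      int psi_input)
    {
        qdev_connect_gpio_out(lpc_dev, 0, qdev_get_gpio_in(psi_dev, psi_input));
    }

The model then raises the line with qemu_set_irq(lpc->psi_irq, 1) (or qemu_irq_raise()) instead of calling back into PnvPsi directly; the same pattern applies to the OCC and SBE changes below.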
diff --git a/include/hw/ppc/pnv_n1_chiplet.h b/include/hw/ppc/pnv_n1_chiplet.h
new file mode 100644
index 0000000000..a7ad039668
--- /dev/null
+++ b/include/hw/ppc/pnv_n1_chiplet.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU PowerPC N1 chiplet model
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_N1_CHIPLET_H
+#define PPC_PNV_N1_CHIPLET_H
+
+#include "hw/ppc/pnv_nest_pervasive.h"
+
+#define TYPE_PNV_N1_CHIPLET "pnv-N1-chiplet"
+#define PNV_N1_CHIPLET(obj) OBJECT_CHECK(PnvN1Chiplet, (obj), TYPE_PNV_N1_CHIPLET)
+
+typedef struct PnvPbScom {
+ uint64_t mode;
+ uint64_t hp_mode2_curr;
+} PnvPbScom;
+
+typedef struct PnvN1Chiplet {
+ DeviceState parent;
+ MemoryRegion xscom_pb_eq_mr;
+ MemoryRegion xscom_pb_es_mr;
+ PnvNestChipletPervasive nest_pervasive; /* common pervasive chiplet unit */
+#define PNV_PB_SCOM_EQ_SIZE 8
+ PnvPbScom eq[PNV_PB_SCOM_EQ_SIZE];
+#define PNV_PB_SCOM_ES_SIZE 4
+ PnvPbScom es[PNV_PB_SCOM_ES_SIZE];
+} PnvN1Chiplet;
+#endif /* PPC_PNV_N1_CHIPLET_H */

diff --git a/include/hw/ppc/pnv_nest_pervasive.h b/include/hw/ppc/pnv_nest_pervasive.h
new file mode 100644
index 0000000000..73cacf3823
--- /dev/null
+++ b/include/hw/ppc/pnv_nest_pervasive.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU PowerPC nest pervasive common chiplet model
+ *
+ * Copyright (c) 2023, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef PPC_PNV_NEST_CHIPLET_PERVASIVE_H
+#define PPC_PNV_NEST_CHIPLET_PERVASIVE_H
+
+#define TYPE_PNV_NEST_CHIPLET_PERVASIVE "pnv-nest-chiplet-pervasive"
+#define PNV_NEST_CHIPLET_PERVASIVE(obj) OBJECT_CHECK(PnvNestChipletPervasive, (obj), TYPE_PNV_NEST_CHIPLET_PERVASIVE)
+
+typedef struct PnvPervasiveCtrlRegs {
+#define PNV_CPLT_CTRL_SIZE 6
+ uint64_t cplt_ctrl[PNV_CPLT_CTRL_SIZE];
+ uint64_t cplt_cfg0;
+ uint64_t cplt_cfg1;
+ uint64_t cplt_stat0;
+ uint64_t cplt_mask0;
+ uint64_t ctrl_protect_mode;
+ uint64_t ctrl_atomic_lock;
+} PnvPervasiveCtrlRegs;
+
+typedef struct PnvNestChipletPervasive {
+ DeviceState parent;
+ MemoryRegion xscom_ctrl_regs_mr;
+ PnvPervasiveCtrlRegs control_regs;
+} PnvNestChipletPervasive;
+
+#endif /* PPC_PNV_NEST_CHIPLET_PERVASIVE_H */
diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h
index f8d3061419..df321244e3 100644
--- a/include/hw/ppc/pnv_occ.h
+++ b/include/hw/ppc/pnv_occ.h
@@ -1,12 +1,12 @@
/*
* QEMU PowerPC PowerNV Emulation of a few OCC related registers
*
- * Copyright (c) 2015-2017, IBM Corporation.
+ * Copyright (c) 2015-2022, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,42 +20,42 @@
#ifndef PPC_PNV_OCC_H
#define PPC_PNV_OCC_H
-#include "hw/ppc/pnv_psi.h"
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
#define TYPE_PNV_OCC "pnv-occ"
-#define PNV_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV_OCC)
+OBJECT_DECLARE_TYPE(PnvOCC, PnvOCCClass,
+ PNV_OCC)
#define TYPE_PNV8_OCC TYPE_PNV_OCC "-POWER8"
-#define PNV8_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV8_OCC)
+DECLARE_INSTANCE_CHECKER(PnvOCC, PNV8_OCC,
+ TYPE_PNV8_OCC)
#define TYPE_PNV9_OCC TYPE_PNV_OCC "-POWER9"
-#define PNV9_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV9_OCC)
+DECLARE_INSTANCE_CHECKER(PnvOCC, PNV9_OCC,
+ TYPE_PNV9_OCC)
+#define TYPE_PNV10_OCC TYPE_PNV_OCC "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC)
#define PNV_OCC_SENSOR_DATA_BLOCK_OFFSET 0x00580000
#define PNV_OCC_SENSOR_DATA_BLOCK_SIZE 0x00025800
-typedef struct PnvOCC {
+struct PnvOCC {
DeviceState xd;
/* OCC Misc interrupt */
uint64_t occmisc;
- PnvPsi *psi;
+ qemu_irq psi_irq;
MemoryRegion xscom_regs;
MemoryRegion sram_regs;
-} PnvOCC;
+};
-#define PNV_OCC_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvOCCClass, (klass), TYPE_PNV_OCC)
-#define PNV_OCC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvOCCClass, (obj), TYPE_PNV_OCC)
-
-typedef struct PnvOCCClass {
+struct PnvOCCClass {
DeviceClass parent_class;
int xscom_size;
const MemoryRegionOps *xscom_ops;
- int psi_irq;
-} PnvOCCClass;
+};
#define PNV_OCC_SENSOR_DATA_BLOCK_BASE(i) \
(PNV_OCC_SENSOR_DATA_BLOCK_OFFSET + (i) * PNV_OCC_SENSOR_DATA_BLOCK_SIZE)
diff --git a/include/hw/ppc/pnv_pnor.h b/include/hw/ppc/pnv_pnor.h
index 4f96abdfb4..2e37ac88bf 100644
--- a/include/hw/ppc/pnv_pnor.h
+++ b/include/hw/ppc/pnv_pnor.h
@@ -6,8 +6,11 @@
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
-#ifndef _PPC_PNV_PNOR_H
-#define _PPC_PNV_PNOR_H
+
+#ifndef PPC_PNV_PNOR_H
+#define PPC_PNV_PNOR_H
+
+#include "hw/sysbus.h"
/*
* PNOR offset on the LPC FW address space
@@ -15,9 +18,9 @@
#define PNOR_SPI_OFFSET 0x0c000000UL
#define TYPE_PNV_PNOR "pnv-pnor"
-#define PNV_PNOR(obj) OBJECT_CHECK(PnvPnor, (obj), TYPE_PNV_PNOR)
+OBJECT_DECLARE_SIMPLE_TYPE(PnvPnor, PNV_PNOR)
-typedef struct PnvPnor {
+struct PnvPnor {
SysBusDevice parent_obj;
BlockBackend *blk;
@@ -25,6 +28,6 @@ typedef struct PnvPnor {
uint8_t *storage;
int64_t size;
MemoryRegion mmio;
-} PnvPnor;
+};
-#endif /* _PPC_PNV_PNOR_H */
+#endif /* PPC_PNV_PNOR_H */
diff --git a/include/hw/ppc/pnv_psi.h b/include/hw/ppc/pnv_psi.h
index 979fc59f33..2a6f715350 100644
--- a/include/hw/ppc/pnv_psi.h
+++ b/include/hw/ppc/pnv_psi.h
@@ -1,12 +1,12 @@
/*
* QEMU PowerPC PowerNV Processor Service Interface (PSI) model
*
- * Copyright (c) 2015-2017, IBM Corporation.
+ * Copyright (c) 2015-2022, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -23,14 +23,15 @@
#include "hw/sysbus.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xive.h"
+#include "hw/qdev-core.h"
#define TYPE_PNV_PSI "pnv-psi"
-#define PNV_PSI(obj) \
- OBJECT_CHECK(PnvPsi, (obj), TYPE_PNV_PSI)
+OBJECT_DECLARE_TYPE(PnvPsi, PnvPsiClass,
+ PNV_PSI)
#define PSIHB_XSCOM_MAX 0x20
-typedef struct PnvPsi {
+struct PnvPsi {
DeviceState parent;
MemoryRegion regs_mr;
@@ -47,36 +48,30 @@ typedef struct PnvPsi {
uint64_t regs[PSIHB_XSCOM_MAX];
MemoryRegion xscom_regs;
-} PnvPsi;
+};
#define TYPE_PNV8_PSI TYPE_PNV_PSI "-POWER8"
-#define PNV8_PSI(obj) \
- OBJECT_CHECK(Pnv8Psi, (obj), TYPE_PNV8_PSI)
+OBJECT_DECLARE_SIMPLE_TYPE(Pnv8Psi, PNV8_PSI)
-typedef struct Pnv8Psi {
+struct Pnv8Psi {
PnvPsi parent;
ICSState ics;
-} Pnv8Psi;
+};
#define TYPE_PNV9_PSI TYPE_PNV_PSI "-POWER9"
-#define PNV9_PSI(obj) \
- OBJECT_CHECK(Pnv9Psi, (obj), TYPE_PNV9_PSI)
+OBJECT_DECLARE_SIMPLE_TYPE(Pnv9Psi, PNV9_PSI)
-typedef struct Pnv9Psi {
+struct Pnv9Psi {
PnvPsi parent;
XiveSource source;
-} Pnv9Psi;
+};
#define TYPE_PNV10_PSI TYPE_PNV_PSI "-POWER10"
-#define PNV_PSI_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvPsiClass, (klass), TYPE_PNV_PSI)
-#define PNV_PSI_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvPsiClass, (obj), TYPE_PNV_PSI)
-typedef struct PnvPsiClass {
+struct PnvPsiClass {
SysBusDeviceClass parent_class;
uint32_t xscom_pcba;
@@ -84,13 +79,10 @@ typedef struct PnvPsiClass {
uint64_t bar_mask;
const char *compat;
int compat_size;
-
- void (*irq_set)(PnvPsi *psi, int, bool state);
-} PnvPsiClass;
+};
/* The PSI and FSP interrupts are muxed on the same IRQ number */
typedef enum PnvPsiIrq {
- PSIHB_IRQ_PSI, /* internal use only */
PSIHB_IRQ_FSP, /* internal use only */
PSIHB_IRQ_OCC,
PSIHB_IRQ_FSI,
@@ -101,8 +93,6 @@ typedef enum PnvPsiIrq {
#define PSI_NUM_INTERRUPTS 6
-void pnv_psi_irq_set(PnvPsi *psi, int irq, bool state);
-
/* P9 PSI Interrupts */
#define PSIHB9_IRQ_PSI 0
#define PSIHB9_IRQ_OCC 1
diff --git a/include/hw/ppc/pnv_sbe.h b/include/hw/ppc/pnv_sbe.h
new file mode 100644
index 0000000000..b6b378ad14
--- /dev/null
+++ b/include/hw/ppc/pnv_sbe.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU PowerPC PowerNV Emulation of some SBE behaviour
+ *
+ * Copyright (c) 2022, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_PNV_SBE_H
+#define PPC_PNV_SBE_H
+
+#include "exec/memory.h"
+#include "hw/qdev-core.h"
+
+#define TYPE_PNV_SBE "pnv-sbe"
+OBJECT_DECLARE_TYPE(PnvSBE, PnvSBEClass, PNV_SBE)
+#define TYPE_PNV9_SBE TYPE_PNV_SBE "-POWER9"
+DECLARE_INSTANCE_CHECKER(PnvSBE, PNV9_SBE, TYPE_PNV9_SBE)
+#define TYPE_PNV10_SBE TYPE_PNV_SBE "-POWER10"
+DECLARE_INSTANCE_CHECKER(PnvSBE, PNV10_SBE, TYPE_PNV10_SBE)
+
+struct PnvSBE {
+ DeviceState xd;
+
+ uint64_t mbox[8];
+ uint64_t sbe_doorbell;
+ uint64_t host_doorbell;
+
+ qemu_irq psi_irq;
+ QEMUTimer *timer;
+
+ MemoryRegion xscom_mbox_regs;
+ MemoryRegion xscom_ctrl_regs;
+};
+
+struct PnvSBEClass {
+ DeviceClass parent_class;
+
+ int xscom_ctrl_size;
+ int xscom_mbox_size;
+ const MemoryRegionOps *xscom_ctrl_ops;
+ const MemoryRegionOps *xscom_mbox_ops;
+};
+
+#endif /* PPC_PNV_SBE_H */
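Editor's note: PnvSBE combines a QEMUTimer with the psi_irq line. A minimal sketch of how such a pair is typically used; the completion bit, delay handling and helper names here are invented, while timer_new_ns()/timer_mod()/qemu_clock_get_ns() and qemu_irq_raise() are the real APIs:

    static void pnv_sbe_timer_expired(void *opaque)
    {
        PnvSBE *sbe = opaque;

        sbe->host_doorbell |= 0x1;          /* completion bit is made up */
        qemu_irq_raise(sbe->psi_irq);
    }

    static void pnv_sbe_start_timer(PnvSBE *sbe, int64_t delay_ns)
    {
        sbe->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pnv_sbe_timer_expired, sbe);
        timer_mod(sbe->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
    }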
diff --git a/include/hw/ppc/pnv_xive.h b/include/hw/ppc/pnv_xive.h
index 76cf16f644..9c48430ee4 100644
--- a/include/hw/ppc/pnv_xive.h
+++ b/include/hw/ppc/pnv_xive.h
@@ -10,16 +10,14 @@
#ifndef PPC_PNV_XIVE_H
#define PPC_PNV_XIVE_H
+#include "hw/ppc/pnv.h"
#include "hw/ppc/xive.h"
-
-struct PnvChip;
+#include "qom/object.h"
+#include "hw/ppc/xive2.h"
#define TYPE_PNV_XIVE "pnv-xive"
-#define PNV_XIVE(obj) OBJECT_CHECK(PnvXive, (obj), TYPE_PNV_XIVE)
-#define PNV_XIVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvXiveClass, (klass), TYPE_PNV_XIVE)
-#define PNV_XIVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvXiveClass, (obj), TYPE_PNV_XIVE)
+OBJECT_DECLARE_TYPE(PnvXive, PnvXiveClass,
+ PNV_XIVE)
#define XIVE_BLOCK_MAX 16
@@ -28,11 +26,11 @@ struct PnvChip;
#define XIVE_TABLE_VDT_MAX 16 /* VDT Domain Table (0-15) */
#define XIVE_TABLE_EDT_MAX 64 /* EDT Domain Table (0-63) */
-typedef struct PnvXive {
+struct PnvXive {
XiveRouter parent_obj;
/* Owning chip */
- struct PnvChip *chip;
+ PnvChip *chip;
/* XSCOM addresses giving access to the controller registers */
MemoryRegion xscom_regs;
@@ -87,14 +85,84 @@ typedef struct PnvXive {
uint64_t mig[XIVE_TABLE_MIG_MAX];
uint64_t vdt[XIVE_TABLE_VDT_MAX];
uint64_t edt[XIVE_TABLE_EDT_MAX];
-} PnvXive;
+};
-typedef struct PnvXiveClass {
+struct PnvXiveClass {
XiveRouterClass parent_class;
DeviceRealize parent_realize;
-} PnvXiveClass;
+};
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon);
+/*
+ * XIVE2 interrupt controller (POWER10)
+ */
+#define TYPE_PNV_XIVE2 "pnv-xive2"
+OBJECT_DECLARE_TYPE(PnvXive2, PnvXive2Class, PNV_XIVE2);
+
+typedef struct PnvXive2 {
+ Xive2Router parent_obj;
+
+ /* Owning chip */
+ PnvChip *chip;
+
+ /* XSCOM addresses giving access to the controller registers */
+ MemoryRegion xscom_regs;
+
+ MemoryRegion ic_mmio;
+ MemoryRegion ic_mmios[8];
+ MemoryRegion esb_mmio;
+ MemoryRegion end_mmio;
+ MemoryRegion nvc_mmio;
+ MemoryRegion nvpg_mmio;
+ MemoryRegion tm_mmio;
+
+ /* Shortcut values for the Main MMIO regions */
+ hwaddr ic_base;
+ uint32_t ic_shift;
+ hwaddr esb_base;
+ uint32_t esb_shift;
+ hwaddr end_base;
+ uint32_t end_shift;
+ hwaddr nvc_base;
+ uint32_t nvc_shift;
+ hwaddr nvpg_base;
+ uint32_t nvpg_shift;
+ hwaddr tm_base;
+ uint32_t tm_shift;
+
+ /* Interrupt controller registers */
+ uint64_t cq_regs[0x40];
+ uint64_t vc_regs[0x100];
+ uint64_t pc_regs[0x100];
+ uint64_t tctxt_regs[0x30];
+
+ /* To change default behavior */
+ uint64_t capabilities;
+ uint64_t config;
+
+ /* Our XIVE source objects for IPIs and ENDs */
+ XiveSource ipi_source;
+ Xive2EndSource end_source;
+
+ /*
+ * Virtual Structure Descriptor tables
+ * These are in a SRAM protected by ECC.
+ */
+ uint64_t vsds[9][XIVE_BLOCK_MAX];
+
+ /* Translation tables */
+ uint64_t tables[8][XIVE_BLOCK_MAX];
+
+} PnvXive2;
+
+typedef struct PnvXive2Class {
+ Xive2RouterClass parent_class;
+
+ DeviceRealize parent_realize;
+} PnvXive2Class;
+
+void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon);
+
#endif /* PPC_PNV_XIVE_H */
diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h
index 09156a5a7a..6209e18492 100644
--- a/include/hw/ppc/pnv_xscom.h
+++ b/include/hw/ppc/pnv_xscom.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,23 +20,22 @@
#ifndef PPC_PNV_XSCOM_H
#define PPC_PNV_XSCOM_H
-#include "qom/object.h"
+#include "exec/memory.h"
+#include "hw/ppc/pnv.h"
typedef struct PnvXScomInterface PnvXScomInterface;
#define TYPE_PNV_XSCOM_INTERFACE "pnv-xscom-interface"
#define PNV_XSCOM_INTERFACE(obj) \
INTERFACE_CHECK(PnvXScomInterface, (obj), TYPE_PNV_XSCOM_INTERFACE)
-#define PNV_XSCOM_INTERFACE_CLASS(klass) \
- OBJECT_CLASS_CHECK(PnvXScomInterfaceClass, (klass), \
+typedef struct PnvXScomInterfaceClass PnvXScomInterfaceClass;
+DECLARE_CLASS_CHECKERS(PnvXScomInterfaceClass, PNV_XSCOM_INTERFACE,
TYPE_PNV_XSCOM_INTERFACE)
-#define PNV_XSCOM_INTERFACE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PnvXScomInterfaceClass, (obj), TYPE_PNV_XSCOM_INTERFACE)
-typedef struct PnvXScomInterfaceClass {
+struct PnvXScomInterfaceClass {
InterfaceClass parent;
int (*dt_xscom)(PnvXScomInterface *dev, void *fdt, int offset);
-} PnvXScomInterfaceClass;
+};
/*
* Layout of the XSCOM PCB addresses of EX core 1 (POWER 8)
@@ -65,6 +64,9 @@ typedef struct PnvXScomInterfaceClass {
#define PNV_XSCOM_PSIHB_BASE 0x2010900
#define PNV_XSCOM_PSIHB_SIZE 0x20
+#define PNV_XSCOM_CHIPTOD_BASE 0x0040000
+#define PNV_XSCOM_CHIPTOD_SIZE 0x31
+
#define PNV_XSCOM_OCC_BASE 0x0066000
#define PNV_XSCOM_OCC_SIZE 0x6000
@@ -91,9 +93,21 @@ typedef struct PnvXScomInterfaceClass {
((uint64_t)(((core) & 0x1C) + 0x40) << 22)
#define PNV9_XSCOM_EQ_SIZE 0x100000
+#define PNV9_XSCOM_I2CM_BASE 0xa0000
+#define PNV9_XSCOM_I2CM_SIZE 0x1000
+
+#define PNV9_XSCOM_CHIPTOD_BASE PNV_XSCOM_CHIPTOD_BASE
+#define PNV9_XSCOM_CHIPTOD_SIZE PNV_XSCOM_CHIPTOD_SIZE
+
#define PNV9_XSCOM_OCC_BASE PNV_XSCOM_OCC_BASE
#define PNV9_XSCOM_OCC_SIZE 0x8000
+#define PNV9_XSCOM_SBE_CTRL_BASE 0x00050008
+#define PNV9_XSCOM_SBE_CTRL_SIZE 0x1
+
+#define PNV9_XSCOM_SBE_MBOX_BASE 0x000D0050
+#define PNV9_XSCOM_SBE_MBOX_SIZE 0x16
+
#define PNV9_XSCOM_PBA_BASE 0x5012b00
#define PNV9_XSCOM_PBA_SIZE 0x40
@@ -122,18 +136,65 @@ typedef struct PnvXScomInterfaceClass {
#define PNV10_XSCOM_EC(proc) \
((0x2 << 16) | ((1 << (3 - (proc))) << 12))
+#define PNV10_XSCOM_QME(chiplet) \
+ (PNV10_XSCOM_EQ(chiplet) | (0xE << 16))
+
+/*
+ * Make the region larger by 0x1000 (instead of starting at an offset) so the
+ * modelled addresses start from 0
+ */
+#define PNV10_XSCOM_QME_BASE(core) \
+ ((uint64_t) PNV10_XSCOM_QME(PNV10_XSCOM_EQ_CHIPLET(core)))
+#define PNV10_XSCOM_QME_SIZE (0x8000 + 0x1000)
+
#define PNV10_XSCOM_EQ_BASE(core) \
((uint64_t) PNV10_XSCOM_EQ(PNV10_XSCOM_EQ_CHIPLET(core)))
-#define PNV10_XSCOM_EQ_SIZE 0x100000
+#define PNV10_XSCOM_EQ_SIZE 0x20000
#define PNV10_XSCOM_EC_BASE(core) \
((uint64_t) PNV10_XSCOM_EQ_BASE(core) | PNV10_XSCOM_EC(core & 0x3))
-#define PNV10_XSCOM_EC_SIZE 0x100000
+#define PNV10_XSCOM_EC_SIZE 0x1000
#define PNV10_XSCOM_PSIHB_BASE 0x3011D00
#define PNV10_XSCOM_PSIHB_SIZE 0x100
-void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp);
+#define PNV10_XSCOM_I2CM_BASE PNV9_XSCOM_I2CM_BASE
+#define PNV10_XSCOM_I2CM_SIZE PNV9_XSCOM_I2CM_SIZE
+
+#define PNV10_XSCOM_CHIPTOD_BASE PNV9_XSCOM_CHIPTOD_BASE
+#define PNV10_XSCOM_CHIPTOD_SIZE PNV9_XSCOM_CHIPTOD_SIZE
+
+#define PNV10_XSCOM_OCC_BASE PNV9_XSCOM_OCC_BASE
+#define PNV10_XSCOM_OCC_SIZE PNV9_XSCOM_OCC_SIZE
+
+#define PNV10_XSCOM_SBE_CTRL_BASE PNV9_XSCOM_SBE_CTRL_BASE
+#define PNV10_XSCOM_SBE_CTRL_SIZE PNV9_XSCOM_SBE_CTRL_SIZE
+
+#define PNV10_XSCOM_SBE_MBOX_BASE PNV9_XSCOM_SBE_MBOX_BASE
+#define PNV10_XSCOM_SBE_MBOX_SIZE PNV9_XSCOM_SBE_MBOX_SIZE
+
+#define PNV10_XSCOM_PBA_BASE 0x01010CDA
+#define PNV10_XSCOM_PBA_SIZE 0x40
+
+#define PNV10_XSCOM_XIVE2_BASE 0x2010800
+#define PNV10_XSCOM_XIVE2_SIZE 0x400
+
+#define PNV10_XSCOM_N1_CHIPLET_CTRL_REGS_BASE 0x3000000
+#define PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE 0x400
+
+#define PNV10_XSCOM_N1_PB_SCOM_EQ_BASE 0x3011000
+#define PNV10_XSCOM_N1_PB_SCOM_EQ_SIZE 0x200
+
+#define PNV10_XSCOM_N1_PB_SCOM_ES_BASE 0x3011300
+#define PNV10_XSCOM_N1_PB_SCOM_ES_SIZE 0x100
+
+#define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */
+#define PNV10_XSCOM_PEC_NEST_SIZE 0x100
+
+#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */
+#define PNV10_XSCOM_PEC_PCI_SIZE 0x200
+
+void pnv_xscom_init(PnvChip *chip, uint64_t size, hwaddr addr);
int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset,
uint64_t xscom_base, uint64_t xscom_size,
const char *compat, int compat_size);
@@ -141,7 +202,7 @@ int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset,
void pnv_xscom_add_subregion(PnvChip *chip, hwaddr offset,
MemoryRegion *mr);
void pnv_xscom_region_init(MemoryRegion *mr,
- struct Object *owner,
+ Object *owner,
const MemoryRegionOps *ops,
void *opaque,
const char *name,
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index 93e614cffd..d5d119ea7f 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -1,11 +1,12 @@
#ifndef HW_PPC_H
#define HW_PPC_H
-#include "target/ppc/cpu-qom.h"
+#include "target/ppc/cpu.h"
void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level);
PowerPCCPU *ppc_get_vcpu_by_pir(int pir);
int ppc_cpu_pir(PowerPCCPU *cpu);
+int ppc_cpu_tir(PowerPCCPU *cpu);
/* PowerPC hardware exceptions management helpers */
typedef void (*clk_setup_cb)(void *opaque, uint32_t freq);
@@ -53,7 +54,12 @@ struct ppc_tb_t {
*/
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset);
-clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq);
+void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq);
+void cpu_ppc_tb_reset(CPUPPCState *env);
+void cpu_ppc_tb_free(CPUPPCState *env);
+void cpu_ppc_hdecr_init(CPUPPCState *env);
+void cpu_ppc_hdecr_exit(CPUPPCState *env);
+
/* Embedded PowerPC DCR management */
typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn);
typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val);
@@ -95,11 +101,11 @@ enum {
ARCH_MAC99_U3,
};
-#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00)
-#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01)
-#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02)
-#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03)
-#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04)
+#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00)
+#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01)
+#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02)
+#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03)
+#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04)
#define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05)
#define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06)
#define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07)
diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h
index cc19c8da5b..1bd9b8821b 100644
--- a/include/hw/ppc/ppc4xx.h
+++ b/include/hw/ppc/ppc4xx.h
@@ -27,35 +27,127 @@
#include "hw/ppc/ppc.h"
#include "exec/memory.h"
+#include "hw/sysbus.h"
-/* PowerPC 4xx core initialization */
-PowerPCCPU *ppc4xx_init(const char *cpu_model,
- clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
- uint32_t sysclk);
+/*
+ * Generic DCR device
+ */
+#define TYPE_PPC4xx_DCR_DEVICE "ppc4xx-dcr-device"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxDcrDeviceState, PPC4xx_DCR_DEVICE);
+struct Ppc4xxDcrDeviceState {
+ SysBusDevice parent_obj;
+
+ PowerPCCPU *cpu;
+};
+
+void ppc4xx_dcr_register(Ppc4xxDcrDeviceState *dev, int dcrn, void *opaque,
+ dcr_read_cb dcr_read, dcr_write_cb dcr_write);
+bool ppc4xx_dcr_realize(Ppc4xxDcrDeviceState *dev, PowerPCCPU *cpu,
+ Error **errp);
+
+/* Memory Access Layer (MAL) */
+#define TYPE_PPC4xx_MAL "ppc4xx-mal"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxMalState, PPC4xx_MAL);
+struct Ppc4xxMalState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ qemu_irq irqs[4];
+ uint32_t cfg;
+ uint32_t esr;
+ uint32_t ier;
+ uint32_t txcasr;
+ uint32_t txcarr;
+ uint32_t txeobisr;
+ uint32_t txdeir;
+ uint32_t rxcasr;
+ uint32_t rxcarr;
+ uint32_t rxeobisr;
+ uint32_t rxdeir;
+ uint32_t *txctpr;
+ uint32_t *rxctpr;
+ uint32_t *rcbs;
+ uint8_t txcnum;
+ uint8_t rxcnum;
+};
+
+/* Processor local bus (PLB) arbiter */
+#define TYPE_PPC4xx_PLB "ppc4xx-plb"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxPlbState, PPC4xx_PLB);
+struct Ppc4xxPlbState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ uint32_t acr;
+ uint32_t bear;
+ uint32_t besr;
+};
+
+/* Peripheral controller */
+#define TYPE_PPC4xx_EBC "ppc4xx-ebc"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxEbcState, PPC4xx_EBC);
+struct Ppc4xxEbcState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ uint32_t addr;
+ uint32_t bcr[8];
+ uint32_t bap[8];
+ uint32_t bear;
+ uint32_t besr0;
+ uint32_t besr1;
+ uint32_t cfg;
+};
+
+/* SDRAM DDR controller */
+typedef struct {
+ MemoryRegion ram;
+ MemoryRegion container; /* used for clipping */
+ hwaddr base;
+ hwaddr size;
+ uint32_t bcr;
+} Ppc4xxSdramBank;
-/* PowerPC 4xx universal interrupt controller */
-enum {
- PPCUIC_OUTPUT_INT = 0,
- PPCUIC_OUTPUT_CINT = 1,
- PPCUIC_OUTPUT_NB,
+#define SDR0_DDR0_DDRM_ENCODE(n) ((((unsigned long)(n)) & 0x03) << 29)
+#define SDR0_DDR0_DDRM_DDR1 0x20000000
+#define SDR0_DDR0_DDRM_DDR2 0x40000000
+
+#define TYPE_PPC4xx_SDRAM_DDR "ppc4xx-sdram-ddr"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxSdramDdrState, PPC4xx_SDRAM_DDR);
+struct Ppc4xxSdramDdrState {
+ Ppc4xxDcrDeviceState parent_obj;
+
+ MemoryRegion *dram_mr;
+    uint32_t nbanks; /* Number of banks used out of the 4, e.g. when the board has fewer slots */
+ Ppc4xxSdramBank bank[4];
+ qemu_irq irq;
+
+ uint32_t addr;
+ uint32_t besr0;
+ uint32_t besr1;
+ uint32_t bear;
+ uint32_t cfg;
+ uint32_t status;
+ uint32_t rtr;
+ uint32_t pmit;
+ uint32_t tr;
+ uint32_t ecccfg;
+ uint32_t eccesr;
};
-qemu_irq *ppcuic_init (CPUPPCState *env, qemu_irq *irqs,
- uint32_t dcr_base, int has_ssr, int has_vr);
-void ppc4xx_sdram_banks(MemoryRegion *ram, int nr_banks,
- MemoryRegion ram_memories[],
- hwaddr ram_bases[], hwaddr ram_sizes[],
- const ram_addr_t sdram_bank_sizes[]);
+void ppc4xx_sdram_ddr_enable(Ppc4xxSdramDdrState *s);
-void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks,
- MemoryRegion ram_memories[],
- hwaddr *ram_bases,
- hwaddr *ram_sizes,
- int do_init);
+/* SDRAM DDR2 controller */
+#define TYPE_PPC4xx_SDRAM_DDR2 "ppc4xx-sdram-ddr2"
+OBJECT_DECLARE_SIMPLE_TYPE(Ppc4xxSdramDdr2State, PPC4xx_SDRAM_DDR2);
+struct Ppc4xxSdramDdr2State {
+ Ppc4xxDcrDeviceState parent_obj;
-void ppc4xx_mal_init(CPUPPCState *env, uint8_t txcnum, uint8_t rxcnum,
- qemu_irq irqs[4]);
+ MemoryRegion *dram_mr;
+    uint32_t nbanks; /* Number of banks used out of the 4, e.g. when the board has fewer slots */
+ Ppc4xxSdramBank bank[4];
+
+ uint32_t addr;
+ uint32_t mcopt2;
+};
-#define TYPE_PPC4xx_PCI_HOST_BRIDGE "ppc4xx-pcihost"
+void ppc4xx_sdram_ddr2_enable(Ppc4xxSdramDdr2State *s);
#endif /* PPC4XX_H */
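Editor's note: the new Ppc4xxDcrDeviceState base class centralises DCR handling: a derived model is realized against the CPU and then installs per-DCR read/write callbacks matching the dcr_read_cb/dcr_write_cb signatures from hw/ppc/ppc.h. A sketch, with an invented device and DCR number:

    #include "qemu/osdep.h"
    #include "hw/ppc/ppc4xx.h"

    #define MYDEV_DCR_CFG 0x180            /* made-up DCR number */

    typedef struct MyDevState {
        Ppc4xxDcrDeviceState parent_obj;
        uint32_t cfg;
    } MyDevState;

    static uint32_t mydev_dcr_read(void *opaque, int dcrn)
    {
        MyDevState *s = opaque;

        return s->cfg;
    }

    static void mydev_dcr_write(void *opaque, int dcrn, uint32_t val)
    {
        MyDevState *s = opaque;

        s->cfg = val;
    }

    /* Board/SoC-side wiring, assuming 'cpu' is the SoC's PowerPCCPU: */
    static bool mydev_wire_up(MyDevState *s, PowerPCCPU *cpu, Error **errp)
    {
        if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(s), cpu, errp)) {
            return false;
        }
        ppc4xx_dcr_register(PPC4xx_DCR_DEVICE(s), MYDEV_DCR_CFG, s,
                            mydev_dcr_read, mydev_dcr_write);
        return true;
    }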
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 3134d339e8..4aaf23d28f 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -8,9 +8,11 @@
#include "hw/mem/pc-dimm.h"
#include "hw/ppc/spapr_ovec.h"
#include "hw/ppc/spapr_irq.h"
+#include "qom/object.h"
#include "hw/ppc/spapr_xive.h" /* For SpaprXive */
#include "hw/ppc/xics.h" /* For ICSState */
#include "hw/ppc/spapr_tpm_proxy.h"
+#include "hw/ppc/spapr_nested.h" /* For SpaprMachineStateNested */
struct SpaprVioBus;
struct SpaprPhbState;
@@ -20,6 +22,8 @@ typedef struct SpaprEventLogEntry SpaprEventLogEntry;
typedef struct SpaprEventSource SpaprEventSource;
typedef struct SpaprPendingHpt SpaprPendingHpt;
+typedef struct Vof Vof;
+
#define HPTE64_V_HPTE_DIRTY 0x0000000000000040ULL
#define SPAPR_ENTRY_POINT 0x100
@@ -27,10 +31,8 @@ typedef struct SpaprPendingHpt SpaprPendingHpt;
#define TYPE_SPAPR_RTC "spapr-rtc"
-#define SPAPR_RTC(obj) \
- OBJECT_CHECK(SpaprRtcState, (obj), TYPE_SPAPR_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprRtcState, SPAPR_RTC)
-typedef struct SpaprRtcState SpaprRtcState;
struct SpaprRtcState {
/*< private >*/
DeviceState parent_obj;
@@ -38,15 +40,9 @@ struct SpaprRtcState {
};
typedef struct SpaprDimmState SpaprDimmState;
-typedef struct SpaprMachineClass SpaprMachineClass;
#define TYPE_SPAPR_MACHINE "spapr-machine"
-#define SPAPR_MACHINE(obj) \
- OBJECT_CHECK(SpaprMachineState, (obj), TYPE_SPAPR_MACHINE)
-#define SPAPR_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprMachineClass, obj, TYPE_SPAPR_MACHINE)
-#define SPAPR_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprMachineClass, klass, TYPE_SPAPR_MACHINE)
+OBJECT_DECLARE_TYPE(SpaprMachineState, SpaprMachineClass, SPAPR_MACHINE)
typedef enum {
SPAPR_RESIZE_HPT_DEFAULT = 0,
@@ -81,8 +77,14 @@ typedef enum {
#define SPAPR_CAP_CCF_ASSIST 0x09
/* Implements PAPR FWNMI option */
#define SPAPR_CAP_FWNMI 0x0A
+/* Support H_RPT_INVALIDATE */
+#define SPAPR_CAP_RPT_INVALIDATE 0x0B
+/* Support for AIL modes */
+#define SPAPR_CAP_AIL_MODE_3 0x0C
+/* Nested PAPR */
+#define SPAPR_CAP_NESTED_PAPR 0x0D
/* Num Caps */
-#define SPAPR_CAP_NUM (SPAPR_CAP_FWNMI + 1)
+#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_PAPR + 1)
/*
* Capability Values
@@ -102,7 +104,29 @@ typedef enum {
#define SPAPR_CAP_FIXED_CCD 0x03
#define SPAPR_CAP_FIXED_NA 0x10 /* Lets leave a bit of a gap... */
-#define FDT_MAX_SIZE 0x100000
+#define FDT_MAX_SIZE 0x200000
+
+/* Max number of NUMA nodes */
+#define NUMA_NODES_MAX_NUM (MAX_NODES)
+
+/*
+ * NUMA FORM1 macros. FORM1_DIST_REF_POINTS was taken from
+ * MAX_DISTANCE_REF_POINTS in arch/powerpc/mm/numa.h from Linux
+ * kernel source. It represents the amount of associativity domains
+ * for non-CPU resources.
+ *
+ * FORM1_NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
+ * array for any non-CPU resource.
+ */
+#define FORM1_DIST_REF_POINTS 4
+#define FORM1_NUMA_ASSOC_SIZE (FORM1_DIST_REF_POINTS + 1)
+
+/*
+ * FORM2 NUMA affinity has a single associativity domain, giving
+ * us an assoc size of 2.
+ */
+#define FORM2_DIST_REF_POINTS 1
+#define FORM2_NUMA_ASSOC_SIZE (FORM2_DIST_REF_POINTS + 1)
typedef struct SpaprCapabilities SpaprCapabilities;
struct SpaprCapabilities {
@@ -130,17 +154,33 @@ struct SpaprMachineClass {
bool smp_threads_vsmt; /* set VSMT to smp_threads by default */
hwaddr rma_limit; /* clamp the RMA to this size */
bool pre_5_1_assoc_refpoints;
+ bool pre_5_2_numa_associativity;
+ bool pre_6_2_numa_affinity;
- void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
- uint64_t *buid, hwaddr *pio,
+ bool (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
+ uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
- unsigned n_dma, uint32_t *liobns, hwaddr *nv2gpa,
- hwaddr *nv2atsd, Error **errp);
+ unsigned n_dma, uint32_t *liobns, Error **errp);
SpaprResizeHpt resize_hpt_default;
SpaprCapabilities default_caps;
SpaprIrq *irq;
};
+#define WDT_MAX_WATCHDOGS 4 /* Maximum number of watchdog devices */
+
+#define TYPE_SPAPR_WDT "spapr-wdt"
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprWatchdog, SPAPR_WDT)
+
+typedef struct SpaprWatchdog {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ QEMUTimer timer;
+ uint8_t action; /* One of PSERIES_WDTF_ACTION_xxx */
+ uint8_t leave_others; /* leaveOtherWatchdogsRunningOnTimeout */
+} SpaprWatchdog;
+
/**
* SpaprMachineState:
*/
@@ -156,23 +196,28 @@ struct SpaprMachineState {
SpaprResizeHpt resize_hpt;
void *htab;
uint32_t htab_shift;
- uint64_t patb_entry; /* Process tbl registed in H_REGISTER_PROCESS_TABLE */
+ uint64_t patb_entry; /* Process tbl registered in H_REGISTER_PROC_TBL */
SpaprPendingHpt *pending_hpt; /* in-progress resize */
hwaddr rma_size;
uint32_t fdt_size;
uint32_t fdt_initial_size;
void *fdt_blob;
+ uint8_t fdt_rng_seed[32];
long kernel_size;
bool kernel_le;
uint64_t kernel_addr;
uint32_t initrd_base;
long initrd_size;
+ Vof *vof;
uint64_t rtc_offset; /* Now used only during incoming migration */
struct PPCTimebase tb;
- bool has_graphics;
+ bool want_stdout_path;
uint32_t vsmt; /* Virtual SMT mode (KVM's "core stride") */
+ /* Nested HV support (TCG only) */
+ SpaprMachineStateNested nested;
+
Notifier epow_notifier;
QTAILQ_HEAD(, SpaprEventLogEntry) pending_events;
bool use_hotplug_event_source;
@@ -211,6 +256,9 @@ struct SpaprMachineState {
int fwnmi_machine_check_interlock;
QemuCond fwnmi_machine_check_interlock_cond;
+ /* Set by -boot */
+ char *boot_device;
+
/*< public >*/
char *kvm_type;
char *host_model;
@@ -227,10 +275,14 @@ struct SpaprMachineState {
bool cmd_line_caps[SPAPR_CAP_NUM];
SpaprCapabilities def, eff, mig;
- unsigned gpu_numa_id;
SpaprTpmProxy *tpm_proxy;
+ uint32_t FORM1_assoc_array[NUMA_NODES_MAX_NUM][FORM1_NUMA_ASSOC_SIZE];
+ uint32_t FORM2_assoc_array[NUMA_NODES_MAX_NUM][FORM2_NUMA_ASSOC_SIZE];
+
Error *fwnmi_migration_blocker;
+
+ SpaprWatchdog wds[WDT_MAX_WATCHDOGS];
};
#define H_SUCCESS 0
@@ -311,7 +363,12 @@ struct SpaprMachineState {
#define H_P7 -60
#define H_P8 -61
#define H_P9 -62
+#define H_NOOP -63
+#define H_UNSUPPORTED -67
#define H_OVERLAP -68
+#define H_STATE -75
+#define H_IN_USE -77
+#define H_INVALID_ELEMENT_VALUE -81
#define H_UNSUPPORTED_FLAG -256
#define H_MULTI_THREADS_ACTIVE -9005
@@ -349,7 +406,7 @@ struct SpaprMachineState {
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
-#define H_SET_MODE_RESOURCE_SET_DAWR 2
+#define H_SET_MODE_RESOURCE_SET_DAWR0 2
#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
#define H_SET_MODE_RESOURCE_LE 4
@@ -381,10 +438,13 @@ struct SpaprMachineState {
#define H_CPU_CHAR_THR_RECONF_TRIG PPC_BIT(6)
#define H_CPU_CHAR_CACHE_COUNT_DIS PPC_BIT(7)
#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST PPC_BIT(9)
+
#define H_CPU_BEHAV_FAVOUR_SECURITY PPC_BIT(0)
#define H_CPU_BEHAV_L1D_FLUSH_PR PPC_BIT(1)
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR PPC_BIT(2)
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE PPC_BIT(5)
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY PPC_BIT(7)
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS PPC_BIT(8)
/* Each control block has to be on a 4K boundary */
#define H_CB_ALIGNMENT 4096
@@ -524,8 +584,20 @@ struct SpaprMachineState {
#define H_SCM_BIND_MEM 0x3EC
#define H_SCM_UNBIND_MEM 0x3F0
#define H_SCM_UNBIND_ALL 0x3FC
-
-#define MAX_HCALL_OPCODE H_SCM_UNBIND_ALL
+#define H_SCM_HEALTH 0x400
+#define H_RPT_INVALIDATE 0x448
+#define H_SCM_FLUSH 0x44C
+#define H_WATCHDOG 0x45C
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_DELETE 0x488
+
+#define MAX_HCALL_OPCODE H_GUEST_DELETE
/* The hcalls above are standardized in PAPR and implemented by pHyp
* as well.
@@ -540,7 +612,16 @@ struct SpaprMachineState {
/* Client Architecture support */
#define KVMPPC_H_CAS (KVMPPC_HCALL_BASE + 0x2)
#define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3)
-#define KVMPPC_HCALL_MAX KVMPPC_H_UPDATE_DT
+/* 0x4 was used for KVMPPC_H_UPDATE_PHANDLE in SLOF */
+#define KVMPPC_H_VOF_CLIENT (KVMPPC_HCALL_BASE + 0x5)
+
+/* Platform-specific hcalls used for nested HV KVM */
+#define KVMPPC_H_SET_PARTITION_TABLE (KVMPPC_HCALL_BASE + 0x800)
+#define KVMPPC_H_ENTER_NESTED (KVMPPC_HCALL_BASE + 0x804)
+#define KVMPPC_H_TLB_INVALIDATE (KVMPPC_HCALL_BASE + 0x808)
+#define KVMPPC_H_COPY_TOFROM_GUEST (KVMPPC_HCALL_BASE + 0x80C)
+
+#define KVMPPC_HCALL_MAX KVMPPC_H_COPY_TOFROM_GUEST
/*
* The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating
@@ -550,7 +631,6 @@ struct SpaprMachineState {
#define SVM_H_TPM_COMM 0xEF10
#define SVM_HCALL_MAX SVM_H_TPM_COMM
-
typedef struct SpaprDeviceTreeUpdateHeader {
uint32_t version_id;
} SpaprDeviceTreeUpdateHeader;
@@ -565,13 +645,19 @@ typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
target_ulong *args);
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
+void spapr_unregister_hypercall(target_ulong opcode);
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
target_ulong *args);
-target_ulong do_client_architecture_support(PowerPCCPU *cpu,
- SpaprMachineState *spapr,
- target_ulong addr,
- target_ulong fdt_bufsize);
+target_ulong vhyp_mmu_resize_hpt_prepare(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong shift);
+target_ulong vhyp_mmu_resize_hpt_commit(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong flags,
+ target_ulong shift);
+bool is_ram_address(SpaprMachineState *spapr, hwaddr addr);
+void push_sregs_to_kvm_pr(SpaprMachineState *spapr);
/* Virtual Processor Area structure constants */
#define VPA_MIN_SIZE 640
@@ -633,6 +719,7 @@ target_ulong do_client_architecture_support(PowerPCCPU *cpu,
#define RTAS_DDW_PGSIZE_128M 0x20
#define RTAS_DDW_PGSIZE_256M 0x40
#define RTAS_DDW_PGSIZE_16G 0x80
+#define RTAS_DDW_PGSIZE_2M 0x100
/* RTAS tokens */
#define RTAS_TOKEN_BASE 0x2000
@@ -716,7 +803,8 @@ static inline uint64_t ppc64_phys_to_real(uint64_t addr)
static inline uint32_t rtas_ld(target_ulong phys, int n)
{
- return ldl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n));
+ return ldl_be_phys(&address_space_memory,
+ ppc64_phys_to_real(phys + 4 * n));
}
static inline uint64_t rtas_ldq(target_ulong phys, int n)
@@ -726,7 +814,7 @@ static inline uint64_t rtas_ldq(target_ulong phys, int n)
static inline void rtas_st(target_ulong phys, int n, uint32_t val)
{
- stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n), val);
+ stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4 * n), val);
}
typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
@@ -751,7 +839,7 @@ void spapr_load_rtas(SpaprMachineState *spapr, void *fdt, hwaddr addr);
#define SPAPR_IS_PCI_LIOBN(liobn) (!!((liobn) & 0x80000000))
#define SPAPR_PCI_DMA_WINDOW_NUM(liobn) ((liobn) & 0xff)
-#define RTAS_SIZE 2048
+#define RTAS_MIN_SIZE 20 /* hv_rtas_size in SLOF */
#define RTAS_ERROR_LOG_MAX 2048
/* Offset from rtas-base where error log is placed */
@@ -769,15 +857,13 @@ static inline void spapr_dt_irq(uint32_t *intspec, int irq, bool is_lsi)
intspec[1] = is_lsi ? cpu_to_be32(1) : 0;
}
-typedef struct SpaprTceTable SpaprTceTable;
#define TYPE_SPAPR_TCE_TABLE "spapr-tce-table"
-#define SPAPR_TCE_TABLE(obj) \
- OBJECT_CHECK(SpaprTceTable, (obj), TYPE_SPAPR_TCE_TABLE)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprTceTable, SPAPR_TCE_TABLE)
#define TYPE_SPAPR_IOMMU_MEMORY_REGION "spapr-iommu-memory-region"
-#define SPAPR_IOMMU_MEMORY_REGION(obj) \
- OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_SPAPR_IOMMU_MEMORY_REGION)
+DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, SPAPR_IOMMU_MEMORY_REGION,
+ TYPE_SPAPR_IOMMU_MEMORY_REGION)
struct SpaprTceTable {
DeviceState parent;
@@ -791,6 +877,7 @@ struct SpaprTceTable {
bool bypass;
bool need_vfio;
bool skipping_replay;
+ bool def_win;
int fd;
MemoryRegion root;
IOMMUMemoryRegion iommu;
@@ -813,6 +900,7 @@ void spapr_dt_events(SpaprMachineState *sm, void *fdt);
void close_htab_fd(SpaprMachineState *spapr);
void spapr_setup_hpt(SpaprMachineState *spapr);
void spapr_free_hpt(SpaprMachineState *spapr);
+void spapr_check_mmu_mode(bool guest_radix);
SpaprTceTable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn);
void spapr_tce_table_enable(SpaprTceTable *tcet,
uint32_t page_shift, uint64_t bus_offset,
@@ -825,7 +913,7 @@ int spapr_dma_dt(void *fdt, int node_off, const char *propname,
uint32_t liobn, uint64_t window, uint32_t size);
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
SpaprTceTable *tcet);
-void spapr_pci_switch_vga(bool big_endian);
+void spapr_pci_switch_vga(SpaprMachineState *spapr, bool big_endian);
void spapr_hotplug_req_add_by_index(SpaprDrc *drc);
void spapr_hotplug_req_remove_by_index(SpaprDrc *drc);
void spapr_hotplug_req_add_by_count(SpaprDrcType drc_type,
@@ -837,10 +925,10 @@ void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type,
void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type,
uint32_t count, uint32_t index);
int spapr_hpt_shift_for_ramsize(uint64_t ramsize);
-void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
- Error **errp);
+int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp);
void spapr_clear_pending_events(SpaprMachineState *spapr);
void spapr_clear_pending_hotplug_events(SpaprMachineState *spapr);
+void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev);
int spapr_max_server_number(SpaprMachineState *spapr);
void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1);
@@ -894,7 +982,7 @@ void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg);
#define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
int spapr_get_vcpu_id(PowerPCCPU *cpu);
-void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp);
+bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp);
PowerPCCPU *spapr_find_cpu(int vcpu_id);
int spapr_caps_pre_load(void *opaque);
@@ -911,9 +999,12 @@ extern const VMStateDescription vmstate_spapr_cap_sbbc;
extern const VMStateDescription vmstate_spapr_cap_ibs;
extern const VMStateDescription vmstate_spapr_cap_hpt_maxpagesize;
extern const VMStateDescription vmstate_spapr_cap_nested_kvm_hv;
+extern const VMStateDescription vmstate_spapr_cap_nested_papr;
extern const VMStateDescription vmstate_spapr_cap_large_decr;
extern const VMStateDescription vmstate_spapr_cap_ccf_assist;
extern const VMStateDescription vmstate_spapr_cap_fwnmi;
+extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate;
+extern const VMStateDescription vmstate_spapr_wdt;
static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap)
{
@@ -926,7 +1017,7 @@ void spapr_caps_cpu_apply(SpaprMachineState *spapr, PowerPCCPU *cpu);
void spapr_caps_add_properties(SpaprMachineClass *smc);
int spapr_caps_post_migration(SpaprMachineState *spapr);
-void spapr_check_pagesize(SpaprMachineState *spapr, hwaddr pagesize,
+bool spapr_check_pagesize(SpaprMachineState *spapr, hwaddr pagesize,
Error **errp);
/*
* XIVE definitions
@@ -936,5 +1027,27 @@ void spapr_check_pagesize(SpaprMachineState *spapr, hwaddr pagesize,
#define SPAPR_OV5_XIVE_BOTH 0x80 /* Only to advertise on the platform */
void spapr_set_all_lpcrs(target_ulong value, target_ulong mask);
+void spapr_init_all_lpcrs(target_ulong value, target_ulong mask);
hwaddr spapr_get_rtas_addr(void);
+bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr);
+
+void spapr_vof_reset(SpaprMachineState *spapr, void *fdt, Error **errp);
+void spapr_vof_quiesce(MachineState *ms);
+bool spapr_vof_setprop(MachineState *ms, const char *path, const char *propname,
+ void *val, int vallen);
+target_ulong spapr_h_vof_client(PowerPCCPU *cpu, SpaprMachineState *spapr,
+ target_ulong opcode, target_ulong *args);
+target_ulong spapr_vof_client_architecture_support(MachineState *ms,
+ CPUState *cs,
+ target_ulong ovec_addr);
+void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt);
+
+/* H_WATCHDOG */
+void spapr_watchdog_init(SpaprMachineState *spapr);
+void spapr_register_nested_hv(void);
+void spapr_unregister_nested_hv(void);
+void spapr_nested_reset(SpaprMachineState *spapr);
+void spapr_register_nested_papr(void);
+void spapr_unregister_nested_papr(void);
+
#endif /* HW_SPAPR_H */
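Editor's note: the new spapr_unregister_hypercall() mirrors the existing registration API, which the nested-HV/PAPR helpers declared above presumably use to install and remove their hcall handlers at runtime. A sketch of that pattern; the handler body and the *_example() wrapper names are invented, and the opcode is one of the KVMPPC_H_* values added above:

    static target_ulong h_example(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
    {
        /* a real handler would decode args[] and act on the machine state */
        return H_SUCCESS;
    }

    void spapr_register_nested_example(void)
    {
        spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_example);
    }

    void spapr_unregister_nested_example(void)
    {
        spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE);
    }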
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index 7aed8f555b..69a52e39b8 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -13,18 +13,15 @@
#include "hw/qdev-core.h"
#include "target/ppc/cpu-qom.h"
#include "target/ppc/cpu.h"
+#include "qom/object.h"
#define TYPE_SPAPR_CPU_CORE "spapr-cpu-core"
-#define SPAPR_CPU_CORE(obj) \
- OBJECT_CHECK(SpaprCpuCore, (obj), TYPE_SPAPR_CPU_CORE)
-#define SPAPR_CPU_CORE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprCpuCoreClass, (klass), TYPE_SPAPR_CPU_CORE)
-#define SPAPR_CPU_CORE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprCpuCoreClass, (obj), TYPE_SPAPR_CPU_CORE)
+OBJECT_DECLARE_TYPE(SpaprCpuCore, SpaprCpuCoreClass,
+ SPAPR_CPU_CORE)
#define SPAPR_CPU_CORE_TYPE_NAME(model) model "-" TYPE_SPAPR_CPU_CORE
-typedef struct SpaprCpuCore {
+struct SpaprCpuCore {
/*< private >*/
CPUCore parent_obj;
@@ -32,18 +29,20 @@ typedef struct SpaprCpuCore {
PowerPCCPU **threads;
int node_id;
bool pre_3_0_migration; /* older machine don't know about SpaprCpuState */
-} SpaprCpuCore;
+};
-typedef struct SpaprCpuCoreClass {
+struct SpaprCpuCoreClass {
DeviceClass parent_class;
const char *cpu_type;
-} SpaprCpuCoreClass;
+};
const char *spapr_get_cpu_core_type(const char *cpu_type);
void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip,
target_ulong r1, target_ulong r3,
target_ulong r4);
+struct nested_ppc_state;
+
typedef struct SpaprCpuState {
uint64_t vpa_addr;
uint64_t slb_shadow_addr, slb_shadow_size;
@@ -51,6 +50,10 @@ typedef struct SpaprCpuState {
bool prod; /* not migrated, only used to improve dispatch latencies */
struct ICPState *icp;
struct XiveTCTX *tctx;
+
+ /* Fields for nested-HV support */
+ bool in_nested; /* true while the L2 is executing */
+ struct nested_ppc_state *nested_host_state; /* holds the L1 state while L2 executes */
} SpaprCpuState;
static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)
diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h
index 21af8deac1..02a63b3666 100644
--- a/include/hw/ppc/spapr_drc.h
+++ b/include/hw/ppc/spapr_drc.h
@@ -29,62 +29,21 @@
TYPE_SPAPR_DR_CONNECTOR)
#define TYPE_SPAPR_DRC_PHYSICAL "spapr-drc-physical"
-#define SPAPR_DRC_PHYSICAL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_PHYSICAL)
-#define SPAPR_DRC_PHYSICAL_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, \
- TYPE_SPAPR_DRC_PHYSICAL)
#define SPAPR_DRC_PHYSICAL(obj) OBJECT_CHECK(SpaprDrcPhysical, (obj), \
TYPE_SPAPR_DRC_PHYSICAL)
#define TYPE_SPAPR_DRC_LOGICAL "spapr-drc-logical"
-#define SPAPR_DRC_LOGICAL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_LOGICAL)
-#define SPAPR_DRC_LOGICAL_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, \
- TYPE_SPAPR_DRC_LOGICAL)
-#define SPAPR_DRC_LOGICAL(obj) OBJECT_CHECK(SpaprDrc, (obj), \
- TYPE_SPAPR_DRC_LOGICAL)
#define TYPE_SPAPR_DRC_CPU "spapr-drc-cpu"
-#define SPAPR_DRC_CPU_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_CPU)
-#define SPAPR_DRC_CPU_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, TYPE_SPAPR_DRC_CPU)
-#define SPAPR_DRC_CPU(obj) OBJECT_CHECK(SpaprDrc, (obj), \
- TYPE_SPAPR_DRC_CPU)
#define TYPE_SPAPR_DRC_PCI "spapr-drc-pci"
-#define SPAPR_DRC_PCI_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_PCI)
-#define SPAPR_DRC_PCI_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, TYPE_SPAPR_DRC_PCI)
-#define SPAPR_DRC_PCI(obj) OBJECT_CHECK(SpaprDrc, (obj), \
- TYPE_SPAPR_DRC_PCI)
#define TYPE_SPAPR_DRC_LMB "spapr-drc-lmb"
-#define SPAPR_DRC_LMB_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_LMB)
-#define SPAPR_DRC_LMB_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, TYPE_SPAPR_DRC_LMB)
-#define SPAPR_DRC_LMB(obj) OBJECT_CHECK(SpaprDrc, (obj), \
- TYPE_SPAPR_DRC_LMB)
#define TYPE_SPAPR_DRC_PHB "spapr-drc-phb"
-#define SPAPR_DRC_PHB_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_PHB)
-#define SPAPR_DRC_PHB_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, TYPE_SPAPR_DRC_PHB)
-#define SPAPR_DRC_PHB(obj) OBJECT_CHECK(SpaprDrc, (obj), \
- TYPE_SPAPR_DRC_PHB)
#define TYPE_SPAPR_DRC_PMEM "spapr-drc-pmem"
-#define SPAPR_DRC_PMEM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_PMEM)
-#define SPAPR_DRC_PMEM_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprDrcClass, klass, TYPE_SPAPR_DRC_PMEM)
-#define SPAPR_DRC_PMEM(obj) OBJECT_CHECK(SpaprDrc, (obj), \
- TYPE_SPAPR_DRC_PMEM)
+
/*
* Various hotplug types managed by SpaprDrc
*
@@ -265,7 +224,8 @@ static inline bool spapr_drc_hotplugged(DeviceState *dev)
return dev->hotplugged && !runstate_check(RUN_STATE_INMIGRATE);
}
-void spapr_drc_reset(SpaprDrc *drc);
+/* Returns true if an unplug request completed */
+bool spapr_drc_reset(SpaprDrc *drc);
uint32_t spapr_drc_index(SpaprDrc *drc);
SpaprDrcType spapr_drc_type(SpaprDrc *drc);
@@ -276,11 +236,20 @@ SpaprDrc *spapr_drc_by_index(uint32_t index);
SpaprDrc *spapr_drc_by_id(const char *type, uint32_t id);
int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask);
-void spapr_drc_attach(SpaprDrc *drc, DeviceState *d, Error **errp);
-void spapr_drc_detach(SpaprDrc *drc);
+/*
+ * spapr_drc_attach() aborts if a device is already attached to the DRC,
+ * and spapr_drc_unplug_request() aborts if no device is attached. For
+ * spapr_drc_attach() this means that the attachability of the DRC *must*
+ * be checked beforehand (e.g. check drc->dev at pre-plug).
+ */
+void spapr_drc_attach(SpaprDrc *drc, DeviceState *d);
+void spapr_drc_unplug_request(SpaprDrc *drc);
-/* Returns true if a hot plug/unplug request is pending */
-bool spapr_drc_transient(SpaprDrc *drc);
+/*
+ * Reset all DRCs, causing pending hot-plug/unplug requests to complete.
+ * Safely handles potential DRC removal (e.g. PHBs or PCI bridges).
+ */
+void spapr_drc_reset_all(struct SpaprMachineState *spapr);
static inline bool spapr_drc_unplug_requested(SpaprDrc *drc)
{
diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h
index ca8cb44213..4fd2d5853d 100644
--- a/include/hw/ppc/spapr_irq.h
+++ b/include/hw/ppc/spapr_irq.h
@@ -11,11 +11,24 @@
#define HW_SPAPR_IRQ_H
#include "target/ppc/cpu-qom.h"
+#include "qom/object.h"
/*
- * IRQ range offsets per device type
+ * The XIVE IRQ backend uses the same layout as the XICS backend but
+ * covers the full range of the IRQ number space. The IRQ numbers for
+ * the CPU IPIs are allocated at the bottom of this space, below 4K,
+ * to preserve compatibility with XICS which does not use that range.
+ */
+
+/*
+ * CPU IPI range (XIVE only)
*/
#define SPAPR_IRQ_IPI 0x0
+#define SPAPR_IRQ_NR_IPIS 0x1000
+
+/*
+ * IRQ range offsets per device type
+ */
#define SPAPR_XIRQ_BASE XICS_IRQ_BASE /* 0x1000 */
#define SPAPR_IRQ_EPOW (SPAPR_XIRQ_BASE + 0x0000)
@@ -28,19 +41,18 @@
#define SPAPR_NR_XIRQS 0x1000
-typedef struct SpaprMachineState SpaprMachineState;
+struct SpaprMachineState;
typedef struct SpaprInterruptController SpaprInterruptController;
#define TYPE_SPAPR_INTC "spapr-interrupt-controller"
#define SPAPR_INTC(obj) \
INTERFACE_CHECK(SpaprInterruptController, (obj), TYPE_SPAPR_INTC)
-#define SPAPR_INTC_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprInterruptControllerClass, (klass), TYPE_SPAPR_INTC)
-#define SPAPR_INTC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprInterruptControllerClass, (obj), TYPE_SPAPR_INTC)
+typedef struct SpaprInterruptControllerClass SpaprInterruptControllerClass;
+DECLARE_CLASS_CHECKERS(SpaprInterruptControllerClass, SPAPR_INTC,
+ TYPE_SPAPR_INTC)
-typedef struct SpaprInterruptControllerClass {
+struct SpaprInterruptControllerClass {
InterfaceClass parent;
int (*activate)(SpaprInterruptController *intc, uint32_t nr_servers,
@@ -65,22 +77,22 @@ typedef struct SpaprInterruptControllerClass {
void (*dt)(SpaprInterruptController *intc, uint32_t nr_servers,
void *fdt, uint32_t phandle);
int (*post_load)(SpaprInterruptController *intc, int version_id);
-} SpaprInterruptControllerClass;
+};
-void spapr_irq_update_active_intc(SpaprMachineState *spapr);
+void spapr_irq_update_active_intc(struct SpaprMachineState *spapr);
-int spapr_irq_cpu_intc_create(SpaprMachineState *spapr,
+int spapr_irq_cpu_intc_create(struct SpaprMachineState *spapr,
PowerPCCPU *cpu, Error **errp);
-void spapr_irq_cpu_intc_reset(SpaprMachineState *spapr, PowerPCCPU *cpu);
-void spapr_irq_cpu_intc_destroy(SpaprMachineState *spapr, PowerPCCPU *cpu);
-void spapr_irq_print_info(SpaprMachineState *spapr, Monitor *mon);
-void spapr_irq_dt(SpaprMachineState *spapr, uint32_t nr_servers,
+void spapr_irq_cpu_intc_reset(struct SpaprMachineState *spapr, PowerPCCPU *cpu);
+void spapr_irq_cpu_intc_destroy(struct SpaprMachineState *spapr, PowerPCCPU *cpu);
+void spapr_irq_print_info(struct SpaprMachineState *spapr, Monitor *mon);
+void spapr_irq_dt(struct SpaprMachineState *spapr, uint32_t nr_servers,
void *fdt, uint32_t phandle);
-uint32_t spapr_irq_nr_msis(SpaprMachineState *spapr);
-int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
+uint32_t spapr_irq_nr_msis(struct SpaprMachineState *spapr);
+int spapr_irq_msi_alloc(struct SpaprMachineState *spapr, uint32_t num, bool align,
Error **errp);
-void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num);
+void spapr_irq_msi_free(struct SpaprMachineState *spapr, int irq, uint32_t num);
typedef struct SpaprIrq {
bool xics;
@@ -92,13 +104,13 @@ extern SpaprIrq spapr_irq_xics_legacy;
extern SpaprIrq spapr_irq_xive;
extern SpaprIrq spapr_irq_dual;
-void spapr_irq_init(SpaprMachineState *spapr, Error **errp);
-int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp);
-void spapr_irq_free(SpaprMachineState *spapr, int irq, int num);
-qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq);
-int spapr_irq_post_load(SpaprMachineState *spapr, int version_id);
-void spapr_irq_reset(SpaprMachineState *spapr, Error **errp);
-int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp);
+void spapr_irq_init(struct SpaprMachineState *spapr, Error **errp);
+int spapr_irq_claim(struct SpaprMachineState *spapr, int irq, bool lsi, Error **errp);
+void spapr_irq_free(struct SpaprMachineState *spapr, int irq, int num);
+qemu_irq spapr_qirq(struct SpaprMachineState *spapr, int irq);
+int spapr_irq_post_load(struct SpaprMachineState *spapr, int version_id);
+void spapr_irq_reset(struct SpaprMachineState *spapr, Error **errp);
+int spapr_irq_get_phandle(struct SpaprMachineState *spapr, void *fdt, Error **errp);
typedef int (*SpaprInterruptControllerInitKvm)(SpaprInterruptController *,
uint32_t, Error **);
@@ -111,7 +123,7 @@ int spapr_irq_init_kvm(SpaprInterruptControllerInitKvm fn,
/*
* XICS legacy routines
*/
-int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp);
+int spapr_irq_find(struct SpaprMachineState *spapr, int num, bool align, Error **errp);
#define spapr_irq_findone(spapr, errp) spapr_irq_find(spapr, 1, false, errp)
#endif
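Editor's note: for reference, the claim/qirq pair declared above is the usual way a device obtains an interrupt line from the machine. A minimal sketch; the helper name and the error-handling policy are illustrative:

    static qemu_irq example_claim_irq(struct SpaprMachineState *spapr, int irq,
                                      Error **errp)
    {
        if (spapr_irq_claim(spapr, irq, false /* not an LSI */, errp) < 0) {
            return NULL;
        }
        return spapr_qirq(spapr, irq);
    }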
diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h
new file mode 100644
index 0000000000..93ef14adcc
--- /dev/null
+++ b/include/hw/ppc/spapr_nested.h
@@ -0,0 +1,524 @@
+#ifndef HW_SPAPR_NESTED_H
+#define HW_SPAPR_NESTED_H
+
+#include "target/ppc/cpu.h"
+
+/* Guest State Buffer Element IDs */
+#define GSB_HV_VCPU_IGNORED_ID 0x0000 /* An element whose value is ignored */
+#define GSB_HV_VCPU_STATE_SIZE 0x0001 /* HV internal format VCPU state size */
+#define GSB_VCPU_OUT_BUF_MIN_SZ 0x0002 /* Min size of the Run VCPU o/p buffer */
+#define GSB_VCPU_LPVR 0x0003 /* Logical PVR */
+#define GSB_TB_OFFSET 0x0004 /* Timebase Offset */
+#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */
+#define GSB_PROCESS_TBL 0x0006 /* Process Table */
+ /* RESERVED 0x0007 - 0x0BFF */
+#define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */
+#define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */
+#define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */
+ /* RESERVED 0x0C03 - 0x0FFF */
+#define GSB_VCPU_GPR0 0x1000
+#define GSB_VCPU_GPR1 0x1001
+#define GSB_VCPU_GPR2 0x1002
+#define GSB_VCPU_GPR3 0x1003
+#define GSB_VCPU_GPR4 0x1004
+#define GSB_VCPU_GPR5 0x1005
+#define GSB_VCPU_GPR6 0x1006
+#define GSB_VCPU_GPR7 0x1007
+#define GSB_VCPU_GPR8 0x1008
+#define GSB_VCPU_GPR9 0x1009
+#define GSB_VCPU_GPR10 0x100A
+#define GSB_VCPU_GPR11 0x100B
+#define GSB_VCPU_GPR12 0x100C
+#define GSB_VCPU_GPR13 0x100D
+#define GSB_VCPU_GPR14 0x100E
+#define GSB_VCPU_GPR15 0x100F
+#define GSB_VCPU_GPR16 0x1010
+#define GSB_VCPU_GPR17 0x1011
+#define GSB_VCPU_GPR18 0x1012
+#define GSB_VCPU_GPR19 0x1013
+#define GSB_VCPU_GPR20 0x1014
+#define GSB_VCPU_GPR21 0x1015
+#define GSB_VCPU_GPR22 0x1016
+#define GSB_VCPU_GPR23 0x1017
+#define GSB_VCPU_GPR24 0x1018
+#define GSB_VCPU_GPR25 0x1019
+#define GSB_VCPU_GPR26 0x101A
+#define GSB_VCPU_GPR27 0x101B
+#define GSB_VCPU_GPR28 0x101C
+#define GSB_VCPU_GPR29 0x101D
+#define GSB_VCPU_GPR30 0x101E
+#define GSB_VCPU_GPR31 0x101F
+#define GSB_VCPU_HDEC_EXPIRY_TB 0x1020
+#define GSB_VCPU_SPR_NIA 0x1021
+#define GSB_VCPU_SPR_MSR 0x1022
+#define GSB_VCPU_SPR_LR 0x1023
+#define GSB_VCPU_SPR_XER 0x1024
+#define GSB_VCPU_SPR_CTR 0x1025
+#define GSB_VCPU_SPR_CFAR 0x1026
+#define GSB_VCPU_SPR_SRR0 0x1027
+#define GSB_VCPU_SPR_SRR1 0x1028
+#define GSB_VCPU_SPR_DAR 0x1029
+#define GSB_VCPU_DEC_EXPIRE_TB 0x102A
+#define GSB_VCPU_SPR_VTB 0x102B
+#define GSB_VCPU_SPR_LPCR 0x102C
+#define GSB_VCPU_SPR_HFSCR 0x102D
+#define GSB_VCPU_SPR_FSCR 0x102E
+#define GSB_VCPU_SPR_FPSCR 0x102F
+#define GSB_VCPU_SPR_DAWR0 0x1030
+#define GSB_VCPU_SPR_DAWR1 0x1031
+#define GSB_VCPU_SPR_CIABR 0x1032
+#define GSB_VCPU_SPR_PURR 0x1033
+#define GSB_VCPU_SPR_SPURR 0x1034
+#define GSB_VCPU_SPR_IC 0x1035
+#define GSB_VCPU_SPR_SPRG0 0x1036
+#define GSB_VCPU_SPR_SPRG1 0x1037
+#define GSB_VCPU_SPR_SPRG2 0x1038
+#define GSB_VCPU_SPR_SPRG3 0x1039
+#define GSB_VCPU_SPR_PPR 0x103A
+#define GSB_VCPU_SPR_MMCR0 0x103B
+#define GSB_VCPU_SPR_MMCR1 0x103C
+#define GSB_VCPU_SPR_MMCR2 0x103D
+#define GSB_VCPU_SPR_MMCR3 0x103E
+#define GSB_VCPU_SPR_MMCRA 0x103F
+#define GSB_VCPU_SPR_SIER 0x1040
+#define GSB_VCPU_SPR_SIER2 0x1041
+#define GSB_VCPU_SPR_SIER3 0x1042
+#define GSB_VCPU_SPR_BESCR 0x1043
+#define GSB_VCPU_SPR_EBBHR 0x1044
+#define GSB_VCPU_SPR_EBBRR 0x1045
+#define GSB_VCPU_SPR_AMR 0x1046
+#define GSB_VCPU_SPR_IAMR 0x1047
+#define GSB_VCPU_SPR_AMOR 0x1048
+#define GSB_VCPU_SPR_UAMOR 0x1049
+#define GSB_VCPU_SPR_SDAR 0x104A
+#define GSB_VCPU_SPR_SIAR 0x104B
+#define GSB_VCPU_SPR_DSCR 0x104C
+#define GSB_VCPU_SPR_TAR 0x104D
+#define GSB_VCPU_SPR_DEXCR 0x104E
+#define GSB_VCPU_SPR_HDEXCR 0x104F
+#define GSB_VCPU_SPR_HASHKEYR 0x1050
+#define GSB_VCPU_SPR_HASHPKEYR 0x1051
+#define GSB_VCPU_SPR_CTRL 0x1052
+ /* RESERVED 0x1053 - 0x1FFF */
+#define GSB_VCPU_SPR_CR 0x2000
+#define GSB_VCPU_SPR_PIDR 0x2001
+#define GSB_VCPU_SPR_DSISR 0x2002
+#define GSB_VCPU_SPR_VSCR 0x2003
+#define GSB_VCPU_SPR_VRSAVE 0x2004
+#define GSB_VCPU_SPR_DAWRX0 0x2005
+#define GSB_VCPU_SPR_DAWRX1 0x2006
+#define GSB_VCPU_SPR_PMC1 0x2007
+#define GSB_VCPU_SPR_PMC2 0x2008
+#define GSB_VCPU_SPR_PMC3 0x2009
+#define GSB_VCPU_SPR_PMC4 0x200A
+#define GSB_VCPU_SPR_PMC5 0x200B
+#define GSB_VCPU_SPR_PMC6 0x200C
+#define GSB_VCPU_SPR_WORT 0x200D
+#define GSB_VCPU_SPR_PSPB 0x200E
+ /* RESERVED 0x200F - 0x2FFF */
+#define GSB_VCPU_SPR_VSR0 0x3000
+#define GSB_VCPU_SPR_VSR1 0x3001
+#define GSB_VCPU_SPR_VSR2 0x3002
+#define GSB_VCPU_SPR_VSR3 0x3003
+#define GSB_VCPU_SPR_VSR4 0x3004
+#define GSB_VCPU_SPR_VSR5 0x3005
+#define GSB_VCPU_SPR_VSR6 0x3006
+#define GSB_VCPU_SPR_VSR7 0x3007
+#define GSB_VCPU_SPR_VSR8 0x3008
+#define GSB_VCPU_SPR_VSR9 0x3009
+#define GSB_VCPU_SPR_VSR10 0x300A
+#define GSB_VCPU_SPR_VSR11 0x300B
+#define GSB_VCPU_SPR_VSR12 0x300C
+#define GSB_VCPU_SPR_VSR13 0x300D
+#define GSB_VCPU_SPR_VSR14 0x300E
+#define GSB_VCPU_SPR_VSR15 0x300F
+#define GSB_VCPU_SPR_VSR16 0x3010
+#define GSB_VCPU_SPR_VSR17 0x3011
+#define GSB_VCPU_SPR_VSR18 0x3012
+#define GSB_VCPU_SPR_VSR19 0x3013
+#define GSB_VCPU_SPR_VSR20 0x3014
+#define GSB_VCPU_SPR_VSR21 0x3015
+#define GSB_VCPU_SPR_VSR22 0x3016
+#define GSB_VCPU_SPR_VSR23 0x3017
+#define GSB_VCPU_SPR_VSR24 0x3018
+#define GSB_VCPU_SPR_VSR25 0x3019
+#define GSB_VCPU_SPR_VSR26 0x301A
+#define GSB_VCPU_SPR_VSR27 0x301B
+#define GSB_VCPU_SPR_VSR28 0x301C
+#define GSB_VCPU_SPR_VSR29 0x301D
+#define GSB_VCPU_SPR_VSR30 0x301E
+#define GSB_VCPU_SPR_VSR31 0x301F
+#define GSB_VCPU_SPR_VSR32 0x3020
+#define GSB_VCPU_SPR_VSR33 0x3021
+#define GSB_VCPU_SPR_VSR34 0x3022
+#define GSB_VCPU_SPR_VSR35 0x3023
+#define GSB_VCPU_SPR_VSR36 0x3024
+#define GSB_VCPU_SPR_VSR37 0x3025
+#define GSB_VCPU_SPR_VSR38 0x3026
+#define GSB_VCPU_SPR_VSR39 0x3027
+#define GSB_VCPU_SPR_VSR40 0x3028
+#define GSB_VCPU_SPR_VSR41 0x3029
+#define GSB_VCPU_SPR_VSR42 0x302A
+#define GSB_VCPU_SPR_VSR43 0x302B
+#define GSB_VCPU_SPR_VSR44 0x302C
+#define GSB_VCPU_SPR_VSR45 0x302D
+#define GSB_VCPU_SPR_VSR46 0x302E
+#define GSB_VCPU_SPR_VSR47 0x302F
+#define GSB_VCPU_SPR_VSR48 0x3030
+#define GSB_VCPU_SPR_VSR49 0x3031
+#define GSB_VCPU_SPR_VSR50 0x3032
+#define GSB_VCPU_SPR_VSR51 0x3033
+#define GSB_VCPU_SPR_VSR52 0x3034
+#define GSB_VCPU_SPR_VSR53 0x3035
+#define GSB_VCPU_SPR_VSR54 0x3036
+#define GSB_VCPU_SPR_VSR55 0x3037
+#define GSB_VCPU_SPR_VSR56 0x3038
+#define GSB_VCPU_SPR_VSR57 0x3039
+#define GSB_VCPU_SPR_VSR58 0x303A
+#define GSB_VCPU_SPR_VSR59 0x303B
+#define GSB_VCPU_SPR_VSR60 0x303C
+#define GSB_VCPU_SPR_VSR61 0x303D
+#define GSB_VCPU_SPR_VSR62 0x303E
+#define GSB_VCPU_SPR_VSR63 0x303F
+ /* RESERVED 0x3040 - 0xEFFF */
+#define GSB_VCPU_SPR_HDAR 0xF000
+#define GSB_VCPU_SPR_HDSISR 0xF001
+#define GSB_VCPU_SPR_HEIR 0xF002
+#define GSB_VCPU_SPR_ASDR 0xF003
+/* End of list of Guest State Buffer Element IDs */
+#define GSB_LAST GSB_VCPU_SPR_ASDR
+
+typedef struct SpaprMachineStateNested {
+ uint64_t ptcr;
+ uint8_t api;
+#define NESTED_API_KVM_HV 1
+#define NESTED_API_PAPR 2
+ bool capabilities_set;
+ uint32_t pvr_base;
+ GHashTable *guests;
+} SpaprMachineStateNested;
+
+typedef struct SpaprMachineStateNestedGuest {
+ uint32_t pvr_logical;
+ unsigned long nr_vcpus;
+ uint64_t parttbl[2];
+ uint64_t tb_offset;
+ struct SpaprMachineStateNestedGuestVcpu *vcpus;
+} SpaprMachineStateNestedGuest;
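Editor's note: the guests hash table above maps a nested-guest identifier to its SpaprMachineStateNestedGuest. A minimal lookup sketch follows, assuming the table is keyed by the integer guest id; the helper name and key scheme are assumptions, not the header's API.

    static SpaprMachineStateNestedGuest *
    nested_guest_lookup(SpaprMachineStateNested *nested, target_ulong guestid)
    {
        if (!nested->guests) {
            return NULL;
        }
        /* key assumed to be the guest id packed into the pointer */
        return g_hash_table_lookup(nested->guests, GINT_TO_POINTER(guestid));
    }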
+
+/* Nested PAPR API related macros */
+#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000
+#define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000
+#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000
+#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \
+ H_GUEST_CAPABILITIES_P9_MODE)
+#define H_GUEST_CAP_COPY_MEM_BMAP 0
+#define H_GUEST_CAP_P9_MODE_BMAP 1
+#define H_GUEST_CAP_P10_MODE_BMAP 2
+#define PAPR_NESTED_GUEST_MAX 4096
+#define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL
+#define PAPR_NESTED_GUEST_VCPU_MAX 2048
+#define VCPU_OUT_BUF_MIN_SZ 0x80ULL
+#define HVMASK_DEFAULT 0xffffffffffffffff
+#define HVMASK_LPCR 0x0070000003820800
+#define HVMASK_MSR 0xEBFFFFFFFFBFEFFF
+#define HVMASK_HDEXCR 0x00000000FFFFFFFF
+#define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF
+#define GSB_MAX_BUF_SIZE (1024 * 1024)
+#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000
+#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1
+#define GUEST_STATE_REQUEST_SET 0x2
+
+/*
+ * As per ISA v3.1B, the following bits are reserved:
+ * 0:2
+ * 4:57 (ISA mentions bit 58 as well but it should be used for P10)
+ * 61:63 (hence, haven't included PCR bits for v2.06 and v2.05
+ * in LOW BITS)
+ */
+#define PCR_LOW_BITS (PCR_COMPAT_3_10 | PCR_COMPAT_3_00)
+#define HVMASK_PCR (~PCR_LOW_BITS)
+
+#define GUEST_STATE_ELEMENT(i, sz, s, f, ptr, c) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = ptr, \
+ .offset = offsetof(struct s, f), \
+ .copy = (c) \
+}
+
+#define GSBE_NESTED(i, sz, f, c) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = get_guest_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+}
+
+#define GSBE_NESTED_MSK(i, sz, f, c, m) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = get_guest_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNestedGuest, f),\
+ .copy = (c), \
+ .mask = (m) \
+}
+
+#define GSBE_NESTED_VCPU(i, sz, f, c) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = get_vcpu_ptr, \
+ .offset = offsetof(struct SpaprMachineStateNestedGuestVcpu, f),\
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+}
+
+#define GUEST_STATE_ELEMENT_NOP(i, sz) { \
+ .id = (i), \
+ .size = (sz), \
+ .location = NULL, \
+ .offset = 0, \
+ .copy = NULL, \
+ .mask = HVMASK_DEFAULT \
+}
+
+#define GUEST_STATE_ELEMENT_NOP_DW(i) \
+ GUEST_STATE_ELEMENT_NOP(i, 8)
+#define GUEST_STATE_ELEMENT_NOP_W(i) \
+ GUEST_STATE_ELEMENT_NOP(i, 4)
+
+#define GUEST_STATE_ELEMENT_BASE(i, s, c) { \
+ .id = (i), \
+ .size = (s), \
+ .location = get_vcpu_state_ptr, \
+ .offset = 0, \
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+ }
+
+#define GUEST_STATE_ELEMENT_OFF(i, s, f, c) { \
+ .id = (i), \
+ .size = (s), \
+ .location = get_vcpu_state_ptr, \
+ .offset = offsetof(struct nested_ppc_state, f), \
+ .copy = (c), \
+ .mask = HVMASK_DEFAULT \
+ }
+
+#define GUEST_STATE_ELEMENT_MSK(i, s, f, c, m) { \
+ .id = (i), \
+ .size = (s), \
+ .location = get_vcpu_state_ptr, \
+ .offset = offsetof(struct nested_ppc_state, f), \
+ .copy = (c), \
+ .mask = (m) \
+ }
+
+#define GUEST_STATE_ELEMENT_ENV_QW(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 16, f, copy_state_16to16)
+#define GUEST_STATE_ELEMENT_ENV_DW(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 8, f, copy_state_8to8)
+#define GUEST_STATE_ELEMENT_ENV_W(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to8)
+#define GUEST_STATE_ELEMENT_ENV_WW(i, f) \
+ GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to4)
+#define GSE_ENV_DWM(i, f, m) \
+ GUEST_STATE_ELEMENT_MSK(i, 8, f, copy_state_8to8, m)
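Editor's note: each descriptor built by these macros carries a .mask saying which bits the L1 may actually set; HVMASK_DEFAULT lets everything through. A hedged sketch of how such a mask is typically applied when a guest value is written back (the helper name is hypothetical):

    static uint64_t apply_hv_mask(uint64_t current, uint64_t requested,
                                  uint64_t mask)
    {
        /* keep protected bits from the current value, take the rest */
        return (requested & mask) | (current & ~mask);
    }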
+
+struct guest_state_element {
+ uint16_t id;
+ uint16_t size;
+ uint8_t value[];
+} QEMU_PACKED;
+
+struct guest_state_buffer {
+ uint32_t num_elements;
+ struct guest_state_element elements[];
+} QEMU_PACKED;
+
+/* Actual buffer plus some metadata about the request */
+struct guest_state_request {
+ struct guest_state_buffer *gsb;
+ int64_t buf;
+ int64_t len;
+ uint16_t flags;
+};
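Editor's note: a guest state buffer is a packed element count followed by variable-size elements, each laid out as {id, size, value[size]}. A walking sketch, assuming the fields are stored big-endian since the buffer lives in guest memory (the helper name is hypothetical):

    static size_t gsb_payload_bytes(struct guest_state_buffer *gsb)
    {
        uint32_t n = be32_to_cpu(gsb->num_elements);
        struct guest_state_element *e = gsb->elements;
        size_t total = 0;

        for (uint32_t i = 0; i < n; i++) {
            uint16_t size = be16_to_cpu(e->size);

            total += size;
            /* the next element header starts right after this element's value */
            e = (struct guest_state_element *)(e->value + size);
        }
        return total;
    }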
+
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New members must be added at the end.
+ */
+struct kvmppc_hv_guest_state {
+ uint64_t version; /* version of this structure layout, must be first */
+ uint32_t lpid;
+ uint32_t vcpu_token;
+ /* These registers are hypervisor privileged (at least for writing) */
+ uint64_t lpcr;
+ uint64_t pcr;
+ uint64_t amor;
+ uint64_t dpdes;
+ uint64_t hfscr;
+ int64_t tb_offset;
+ uint64_t dawr0;
+ uint64_t dawrx0;
+ uint64_t ciabr;
+ uint64_t hdec_expiry;
+ uint64_t purr;
+ uint64_t spurr;
+ uint64_t ic;
+ uint64_t vtb;
+ uint64_t hdar;
+ uint64_t hdsisr;
+ uint64_t heir;
+ uint64_t asdr;
+ /* These are OS privileged but need to be set late in guest entry */
+ uint64_t srr0;
+ uint64_t srr1;
+ uint64_t sprg[4];
+ uint64_t pidr;
+ uint64_t cfar;
+ uint64_t ppr;
+ /* Version 1 ends here */
+ uint64_t dawr1;
+ uint64_t dawrx1;
+ /* Version 2 ends here */
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION 2
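Editor's note: because new members may only be appended, a consumer can use the version field to decide which trailing fields are present. A hedged validity-check sketch (the helper name is hypothetical):

    static bool hv_guest_state_version_ok(const struct kvmppc_hv_guest_state *hv)
    {
        /* anything from v1 up to the version this code was built for is fine */
        return hv->version >= 1 && hv->version <= HV_GUEST_STATE_VERSION;
    }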
+
+/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
+struct kvmppc_pt_regs {
+ uint64_t gpr[32];
+ uint64_t nip;
+ uint64_t msr;
+ uint64_t orig_gpr3; /* Used for restarting system calls */
+ uint64_t ctr;
+ uint64_t link;
+ uint64_t xer;
+ uint64_t ccr;
+ uint64_t softe; /* Soft enabled/disabled */
+ uint64_t trap; /* Reason for being here */
+ uint64_t dar; /* Fault registers */
+ uint64_t dsisr; /* on 4xx/Book-E used for ESR */
+ uint64_t result; /* Result of a system call */
+};
+
+/*
+ * nested_ppc_state is used to save the host CPU state before switching it to
+ * the guest CPU state, to be restored on H_ENTER_NESTED exit.
+ */
+struct nested_ppc_state {
+ uint64_t gpr[32];
+ uint64_t lr;
+ uint64_t ctr;
+ uint64_t cfar;
+ uint64_t msr;
+ uint64_t nip;
+ uint32_t cr;
+
+ uint64_t xer;
+
+ uint64_t lpcr;
+ uint64_t lpidr;
+ uint64_t pidr;
+ uint64_t pcr;
+ uint64_t dpdes;
+ uint64_t hfscr;
+ uint64_t srr0;
+ uint64_t srr1;
+ uint64_t sprg0;
+ uint64_t sprg1;
+ uint64_t sprg2;
+ uint64_t sprg3;
+ uint64_t ppr;
+
+ int64_t tb_offset;
+ /* Nested PAPR API */
+ uint64_t amor;
+ uint64_t dawr0;
+ uint64_t dawrx0;
+ uint64_t ciabr;
+ uint64_t purr;
+ uint64_t spurr;
+ uint64_t ic;
+ uint64_t vtb;
+ uint64_t hdar;
+ uint64_t hdsisr;
+ uint64_t heir;
+ uint64_t asdr;
+ uint64_t dawr1;
+ uint64_t dawrx1;
+ uint64_t dexcr;
+ uint64_t hdexcr;
+ uint64_t hashkeyr;
+ uint64_t hashpkeyr;
+ ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
+ uint64_t ebbhr;
+ uint64_t tar;
+ uint64_t ebbrr;
+ uint64_t bescr;
+ uint64_t iamr;
+ uint64_t amr;
+ uint64_t uamor;
+ uint64_t dscr;
+ uint64_t fscr;
+ uint64_t pspb;
+ uint64_t ctrl;
+ uint64_t vrsave;
+ uint64_t dar;
+ uint64_t dsisr;
+ uint64_t pmc1;
+ uint64_t pmc2;
+ uint64_t pmc3;
+ uint64_t pmc4;
+ uint64_t pmc5;
+ uint64_t pmc6;
+ uint64_t mmcr0;
+ uint64_t mmcr1;
+ uint64_t mmcr2;
+ uint64_t mmcra;
+ uint64_t sdar;
+ uint64_t siar;
+ uint64_t sier;
+ uint32_t vscr;
+ uint64_t fpscr;
+ int64_t dec_expiry_tb;
+};
+
+struct SpaprMachineStateNestedGuestVcpuRunBuf {
+ uint64_t addr;
+ uint64_t size;
+};
+
+typedef struct SpaprMachineStateNestedGuestVcpu {
+ bool enabled;
+ struct nested_ppc_state state;
+ struct SpaprMachineStateNestedGuestVcpuRunBuf runbufin;
+ struct SpaprMachineStateNestedGuestVcpuRunBuf runbufout;
+ int64_t tb_offset;
+ uint64_t hdecr_expiry_tb;
+} SpaprMachineStateNestedGuestVcpu;
+
+struct guest_state_element_type {
+ uint16_t id;
+ int size;
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1
+#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2
+ uint16_t flags;
+ void *(*location)(SpaprMachineStateNestedGuest *, target_ulong);
+ size_t offset;
+ void (*copy)(void *, void *, bool);
+ uint64_t mask;
+};
+
+void spapr_exit_nested(PowerPCCPU *cpu, int excp);
+typedef struct SpaprMachineState SpaprMachineState;
+bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
+ target_ulong lpid, ppc_v3_pate_t *entry);
+uint8_t spapr_nested_api(SpaprMachineState *spapr);
+void spapr_nested_gsb_init(void);
+bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
+ target_ulong lpid, ppc_v3_pate_t *entry);
+#endif /* HW_SPAPR_NESTED_H */
diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h
new file mode 100644
index 0000000000..7cb3367400
--- /dev/null
+++ b/include/hw/ppc/spapr_numa.h
@@ -0,0 +1,37 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition NUMA associativity handling
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_SPAPR_NUMA_H
+#define HW_SPAPR_NUMA_H
+
+#include "hw/boards.h"
+#include "hw/ppc/spapr.h"
+
+/*
+ * Having both SpaprMachineState and MachineState as arguments
+ * feels odd, but it will spare a MACHINE() call inside the
+ * function. spapr_machine_init() is the only caller for it, and
+ * it has both pointers resolved already.
+ */
+void spapr_numa_associativity_init(SpaprMachineState *spapr,
+ MachineState *machine);
+void spapr_numa_associativity_check(SpaprMachineState *spapr);
+void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas);
+void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, int nodeid);
+int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, PowerPCCPU *cpu);
+int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
+ int offset);
+unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine);
+
+#endif /* HW_SPAPR_NUMA_H */
diff --git a/include/hw/ppc/spapr_nvdimm.h b/include/hw/ppc/spapr_nvdimm.h
index b3330cc485..e9436cb6ef 100644
--- a/include/hw/ppc/spapr_nvdimm.h
+++ b/include/hw/ppc/spapr_nvdimm.h
@@ -11,27 +11,16 @@
#define HW_SPAPR_NVDIMM_H
#include "hw/mem/nvdimm.h"
-#include "hw/ppc/spapr.h"
-/*
- * The nvdimm size should be aligned to SCM block size.
- * The SCM block size should be aligned to SPAPR_MEMORY_BLOCK_SIZE
- * inorder to have SCM regions not to overlap with dimm memory regions.
- * The SCM devices can have variable block sizes. For now, fixing the
- * block size to the minimum value.
- */
-#define SPAPR_MINIMUM_SCM_BLOCK_SIZE SPAPR_MEMORY_BLOCK_SIZE
-
-/* Have an explicit check for alignment */
-QEMU_BUILD_BUG_ON(SPAPR_MINIMUM_SCM_BLOCK_SIZE % SPAPR_MEMORY_BLOCK_SIZE);
+typedef struct SpaprDrc SpaprDrc;
+typedef struct SpaprMachineState SpaprMachineState;
int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
void *fdt, int *fdt_start_offset, Error **errp);
-int spapr_dt_nvdimm(void *fdt, int parent_offset, NVDIMMDevice *nvdimm);
-void spapr_dt_persistent_memory(void *fdt);
-void spapr_nvdimm_validate_opts(NVDIMMDevice *nvdimm, uint64_t size,
- Error **errp);
-void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp);
-void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr);
+void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt);
+bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
+ uint64_t size, Error **errp);
+void spapr_add_nvdimm(DeviceState *dev, uint64_t slot);
+void spapr_nvdimm_finish_flushes(void);
#endif
diff --git a/include/hw/ppc/spapr_ovec.h b/include/hw/ppc/spapr_ovec.h
index d4dee9e06a..c3e8b98e7e 100644
--- a/include/hw/ppc/spapr_ovec.h
+++ b/include/hw/ppc/spapr_ovec.h
@@ -49,6 +49,7 @@ typedef struct SpaprOptionVector SpaprOptionVector;
/* option vector 5 */
#define OV5_DRCONF_MEMORY OV_BIT(2, 2)
#define OV5_FORM1_AFFINITY OV_BIT(5, 0)
+#define OV5_FORM2_AFFINITY OV_BIT(5, 2)
#define OV5_HP_EVT OV_BIT(6, 5)
#define OV5_HPT_RESIZE OV_BIT(6, 7)
#define OV5_DRMEM_V2 OV_BIT(22, 0)
@@ -71,6 +72,7 @@ void spapr_ovec_cleanup(SpaprOptionVector *ov);
void spapr_ovec_set(SpaprOptionVector *ov, long bitnr);
void spapr_ovec_clear(SpaprOptionVector *ov, long bitnr);
bool spapr_ovec_test(SpaprOptionVector *ov, long bitnr);
+bool spapr_ovec_empty(SpaprOptionVector *ov);
SpaprOptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector);
int spapr_dt_ovec(void *fdt, int fdt_offset,
SpaprOptionVector *ov, const char *name);
diff --git a/include/hw/ppc/spapr_rtas.h b/include/hw/ppc/spapr_rtas.h
deleted file mode 100644
index 383611f10f..0000000000
--- a/include/hw/ppc/spapr_rtas.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef HW_SPAPR_RTAS_H
-#define HW_SPAPR_RTAS_H
-/*
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args,
- uint32_t nret, uint64_t rets);
-#endif /* HW_SPAPR_RTAS_H */
diff --git a/include/hw/ppc/spapr_tpm_proxy.h b/include/hw/ppc/spapr_tpm_proxy.h
index c574e22ba4..96d2a9697e 100644
--- a/include/hw/ppc/spapr_tpm_proxy.h
+++ b/include/hw/ppc/spapr_tpm_proxy.h
@@ -17,15 +17,14 @@
#include "hw/qdev-core.h"
#define TYPE_SPAPR_TPM_PROXY "spapr-tpm-proxy"
-#define SPAPR_TPM_PROXY(obj) OBJECT_CHECK(SpaprTpmProxy, (obj), \
- TYPE_SPAPR_TPM_PROXY)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprTpmProxy, SPAPR_TPM_PROXY)
-typedef struct SpaprTpmProxy {
+struct SpaprTpmProxy {
/*< private >*/
DeviceState parent;
char *host_path;
int host_fd;
-} SpaprTpmProxy;
+};
#endif /* HW_SPAPR_TPM_PROXY_H */
diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
index bed7df60e3..7eae1a4847 100644
--- a/include/hw/ppc/spapr_vio.h
+++ b/include/hw/ppc/spapr_vio.h
@@ -11,7 +11,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -25,17 +25,14 @@
#include "hw/ppc/spapr.h"
#include "sysemu/dma.h"
#include "hw/irq.h"
+#include "qom/object.h"
#define TYPE_VIO_SPAPR_DEVICE "vio-spapr-device"
-#define VIO_SPAPR_DEVICE(obj) \
- OBJECT_CHECK(SpaprVioDevice, (obj), TYPE_VIO_SPAPR_DEVICE)
-#define VIO_SPAPR_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SpaprVioDeviceClass, (klass), TYPE_VIO_SPAPR_DEVICE)
-#define VIO_SPAPR_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SpaprVioDeviceClass, (obj), TYPE_VIO_SPAPR_DEVICE)
+OBJECT_DECLARE_TYPE(SpaprVioDevice, SpaprVioDeviceClass,
+ VIO_SPAPR_DEVICE)
#define TYPE_SPAPR_VIO_BUS "spapr-vio-bus"
-#define SPAPR_VIO_BUS(obj) OBJECT_CHECK(SpaprVioBus, (obj), TYPE_SPAPR_VIO_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprVioBus, SPAPR_VIO_BUS)
#define TYPE_SPAPR_VIO_BRIDGE "spapr-vio-bridge"
@@ -46,10 +43,8 @@ typedef struct SpaprVioCrq {
int(*SendFunc)(struct SpaprVioDevice *vdev, uint8_t *crq);
} SpaprVioCrq;
-typedef struct SpaprVioDevice SpaprVioDevice;
-typedef struct SpaprVioBus SpaprVioBus;
-typedef struct SpaprVioDeviceClass {
+struct SpaprVioDeviceClass {
DeviceClass parent_class;
const char *dt_name, *dt_type, *dt_compatible;
@@ -59,7 +54,7 @@ typedef struct SpaprVioDeviceClass {
void (*reset)(SpaprVioDevice *dev);
int (*devnode)(SpaprVioDevice *dev, void *fdt, int node_off);
const char *(*get_dt_compatible)(SpaprVioDevice *dev);
-} SpaprVioDeviceClass;
+};
struct SpaprVioDevice {
DeviceState qdev;
@@ -96,35 +91,47 @@ static inline void spapr_vio_irq_pulse(SpaprVioDevice *dev)
static inline bool spapr_vio_dma_valid(SpaprVioDevice *dev, uint64_t taddr,
uint32_t size, DMADirection dir)
{
- return dma_memory_valid(&dev->as, taddr, size, dir);
+ return dma_memory_valid(&dev->as, taddr, size, dir, MEMTXATTRS_UNSPECIFIED);
}
static inline int spapr_vio_dma_read(SpaprVioDevice *dev, uint64_t taddr,
void *buf, uint32_t size)
{
- return (dma_memory_read(&dev->as, taddr, buf, size) != 0) ?
+ return (dma_memory_read(&dev->as, taddr,
+ buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
H_DEST_PARM : H_SUCCESS;
}
static inline int spapr_vio_dma_write(SpaprVioDevice *dev, uint64_t taddr,
const void *buf, uint32_t size)
{
- return (dma_memory_write(&dev->as, taddr, buf, size) != 0) ?
+ return (dma_memory_write(&dev->as, taddr,
+ buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
H_DEST_PARM : H_SUCCESS;
}
static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr,
uint8_t c, uint32_t size)
{
- return (dma_memory_set(&dev->as, taddr, c, size) != 0) ?
+ return (dma_memory_set(&dev->as, taddr,
+ c, size, MEMTXATTRS_UNSPECIFIED) != 0) ?
H_DEST_PARM : H_SUCCESS;
}
-#define vio_stb(_dev, _addr, _val) (stb_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_sth(_dev, _addr, _val) (stw_be_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_stl(_dev, _addr, _val) (stl_be_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_stq(_dev, _addr, _val) (stq_be_dma(&(_dev)->as, (_addr), (_val)))
-#define vio_ldq(_dev, _addr) (ldq_be_dma(&(_dev)->as, (_addr)))
+#define vio_stb(_dev, _addr, _val) \
+ (stb_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_sth(_dev, _addr, _val) \
+ (stw_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_stl(_dev, _addr, _val) \
+ (stl_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_stq(_dev, _addr, _val) \
+ (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED))
+#define vio_ldq(_dev, _addr) \
+ ({ \
+ uint64_t _val; \
+ ldq_be_dma(&(_dev)->as, (_addr), &_val, MEMTXATTRS_UNSPECIFIED); \
+ _val; \
+ })
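Editor's note: ldq_be_dma() now returns a MemTxResult and passes the loaded value back through a pointer, so vio_ldq() is rewritten as a GNU C statement expression to stay usable as an rvalue. A usage sketch, where taddr is an illustrative guest DMA address:

    uint64_t val = vio_ldq(dev, taddr);   /* big-endian quadword from the device's AS */

Note the macro still discards the MemTxResult, matching what the pre-existing callers expected.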
int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq);
diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
index 93d09d68de..b282960ad9 100644
--- a/include/hw/ppc/spapr_xive.h
+++ b/include/hw/ppc/spapr_xive.h
@@ -49,6 +49,8 @@ typedef struct SpaprXive {
void *tm_mmap;
MemoryRegion tm_mmio_kvm;
VMChangeStateEntry *change;
+
+ uint8_t hv_prio;
} SpaprXive;
typedef struct SpaprXiveClass {
@@ -64,9 +66,8 @@ typedef struct SpaprXiveClass {
*/
#define SPAPR_XIVE_BLOCK_ID 0x0
-void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon);
-
-void spapr_xive_hcall_init(SpaprMachineState *spapr);
+struct SpaprMachineState;
+void spapr_xive_hcall_init(struct SpaprMachineState *spapr);
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable);
void spapr_xive_map_mmio(SpaprXive *xive);
@@ -80,15 +81,15 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
Error **errp);
void kvmppc_xive_disconnect(SpaprInterruptController *intc);
void kvmppc_xive_reset(SpaprXive *xive, Error **errp);
-void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
- Error **errp);
+int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
+ Error **errp);
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp);
uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
uint64_t data, bool write);
-void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
+int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
uint32_t end_idx, XiveEND *end,
Error **errp);
-void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
+int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
uint32_t end_idx, XiveEND *end,
Error **errp);
void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp);
diff --git a/include/hw/ppc/vof.h b/include/hw/ppc/vof.h
new file mode 100644
index 0000000000..d3f293da8b
--- /dev/null
+++ b/include/hw/ppc/vof.h
@@ -0,0 +1,65 @@
+/*
+ * Virtual Open Firmware
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_VOF_H
+#define HW_VOF_H
+
+#include "qom/object.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "exec/cpu-defs.h"
+
+typedef struct Vof {
+ uint64_t top_addr; /* copied from rma_size */
+ GArray *claimed; /* array of SpaprOfClaimed */
+ uint64_t claimed_base;
+ GHashTable *of_instances; /* ihandle -> SpaprOfInstance */
+ uint32_t of_instance_last;
+ char *bootargs;
+ long fw_size;
+} Vof;
+
+int vof_client_call(MachineState *ms, Vof *vof, void *fdt,
+ target_ulong args_real);
+uint64_t vof_claim(Vof *vof, uint64_t virt, uint64_t size, uint64_t align);
+void vof_init(Vof *vof, uint64_t top_addr, Error **errp);
+void vof_cleanup(Vof *vof);
+void vof_build_dt(void *fdt, Vof *vof);
+uint32_t vof_client_open_store(void *fdt, Vof *vof, const char *nodename,
+ const char *prop, const char *path);
+
+#define TYPE_VOF_MACHINE_IF "vof-machine-if"
+
+typedef struct VofMachineIfClass VofMachineIfClass;
+DECLARE_CLASS_CHECKERS(VofMachineIfClass, VOF_MACHINE, TYPE_VOF_MACHINE_IF)
+
+struct VofMachineIfClass {
+ InterfaceClass parent;
+ target_ulong (*client_architecture_support)(MachineState *ms, CPUState *cs,
+ target_ulong vec);
+ void (*quiesce)(MachineState *ms);
+ bool (*setprop)(MachineState *ms, const char *path, const char *propname,
+ void *val, int vallen);
+};
+
+/*
+ * Initial stack size is from
+ * https://www.devicetree.org/open-firmware/bindings/ppc/release/ppc-2_1.html#REF27292
+ *
+ * "Client programs shall be invoked with a valid stack pointer (r1) with
+ * at least 32K bytes of memory available for stack growth".
+ */
+#define VOF_STACK_SIZE 0x8000
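Editor's note: a hedged sketch of how machine setup code might honour the 32K minimum quoted above when reserving the client stack; the failure value of vof_claim() is assumed to be -1 here.

    uint64_t stack_base = vof_claim(vof, 0, VOF_STACK_SIZE, VOF_STACK_SIZE);

    if (stack_base == (uint64_t)-1) {
        /* no room for the client stack: fail machine setup */
    }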
+
+#define VOF_MEM_READ(pa, buf, size) \
+ address_space_read(&address_space_memory, \
+ (pa), MEMTXATTRS_UNSPECIFIED, (buf), (size))
+#define VOF_MEM_WRITE(pa, buf, size) \
+ address_space_write(&address_space_memory, \
+ (pa), MEMTXATTRS_UNSPECIFIED, (buf), (size))
+
+#define PROM_ERROR (~0U)
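Editor's note: both macros expand to an address_space_*() call on address_space_memory and therefore yield a MemTxResult that callers should check. A minimal read-helper sketch (the helper name is hypothetical):

    static uint32_t vof_read_be32(uint64_t pa)
    {
        uint32_t val;

        if (VOF_MEM_READ(pa, &val, sizeof(val)) != MEMTX_OK) {
            return PROM_ERROR;      /* the error value defined above */
        }
        return be32_to_cpu(val);
    }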
+
+#endif /* HW_VOF_H */
diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
index 9ed58ec7e9..95ead0dd7c 100644
--- a/include/hw/ppc/xics.h
+++ b/include/hw/ppc/xics.h
@@ -30,6 +30,7 @@
#include "exec/memory.h"
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define XICS_IPI 0x2
#define XICS_BUID 0x1
@@ -40,8 +41,6 @@
* (the kernel implementation supports more but we don't exploit
* that yet)
*/
-typedef struct ICPStateClass ICPStateClass;
-typedef struct ICPState ICPState;
typedef struct PnvICPState PnvICPState;
typedef struct ICSStateClass ICSStateClass;
typedef struct ICSState ICSState;
@@ -49,15 +48,13 @@ typedef struct ICSIRQState ICSIRQState;
typedef struct XICSFabric XICSFabric;
#define TYPE_ICP "icp"
-#define ICP(obj) OBJECT_CHECK(ICPState, (obj), TYPE_ICP)
+OBJECT_DECLARE_TYPE(ICPState, ICPStateClass,
+ ICP)
#define TYPE_PNV_ICP "pnv-icp"
-#define PNV_ICP(obj) OBJECT_CHECK(PnvICPState, (obj), TYPE_PNV_ICP)
+DECLARE_INSTANCE_CHECKER(PnvICPState, PNV_ICP,
+ TYPE_PNV_ICP)
-#define ICP_CLASS(klass) \
- OBJECT_CLASS_CHECK(ICPStateClass, (klass), TYPE_ICP)
-#define ICP_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ICPStateClass, (obj), TYPE_ICP)
struct ICPStateClass {
DeviceClass parent_class;
@@ -90,18 +87,15 @@ struct PnvICPState {
};
#define TYPE_ICS "ics"
-#define ICS(obj) OBJECT_CHECK(ICSState, (obj), TYPE_ICS)
+DECLARE_OBJ_CHECKERS(ICSState, ICSStateClass,
+ ICS, TYPE_ICS)
-#define ICS_CLASS(klass) \
- OBJECT_CLASS_CHECK(ICSStateClass, (klass), TYPE_ICS)
-#define ICS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ICSStateClass, (obj), TYPE_ICS)
struct ICSStateClass {
DeviceClass parent_class;
DeviceRealize parent_realize;
- DeviceReset parent_reset;
+ ResettablePhases parent_phases;
void (*reject)(ICSState *s, uint32_t irq);
void (*resend)(ICSState *s);
@@ -145,17 +139,16 @@ struct ICSIRQState {
#define TYPE_XICS_FABRIC "xics-fabric"
#define XICS_FABRIC(obj) \
INTERFACE_CHECK(XICSFabric, (obj), TYPE_XICS_FABRIC)
-#define XICS_FABRIC_CLASS(klass) \
- OBJECT_CLASS_CHECK(XICSFabricClass, (klass), TYPE_XICS_FABRIC)
-#define XICS_FABRIC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XICSFabricClass, (obj), TYPE_XICS_FABRIC)
+typedef struct XICSFabricClass XICSFabricClass;
+DECLARE_CLASS_CHECKERS(XICSFabricClass, XICS_FABRIC,
+ TYPE_XICS_FABRIC)
-typedef struct XICSFabricClass {
+struct XICSFabricClass {
InterfaceClass parent;
ICSState *(*ics_get)(XICSFabric *xi, int irq);
void (*ics_resend)(XICSFabric *xi);
ICPState *(*icp_get)(XICSFabric *xi, int server);
-} XICSFabricClass;
+};
ICPState *xics_icp_get(XICSFabric *xi, int server);
diff --git a/include/hw/ppc/xics_spapr.h b/include/hw/ppc/xics_spapr.h
index 1c65c96e3c..de752c0d2c 100644
--- a/include/hw/ppc/xics_spapr.h
+++ b/include/hw/ppc/xics_spapr.h
@@ -28,13 +28,16 @@
#define XICS_SPAPR_H
#include "hw/ppc/spapr.h"
+#include "qom/object.h"
#define TYPE_ICS_SPAPR "ics-spapr"
-#define ICS_SPAPR(obj) OBJECT_CHECK(ICSState, (obj), TYPE_ICS_SPAPR)
+/* This is reusing the ICSState typedef from TYPE_ICS */
+DECLARE_INSTANCE_CHECKER(ICSState, ICS_SPAPR,
+ TYPE_ICS_SPAPR)
int xics_kvm_connect(SpaprInterruptController *intc, uint32_t nr_servers,
Error **errp);
void xics_kvm_disconnect(SpaprInterruptController *intc);
-bool xics_kvm_has_broken_disconnect(SpaprMachineState *spapr);
+bool xics_kvm_has_broken_disconnect(void);
#endif /* XICS_SPAPR_H */
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 705cf48176..f120874e0f 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -143,6 +143,7 @@
#include "sysemu/kvm.h"
#include "hw/sysbus.h"
#include "hw/ppc/xive_regs.h"
+#include "qom/object.h"
/*
* XIVE Notifier (Interface between Source and Router)
@@ -153,22 +154,21 @@ typedef struct XiveNotifier XiveNotifier;
#define TYPE_XIVE_NOTIFIER "xive-notifier"
#define XIVE_NOTIFIER(obj) \
INTERFACE_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER)
-#define XIVE_NOTIFIER_CLASS(klass) \
- OBJECT_CLASS_CHECK(XiveNotifierClass, (klass), TYPE_XIVE_NOTIFIER)
-#define XIVE_NOTIFIER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XiveNotifierClass, (obj), TYPE_XIVE_NOTIFIER)
+typedef struct XiveNotifierClass XiveNotifierClass;
+DECLARE_CLASS_CHECKERS(XiveNotifierClass, XIVE_NOTIFIER,
+ TYPE_XIVE_NOTIFIER)
-typedef struct XiveNotifierClass {
+struct XiveNotifierClass {
InterfaceClass parent;
- void (*notify)(XiveNotifier *xn, uint32_t lisn);
-} XiveNotifierClass;
+ void (*notify)(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
+};
/*
* XIVE Interrupt Source
*/
#define TYPE_XIVE_SOURCE "xive-source"
-#define XIVE_SOURCE(obj) OBJECT_CHECK(XiveSource, (obj), TYPE_XIVE_SOURCE)
+OBJECT_DECLARE_SIMPLE_TYPE(XiveSource, XIVE_SOURCE)
/*
* XIVE Interrupt Source characteristics, which define how the ESB are
@@ -176,8 +176,9 @@ typedef struct XiveNotifierClass {
*/
#define XIVE_SRC_H_INT_ESB 0x1 /* ESB managed with hcall H_INT_ESB */
#define XIVE_SRC_STORE_EOI 0x2 /* Store EOI supported */
+#define XIVE_SRC_PQ_DISABLE 0x4 /* Disable check on the PQ state bits */
-typedef struct XiveSource {
+struct XiveSource {
DeviceState parent;
/* IRQs */
@@ -186,18 +187,20 @@ typedef struct XiveSource {
/* PQ bits and LSI assertion bit */
uint8_t *status;
+ uint8_t reset_pq; /* PQ state on reset */
/* ESB memory region */
uint64_t esb_flags;
uint32_t esb_shift;
MemoryRegion esb_mmio;
+ MemoryRegion esb_mmio_emulated;
/* KVM support */
void *esb_mmap;
MemoryRegion esb_mmio_kvm;
XiveNotifier *xive;
-} XiveSource;
+};
/*
* ESB MMIO setting. Can be one page, for both source triggering and
@@ -215,6 +218,11 @@ static inline bool xive_source_esb_has_2page(XiveSource *xsrc)
xsrc->esb_shift == XIVE_ESB_4K_2PAGE;
}
+static inline size_t xive_source_esb_len(XiveSource *xsrc)
+{
+ return (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
+}
+
/* The trigger page is always the first/even page */
static inline hwaddr xive_source_esb_page(XiveSource *xsrc, uint32_t srcno)
{
@@ -255,6 +263,10 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
#define XIVE_ESB_QUEUED (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q)
#define XIVE_ESB_OFF XIVE_ESB_VAL_Q
+bool xive_esb_trigger(uint8_t *pq);
+bool xive_esb_eoi(uint8_t *pq);
+uint8_t xive_esb_set(uint8_t *pq, uint8_t value);
+
/*
* "magic" Event State Buffer (ESB) MMIO offsets.
*
@@ -268,6 +280,7 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
#define XIVE_ESB_STORE_EOI 0x400 /* Store */
#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_INJECT 0x800 /* Store */
#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
@@ -276,6 +289,30 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno);
uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq);
+/*
+ * Source status helpers
+ */
+static inline void xive_source_set_status(XiveSource *xsrc, uint32_t srcno,
+ uint8_t status, bool enable)
+{
+ if (enable) {
+ xsrc->status[srcno] |= status;
+ } else {
+ xsrc->status[srcno] &= ~status;
+ }
+}
+
+static inline void xive_source_set_asserted(XiveSource *xsrc, uint32_t srcno,
+ bool enable)
+{
+ xive_source_set_status(xsrc, srcno, XIVE_STATUS_ASSERTED, enable);
+}
+
+static inline bool xive_source_is_asserted(XiveSource *xsrc, uint32_t srcno)
+{
+ return xsrc->status[srcno] & XIVE_STATUS_ASSERTED;
+}
+
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset,
Monitor *mon);
@@ -298,7 +335,7 @@ void xive_source_set_irq(void *opaque, int srcno, int val);
*/
#define TYPE_XIVE_TCTX "xive-tctx"
-#define XIVE_TCTX(obj) OBJECT_CHECK(XiveTCTX, (obj), TYPE_XIVE_TCTX)
+OBJECT_DECLARE_SIMPLE_TYPE(XiveTCTX, XIVE_TCTX)
/*
* XIVE Thread interrupt Management register rings :
@@ -313,7 +350,7 @@ void xive_source_set_irq(void *opaque, int srcno, int val);
typedef struct XivePresenter XivePresenter;
-typedef struct XiveTCTX {
+struct XiveTCTX {
DeviceState parent_obj;
CPUState *cs;
@@ -323,33 +360,38 @@ typedef struct XiveTCTX {
uint8_t regs[XIVE_TM_RING_COUNT * XIVE_TM_RING_SIZE];
XivePresenter *xptr;
-} XiveTCTX;
+};
+
+static inline uint32_t xive_tctx_word2(uint8_t *ring)
+{
+ return *((uint32_t *) &ring[TM_WORD2]);
+}
/*
* XIVE Router
*/
typedef struct XiveFabric XiveFabric;
-typedef struct XiveRouter {
+struct XiveRouter {
SysBusDevice parent;
XiveFabric *xfb;
-} XiveRouter;
+};
#define TYPE_XIVE_ROUTER "xive-router"
-#define XIVE_ROUTER(obj) \
- OBJECT_CHECK(XiveRouter, (obj), TYPE_XIVE_ROUTER)
-#define XIVE_ROUTER_CLASS(klass) \
- OBJECT_CLASS_CHECK(XiveRouterClass, (klass), TYPE_XIVE_ROUTER)
-#define XIVE_ROUTER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XiveRouterClass, (obj), TYPE_XIVE_ROUTER)
-
-typedef struct XiveRouterClass {
+OBJECT_DECLARE_TYPE(XiveRouter, XiveRouterClass,
+ XIVE_ROUTER)
+
+struct XiveRouterClass {
SysBusDeviceClass parent;
/* XIVE table accessors */
int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
XiveEAS *eas);
+ int (*get_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
+ int (*set_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
XiveEND *end);
int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
@@ -359,7 +401,8 @@ typedef struct XiveRouterClass {
int (*write_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt, uint8_t word_number);
uint8_t (*get_block_id)(XiveRouter *xrtr);
-} XiveRouterClass;
+ void (*end_notify)(XiveRouter *xrtr, XiveEAS *eas);
+};
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
XiveEAS *eas);
@@ -371,7 +414,8 @@ int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt);
int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
XiveNVT *nvt, uint8_t word_number);
-void xive_router_notify(XiveNotifier *xn, uint32_t lisn);
+void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
+void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
/*
* XIVE Presenter
@@ -385,23 +429,30 @@ typedef struct XiveTCTXMatch {
#define TYPE_XIVE_PRESENTER "xive-presenter"
#define XIVE_PRESENTER(obj) \
INTERFACE_CHECK(XivePresenter, (obj), TYPE_XIVE_PRESENTER)
-#define XIVE_PRESENTER_CLASS(klass) \
- OBJECT_CLASS_CHECK(XivePresenterClass, (klass), TYPE_XIVE_PRESENTER)
-#define XIVE_PRESENTER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XivePresenterClass, (obj), TYPE_XIVE_PRESENTER)
+typedef struct XivePresenterClass XivePresenterClass;
+DECLARE_CLASS_CHECKERS(XivePresenterClass, XIVE_PRESENTER,
+ TYPE_XIVE_PRESENTER)
-typedef struct XivePresenterClass {
+#define XIVE_PRESENTER_GEN1_TIMA_OS 0x1
+
+struct XivePresenterClass {
InterfaceClass parent;
int (*match_nvt)(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
-} XivePresenterClass;
+ bool (*in_kernel)(const XivePresenter *xptr);
+ uint32_t (*get_config)(XivePresenter *xptr);
+};
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint32_t logic_serv);
+bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv);
/*
* XIVE Fabric (Interface between Interrupt Controller and Machine)
@@ -410,28 +461,26 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
#define TYPE_XIVE_FABRIC "xive-fabric"
#define XIVE_FABRIC(obj) \
INTERFACE_CHECK(XiveFabric, (obj), TYPE_XIVE_FABRIC)
-#define XIVE_FABRIC_CLASS(klass) \
- OBJECT_CLASS_CHECK(XiveFabricClass, (klass), TYPE_XIVE_FABRIC)
-#define XIVE_FABRIC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XiveFabricClass, (obj), TYPE_XIVE_FABRIC)
+typedef struct XiveFabricClass XiveFabricClass;
+DECLARE_CLASS_CHECKERS(XiveFabricClass, XIVE_FABRIC,
+ TYPE_XIVE_FABRIC)
-typedef struct XiveFabricClass {
+struct XiveFabricClass {
InterfaceClass parent;
int (*match_nvt)(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
-} XiveFabricClass;
+};
/*
* XIVE END ESBs
*/
#define TYPE_XIVE_END_SOURCE "xive-end-source"
-#define XIVE_END_SOURCE(obj) \
- OBJECT_CHECK(XiveENDSource, (obj), TYPE_XIVE_END_SOURCE)
+OBJECT_DECLARE_SIMPLE_TYPE(XiveENDSource, XIVE_END_SOURCE)
-typedef struct XiveENDSource {
+struct XiveENDSource {
DeviceState parent;
uint32_t nr_ends;
@@ -441,7 +490,7 @@ typedef struct XiveENDSource {
MemoryRegion esb_mmio;
XiveRouter *xrtr;
-} XiveENDSource;
+};
/*
* For legacy compatibility, the exceptions define up to 256 different
@@ -451,6 +500,17 @@ typedef struct XiveENDSource {
#define XIVE_PRIORITY_MAX 7
/*
+ * Convert a priority number to an Interrupt Pending Buffer (IPB)
+ * register, which indicates a pending interrupt at the priority
+ * corresponding to the bit number
+ */
+static inline uint8_t xive_priority_to_ipb(uint8_t priority)
+{
+ return priority > XIVE_PRIORITY_MAX ?
+ 0 : 1 << (XIVE_PRIORITY_MAX - priority);
+}
+
+/*
* XIVE Thread Interrupt Management Area (TIMA)
*
* This region gives access to the registers of the thread interrupt
@@ -473,6 +533,7 @@ Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);
void xive_tctx_destroy(XiveTCTX *tctx);
void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
+void xive_tctx_reset_os_signal(XiveTCTX *tctx);
/*
* KVM XIVE device helpers
@@ -480,9 +541,9 @@ void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp);
void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val);
-void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp);
-void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp);
-void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp);
-void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp);
+int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp);
#endif /* PPC_XIVE_H */
diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h
new file mode 100644
index 0000000000..ab68f8d157
--- /dev/null
+++ b/include/hw/ppc/xive2.h
@@ -0,0 +1,111 @@
+/*
+ * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PPC_XIVE2_H
+#define PPC_XIVE2_H
+
+#include "hw/ppc/xive.h"
+#include "hw/ppc/xive2_regs.h"
+#include "hw/sysbus.h"
+
+/*
+ * XIVE2 Router (POWER10)
+ */
+typedef struct Xive2Router {
+ SysBusDevice parent;
+
+ XiveFabric *xfb;
+} Xive2Router;
+
+#define TYPE_XIVE2_ROUTER "xive2-router"
+OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER);
+
+/*
+ * Configuration flags
+ */
+
+#define XIVE2_GEN1_TIMA_OS 0x00000001
+#define XIVE2_VP_SAVE_RESTORE 0x00000002
+#define XIVE2_THREADID_8BITS 0x00000004
+
+typedef struct Xive2RouterClass {
+ SysBusDeviceClass parent;
+
+ /* XIVE table accessors */
+ int (*get_eas)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ Xive2Eas *eas);
+ int (*get_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
+ int (*set_pq)(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq);
+ int (*get_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end);
+ int (*write_end)(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end, uint8_t word_number);
+ int (*get_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp);
+ int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp, uint8_t word_number);
+ uint8_t (*get_block_id)(Xive2Router *xrtr);
+ uint32_t (*get_config)(Xive2Router *xrtr);
+} Xive2RouterClass;
+
+int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ Xive2Eas *eas);
+int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end);
+int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end, uint8_t word_number);
+int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp);
+int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp, uint8_t word_number);
+uint32_t xive2_router_get_config(Xive2Router *xrtr);
+
+void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
+
+/*
+ * XIVE2 Presenter (POWER10)
+ */
+
+int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
+ uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint32_t logic_serv);
+
+/*
+ * XIVE2 END ESBs (POWER10)
+ */
+
+#define TYPE_XIVE2_END_SOURCE "xive2-end-source"
+OBJECT_DECLARE_SIMPLE_TYPE(Xive2EndSource, XIVE2_END_SOURCE)
+
+typedef struct Xive2EndSource {
+ DeviceState parent;
+
+ uint32_t nr_ends;
+
+ /* ESB memory region */
+ uint32_t esb_shift;
+ MemoryRegion esb_mmio;
+
+ Xive2Router *xrtr;
+} Xive2EndSource;
+
+/*
+ * XIVE2 Thread Interrupt Management Area (POWER10)
+ */
+
+void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+ uint64_t value, unsigned size);
+uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size);
+
+#endif /* PPC_XIVE2_H */
diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h
new file mode 100644
index 0000000000..816f5d0e84
--- /dev/null
+++ b/include/hw/ppc/xive2_regs.h
@@ -0,0 +1,212 @@
+/*
+ * QEMU PowerPC XIVE2 internal structure definitions (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_XIVE2_REGS_H
+#define PPC_XIVE2_REGS_H
+
+#include "qemu/bswap.h"
+
+/*
+ * Thread Interrupt Management Area (TIMA)
+ *
+ * In Gen1 mode (P9 compat mode) word 2 is the same. However in Gen2
+ * mode (P10), the CAM line is slightly different as the VP space was
+ * increased.
+ */
+#define TM2_QW0W2_VU PPC_BIT32(0)
+#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31)
+#define TM2_QW1W2_VO PPC_BIT32(0)
+#define TM2_QW1W2_HO PPC_BIT32(1)
+#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31)
+#define TM2_QW2W2_VP PPC_BIT32(0)
+#define TM2_QW2W2_HP PPC_BIT32(1)
+#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31)
+#define TM2_QW3W2_VT PPC_BIT32(0)
+#define TM2_QW3W2_HT PPC_BIT32(1)
+#define TM2_QW3W2_LP PPC_BIT32(6)
+#define TM2_QW3W2_LE PPC_BIT32(7)
+
+/*
+ * Event Assignment Structure (EAS)
+ */
+
+typedef struct Xive2Eas {
+ uint64_t w;
+#define EAS2_VALID PPC_BIT(0)
+#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */
+#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */
+#define EAS2_MASKED PPC_BIT(32) /* Masked */
+#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */
+} Xive2Eas;
+
+#define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID)
+#define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED)
+
+void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon);
+
+/*
+ * Event Notification Descriptor (END)
+ */
+
+typedef struct Xive2End {
+ uint32_t w0;
+#define END2_W0_VALID PPC_BIT32(0) /* "v" bit */
+#define END2_W0_ENQUEUE PPC_BIT32(5) /* "q" bit */
+#define END2_W0_UCOND_NOTIFY PPC_BIT32(6) /* "n" bit */
+#define END2_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit */
+#define END2_W0_BACKLOG PPC_BIT32(8) /* "b" bit */
+#define END2_W0_PRECL_ESC_CTL PPC_BIT32(9) /* "p" bit */
+#define END2_W0_UNCOND_ESCALATE PPC_BIT32(10) /* "u" bit */
+#define END2_W0_ESCALATE_CTL PPC_BIT32(11) /* "e" bit */
+#define END2_W0_ADAPTIVE_ESC PPC_BIT32(12) /* "a" bit */
+#define END2_W0_ESCALATE_END PPC_BIT32(13) /* "N" bit */
+#define END2_W0_FIRMWARE1 PPC_BIT32(16) /* Owned by FW */
+#define END2_W0_FIRMWARE2 PPC_BIT32(17) /* Owned by FW */
+#define END2_W0_AEC_SIZE PPC_BITMASK32(18, 19)
+#define END2_W0_AEG_SIZE PPC_BITMASK32(20, 23)
+#define END2_W0_EQ_VG_PREDICT PPC_BITMASK32(24, 31) /* Owned by HW */
+ uint32_t w1;
+#define END2_W1_ESn PPC_BITMASK32(0, 1)
+#define END2_W1_ESn_P PPC_BIT32(0)
+#define END2_W1_ESn_Q PPC_BIT32(1)
+#define END2_W1_ESe PPC_BITMASK32(2, 3)
+#define END2_W1_ESe_P PPC_BIT32(2)
+#define END2_W1_ESe_Q PPC_BIT32(3)
+#define END2_W1_GEN_FLIPPED PPC_BIT32(8)
+#define END2_W1_GENERATION PPC_BIT32(9)
+#define END2_W1_PAGE_OFF PPC_BITMASK32(10, 31)
+ uint32_t w2;
+#define END2_W2_RESERVED PPC_BITMASK32(4, 7)
+#define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31)
+ uint32_t w3;
+#define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24)
+#define END2_W3_QSIZE PPC_BITMASK32(28, 31)
+ uint32_t w4;
+#define END2_W4_END_BLOCK PPC_BITMASK32(4, 7)
+#define END2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31)
+#define END2_W4_ESB_BLOCK PPC_BITMASK32(0, 3)
+#define END2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31)
+ uint32_t w5;
+#define END2_W5_ESC_END_DATA PPC_BITMASK32(1, 31)
+ uint32_t w6;
+#define END2_W6_FORMAT_BIT PPC_BIT32(0)
+#define END2_W6_IGNORE PPC_BIT32(1)
+#define END2_W6_VP_BLOCK PPC_BITMASK32(4, 7)
+#define END2_W6_VP_OFFSET PPC_BITMASK32(8, 31)
+#define END2_W6_VP_OFFSET_GEN1 PPC_BITMASK32(13, 31)
+ uint32_t w7;
+#define END2_W7_TOPO PPC_BITMASK32(0, 3) /* Owned by HW */
+#define END2_W7_F0_PRIORITY PPC_BITMASK32(8, 15)
+#define END2_W7_F1_LOG_SERVER_ID PPC_BITMASK32(4, 31)
+} Xive2End;
+
+#define xive2_end_is_valid(end) (be32_to_cpu((end)->w0) & END2_W0_VALID)
+#define xive2_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END2_W0_ENQUEUE)
+#define xive2_end_is_notify(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_UCOND_NOTIFY)
+#define xive2_end_is_backlog(end) (be32_to_cpu((end)->w0) & END2_W0_BACKLOG)
+#define xive2_end_is_escalate(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_CTL)
+#define xive2_end_is_uncond_escalation(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_UNCOND_ESCALATE)
+#define xive2_end_is_silent_escalation(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_SILENT_ESCALATE)
+#define xive2_end_is_escalate_end(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_END)
+#define xive2_end_is_firmware1(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE1)
+#define xive2_end_is_firmware2(end) \
+ (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE2)
+
+static inline uint64_t xive2_end_qaddr(Xive2End *end)
+{
+ return ((uint64_t) be32_to_cpu(end->w2) & END2_W2_EQ_ADDR_HI) << 32 |
+ (be32_to_cpu(end->w3) & END2_W3_EQ_ADDR_LO);
+}
+
+void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon);
+void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
+ Monitor *mon);
+void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
+ Monitor *mon);
+
+/*
+ * Notification Virtual Processor (NVP)
+ */
+typedef struct Xive2Nvp {
+ uint32_t w0;
+#define NVP2_W0_VALID PPC_BIT32(0)
+#define NVP2_W0_HW PPC_BIT32(7)
+#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
+ uint32_t w1;
+#define NVP2_W1_CO PPC_BIT32(13)
+#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15)
+#define NVP2_W1_CO_THRID_VALID PPC_BIT32(16)
+#define NVP2_W1_CO_THRID PPC_BITMASK32(17, 31)
+ uint32_t w2;
+#define NVP2_W2_CPPR PPC_BITMASK32(0, 7)
+#define NVP2_W2_IPB PPC_BITMASK32(8, 15)
+#define NVP2_W2_LSMFB PPC_BITMASK32(16, 23)
+ uint32_t w3;
+ uint32_t w4;
+#define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */
+#define NVP2_W4_ESC_ESB_INDEX PPC_BITMASK32(4, 31) /* N:0 */
+#define NVP2_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7) /* N:1 */
+#define NVP2_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) /* N:1 */
+ uint32_t w5;
+#define NVP2_W5_PSIZE PPC_BITMASK32(0, 1)
+#define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7)
+#define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31)
+ uint32_t w6;
+ uint32_t w7;
+} Xive2Nvp;
+
+#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
+#define xive2_nvp_is_hw(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_HW)
+#define xive2_nvp_is_co(nvp) (be32_to_cpu((nvp)->w1) & NVP2_W1_CO)
+
+/*
+ * The VP number space in a block is defined by the END2_W6_VP_OFFSET
+ * field of the XIVE END. When running in Gen1 mode (P9 compat mode),
+ * the VP space is reduced to (1 << 19) VPs per block
+ */
+#define XIVE2_NVP_SHIFT 24
+#define XIVE2_NVP_COUNT (1 << XIVE2_NVP_SHIFT)
+
+static inline uint32_t xive2_nvp_cam_line(uint8_t nvp_blk, uint32_t nvp_idx)
+{
+ return (nvp_blk << XIVE2_NVP_SHIFT) | nvp_idx;
+}
+
+static inline uint32_t xive2_nvp_idx(uint32_t cam_line)
+{
+ return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1);
+}
+
+static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
+{
+ return (cam_line >> XIVE2_NVP_SHIFT) & 0xf;
+}
+
+/*
+ * Notification Virtual Group or Crowd (NVG/NVC)
+ */
+typedef struct Xive2Nvgc {
+ uint32_t w0;
+#define NVGC2_W0_VALID PPC_BIT32(0)
+ uint32_t w1;
+ uint32_t w2;
+ uint32_t w3;
+ uint32_t w4;
+ uint32_t w5;
+ uint32_t w6;
+ uint32_t w7;
+} Xive2Nvgc;
+
+#endif /* PPC_XIVE2_REGS_H */
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index 7879692825..4a3c9badd3 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -48,6 +48,22 @@
#define TM_SHIFT 16
+/*
+ * TIMA addresses are 12 bits (4k page).
+ * The MSB indicates a special op with side effect, which can be
+ * refined with bit 10 (see below).
+ * The registers, logically grouped in 4 rings (a quad-word each), are
+ * defined on the 6 LSBs (offset below 0x40).
+ * In between, we can add a cache line index from 0...3 (i.e. 0, 0x80,
+ * 0x100, 0x180) to select a specific snooper. Those 'snoop port
+ * address' bits should be dropped when processing the operations as
+ * they are all equivalent.
+ */
+#define TM_ADDRESS_MASK 0xC3F
+#define TM_SPECIAL_OP 0x800
+#define TM_RING_OFFSET 0x30
+#define TM_REG_OFFSET 0x3F
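Editor's note: a small sketch of the decode described in the comment above; the helper name is hypothetical and only the ring extraction is shown.

    static unsigned tima_ring_index(hwaddr offset)
    {
        hwaddr op = offset & TM_ADDRESS_MASK;   /* drop the snoop-port bits */

        return (op & TM_RING_OFFSET) >> 4;      /* 0..3: USER, OS, POOL, PHYS */
    }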
+
/* TM register offsets */
#define TM_QW0_USER 0x000 /* All rings */
#define TM_QW1_OS 0x010 /* Ring 0..2 */
@@ -236,6 +252,8 @@ typedef struct XiveEND {
(be32_to_cpu((end)->w0) & END_W0_UNCOND_ESCALATE)
#define xive_end_is_silent_escalation(end) \
(be32_to_cpu((end)->w0) & END_W0_SILENT_ESCALATE)
+#define xive_end_is_firmware(end) \
+ (be32_to_cpu((end)->w0) & END_W0_FIRMWARE)
static inline uint64_t xive_end_qaddr(XiveEND *end)
{
diff --git a/include/hw/ptimer.h b/include/hw/ptimer.h
index 412763fffb..4dc02b0de4 100644
--- a/include/hw/ptimer.h
+++ b/include/hw/ptimer.h
@@ -33,9 +33,17 @@
* to stderr when the guest attempts to enable the timer.
*/
-/* The default ptimer policy retains backward compatibility with the legacy
- * timers. Custom policies are adjusting the default one. Consider providing
- * a correct policy for your timer.
+/*
+ * The 'legacy' ptimer policy retains backward compatibility with the
+ * traditional ptimer behaviour from before policy flags were introduced.
+ * It has several weird behaviours which don't match typical hardware
+ * timer behaviour. For a new device using ptimers, you should not
+ * use PTIMER_POLICY_LEGACY, but instead check the actual behaviour
+ * that you need and specify the right set of policy flags to get that.
+ *
+ * If you are overhauling an existing device that uses PTIMER_POLICY_LEGACY
+ * and are in a position to check or test the real hardware behaviour,
+ * consider updating it to specify the right policy flags.
*
* The rough edges of the default policy:
* - Starting to run with a period = 0 emits error message and stops the
@@ -54,7 +62,7 @@
* since the last period, effectively restarting the timer with a
* counter = counter value at the moment of change (.i.e. one less).
*/
-#define PTIMER_POLICY_DEFAULT 0
+#define PTIMER_POLICY_LEGACY 0
/* Periodic timer counter stays with "0" for a one period before wrapping
* around. */
@@ -166,6 +174,28 @@ void ptimer_transaction_commit(ptimer_state *s);
void ptimer_set_period(ptimer_state *s, int64_t period);
/**
+ * ptimer_set_period_from_clock - Set counter increment from a Clock
+ * @s: ptimer to configure
+ * @clk: pointer to Clock object to take period from
+ * @divisor: value to scale the clock frequency down by
+ *
+ * If the ptimer is being driven from a Clock, this is the preferred
+ * way to tell the ptimer about the period, because it avoids any
+ * possible rounding errors that might happen if the internal
+ * representation of the Clock period was converted to either a period
+ * in ns or a frequency in Hz.
+ *
+ * If the ptimer should run at the same frequency as the clock,
+ * pass 1 as the @divisor; if the ptimer should run at half the
+ * frequency, pass 2, and so on.
+ *
+ * This function will assert if it is called outside a
+ * ptimer_transaction_begin/commit block.
+ */
+void ptimer_set_period_from_clock(ptimer_state *s, const Clock *clock,
+ unsigned int divisor);
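Editor's note: a usage sketch of the new helper, assuming a device state 's' with a ptimer_state *timer and an input Clock *clk_in (names are illustrative). Per the note above, the call must sit inside a transaction block:

    ptimer_transaction_begin(s->timer);
    /* run the ptimer at one quarter of the input clock's frequency */
    ptimer_set_period_from_clock(s->timer, s->clk_in, 4);
    ptimer_transaction_commit(s->timer);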
+
+/**
* ptimer_set_freq - Set counter frequency in Hz
* @s: ptimer to configure
* @freq: counter frequency in Hz
diff --git a/include/hw/qdev-clock.h b/include/hw/qdev-clock.h
index a340f65ff9..ffa0f7ba09 100644
--- a/include/hw/qdev-clock.h
+++ b/include/hw/qdev-clock.h
@@ -22,6 +22,8 @@
* @name: the name of the clock (can't be NULL).
* @callback: optional callback to be called on update or NULL.
* @opaque: argument for the callback
+ * @events: the events the callback should be called for
+ * (logical OR of ClockEvent enum values)
* @returns: a pointer to the newly added clock
*
* Add an input clock to device @dev as a clock named @name.
@@ -29,7 +31,8 @@
* The callback will be called with @opaque as opaque parameter.
*/
Clock *qdev_init_clock_in(DeviceState *dev, const char *name,
- ClockCallback *callback, void *opaque);
+ ClockCallback *callback, void *opaque,
+ unsigned int events);
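Editor's note: with the extra 'events' argument, callers now state which ClockEvent values the callback should fire for. A minimal sketch, assuming a hypothetical MyDevState/MYDEV type with a Clock *clk field and a mydev_clk_update callback:

    static void mydev_init(Object *obj)
    {
        MyDevState *s = MYDEV(obj);     /* MyDevState and MYDEV are hypothetical */

        /* only call mydev_clk_update once the new period has been committed */
        s->clk = qdev_init_clock_in(DEVICE(obj), "clk-in",
                                    mydev_clk_update, s, ClockUpdate);
    }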
/**
* qdev_init_clock_out:
@@ -70,12 +73,10 @@ Clock *qdev_get_clock_out(DeviceState *dev, const char *name);
*
* Set the source clock of input clock @name of device @dev to @source.
* @source period update will be propagated to @name clock.
+ *
+ * Must be called before @dev is realized.
*/
-static inline void qdev_connect_clock_in(DeviceState *dev, const char *name,
- Clock *source)
-{
- clock_set_source(qdev_get_clock_in(dev, name), source);
-}
+void qdev_connect_clock_in(DeviceState *dev, const char *name, Clock *source);
/**
* qdev_alias_clock:
@@ -107,6 +108,7 @@ void qdev_finalize_clocklist(DeviceState *dev);
* @output: indicates whether the clock is input or output
* @callback: for inputs, optional callback to be called on clock's update
* with device as opaque
+ * @callback_events: mask of ClockEvent values for when callback is called
* @offset: optional offset to store the ClockIn or ClockOut pointer in device
* state structure (0 means unused)
*/
@@ -114,6 +116,7 @@ struct ClockPortInitElem {
const char *name;
bool is_output;
ClockCallback *callback;
+ unsigned int callback_events;
size_t offset;
};
@@ -121,10 +124,11 @@ struct ClockPortInitElem {
(offsetof(devstate, field) + \
type_check(Clock *, typeof_field(devstate, field)))
-#define QDEV_CLOCK(out_not_in, devstate, field, cb) { \
+#define QDEV_CLOCK(out_not_in, devstate, field, cb, cbevents) { \
.name = (stringify(field)), \
.is_output = out_not_in, \
.callback = cb, \
+ .callback_events = cbevents, \
.offset = clock_offset_value(devstate, field), \
}
@@ -135,14 +139,15 @@ struct ClockPortInitElem {
* @field: a field in @_devstate (must be Clock*)
* @callback: (for input only) callback (or NULL) to be called with the device
* state as argument
+ * @cbevents: (for input only) ClockEvent mask for when callback is called
*
* The name of the clock will be derived from @field
*/
-#define QDEV_CLOCK_IN(devstate, field, callback) \
- QDEV_CLOCK(false, devstate, field, callback)
+#define QDEV_CLOCK_IN(devstate, field, callback, cbevents) \
+ QDEV_CLOCK(false, devstate, field, callback, cbevents)
#define QDEV_CLOCK_OUT(devstate, field) \
- QDEV_CLOCK(true, devstate, field, NULL)
+ QDEV_CLOCK(true, devstate, field, NULL, 0)
#define QDEV_CLOCK_END { .name = NULL }
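The same information can also be provided declaratively; a sketch with a made-up device type (MyDevState, its fields and the callback are hypothetical), assuming the usual qdev_init_clocks() helper:

static const ClockPortInitArray mydev_clocks = {
    QDEV_CLOCK_IN(MyDevState, clk_in, mydev_clk_update, ClockUpdate),
    QDEV_CLOCK_OUT(MyDevState, clk_out),
    QDEV_CLOCK_END
};

/* then, typically from instance_init: */
qdev_init_clocks(DEVICE(obj), mydev_clocks);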
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index ea3f73a282..9228e96c87 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -1,20 +1,80 @@
#ifndef QDEV_CORE_H
#define QDEV_CORE_H
+#include "qemu/atomic.h"
#include "qemu/queue.h"
#include "qemu/bitmap.h"
+#include "qemu/rcu.h"
+#include "qemu/rcu_queue.h"
#include "qom/object.h"
#include "hw/hotplug.h"
#include "hw/resettable.h"
+/**
+ * DOC: The QEMU Device API
+ *
+ * All modern devices should be represented as a derived QOM class of
+ * TYPE_DEVICE. The device API introduces the additional methods of
+ * @realize and @unrealize to represent additional stages in a device
+ * object's life cycle.
+ *
+ * Realization
+ * -----------
+ *
+ * Devices are constructed in two stages:
+ *
+ * 1) object instantiation via object_initialize() and
+ * 2) device realization via the #DeviceState.realized property
+ *
+ * The former may not fail (and must not abort or exit, since it is called
+ * during device introspection already), and the latter may return error
+ * information to the caller and must be re-entrant.
+ * Trivial field initializations should go into #TypeInfo.instance_init.
+ * Operations depending on @props static properties should go into @realize.
+ * After successful realization, setting static properties will fail.
+ *
+ * As an interim step, the #DeviceState.realized property can also be
+ * set with qdev_realize(). In the future, devices will propagate this
+ * state change to their children and along busses they expose. The
+ * point in time will be deferred to machine creation, so that values
+ * set in @realize will not be introspectable beforehand. Therefore
+ * devices must not create children during @realize; they should
+ * initialize them via object_initialize() in their own
+ * #TypeInfo.instance_init and forward the realization events
+ * appropriately.
+ *
+ * Any type may override the @realize and/or @unrealize callbacks but needs
+ * to call the parent type's implementation if keeping their functionality
+ * is desired. Refer to QOM documentation for further discussion and examples.
+ *
+ * .. note::
+ * Since TYPE_DEVICE doesn't implement @realize and @unrealize, types
+ * derived directly from it need not call their parent's @realize and
+ * @unrealize. For other types consult the documentation and
+ * implementation of the respective parent types.
+ *
+ * Hiding a device
+ * ---------------
+ *
+ * To hide a device, a DeviceListener function hide_device() needs to
+ * be registered. It can be used to defer adding a device and
+ * therefore hide it from the guest. The handler registering to this
+ * DeviceListener can save the QOpts passed to it for re-use later.
+ * It must return whether it wants the device to be hidden or
+ * visible. When the handler function decides the device shall be
+ * visible, it will be added with qdev_device_add() and realized as
+ * any other device. Otherwise qdev_device_add() will return early
+ * without adding the device. The guest will not see a "hidden"
+ * device until it has been marked visible and qdev_device_add() is
+ * called again.
+ *
+ */
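A minimal sketch of the two stages as seen from board code (the type name, property name and bus variable are placeholders, not a real device):

DeviceState *dev;

dev = qdev_new("my-device");                     /* stage 1: instantiate */
qdev_prop_set_uint32(dev, "some-prop", 42);      /* configure before realize */
qdev_realize_and_unref(dev, bus, &error_fatal);  /* stage 2: realize */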
+
enum {
DEV_NVECTORS_UNSPECIFIED = -1,
};
#define TYPE_DEVICE "device"
-#define DEVICE(obj) OBJECT_CHECK(DeviceState, (obj), TYPE_DEVICE)
-#define DEVICE_CLASS(klass) OBJECT_CLASS_CHECK(DeviceClass, (klass), TYPE_DEVICE)
-#define DEVICE_GET_CLASS(obj) OBJECT_GET_CLASS(DeviceClass, (obj), TYPE_DEVICE)
+OBJECT_DECLARE_TYPE(DeviceState, DeviceClass, DEVICE)
typedef enum DeviceCategory {
DEVICE_CATEGORY_BRIDGE,
@@ -26,6 +86,7 @@ typedef enum DeviceCategory {
DEVICE_CATEGORY_SOUND,
DEVICE_CATEGORY_MISC,
DEVICE_CATEGORY_CPU,
+ DEVICE_CATEGORY_WATCHDOG,
DEVICE_CATEGORY_MAX
} DeviceCategory;
@@ -36,7 +97,7 @@ typedef void (*BusRealize)(BusState *bus, Error **errp);
typedef void (*BusUnrealize)(BusState *bus);
/**
- * DeviceClass:
+ * struct DeviceClass - The base class for all devices.
* @props: Properties accessing state fields.
* @realize: Callback function invoked when the #DeviceState:realized
* property is changed to %true.
@@ -45,71 +106,37 @@ typedef void (*BusUnrealize)(BusState *bus);
* @hotpluggable: indicates if #DeviceClass is hotpluggable, available
* as readonly "hotpluggable" property of #DeviceState instance
*
- * # Realization #
- * Devices are constructed in two stages,
- * 1) object instantiation via object_initialize() and
- * 2) device realization via #DeviceState:realized property.
- * The former may not fail (and must not abort or exit, since it is called
- * during device introspection already), and the latter may return error
- * information to the caller and must be re-entrant.
- * Trivial field initializations should go into #TypeInfo.instance_init.
- * Operations depending on @props static properties should go into @realize.
- * After successful realization, setting static properties will fail.
- *
- * As an interim step, the #DeviceState:realized property can also be
- * set with qdev_realize().
- * In the future, devices will propagate this state change to their children
- * and along busses they expose.
- * The point in time will be deferred to machine creation, so that values
- * set in @realize will not be introspectable beforehand. Therefore devices
- * must not create children during @realize; they should initialize them via
- * object_initialize() in their own #TypeInfo.instance_init and forward the
- * realization events appropriately.
- *
- * Any type may override the @realize and/or @unrealize callbacks but needs
- * to call the parent type's implementation if keeping their functionality
- * is desired. Refer to QOM documentation for further discussion and examples.
- *
- * <note>
- * <para>
- * Since TYPE_DEVICE doesn't implement @realize and @unrealize, types
- * derived directly from it need not call their parent's @realize and
- * @unrealize.
- * For other types consult the documentation and implementation of the
- * respective parent types.
- * </para>
- * </note>
- *
- * # Hiding a device #
- * To hide a device, a DeviceListener function should_be_hidden() needs to
- * be registered.
- * It can be used to defer adding a device and therefore hide it from the
- * guest. The handler registering to this DeviceListener can save the QOpts
- * passed to it for re-using it later and must return that it wants the device
- * to be/remain hidden or not. When the handler function decides the device
- * shall not be hidden it will be added in qdev_device_add() and
- * realized as any other device. Otherwise qdev_device_add() will return early
- * without adding the device. The guest will not see a "hidden" device
- * until it was marked don't hide and qdev_device_add called again.
- *
- */
-typedef struct DeviceClass {
- /*< private >*/
+ */
+struct DeviceClass {
+ /* private: */
ObjectClass parent_class;
- /*< public >*/
+ /* public: */
+
+ /**
+ * @categories: device categories the device belongs to
+ */
DECLARE_BITMAP(categories, DEVICE_CATEGORY_MAX);
+ /**
+ * @fw_name: name used to identify device to firmware interfaces
+ */
const char *fw_name;
+ /**
+ * @desc: human readable description of device
+ */
const char *desc;
- /*
- * The underscore at the end ensures a compile-time error if someone
- * assigns to dc->props instead of using device_class_set_props.
+ /**
+ * @props_: properties associated with device, should only be
+ * assigned by using device_class_set_props(). The underscore
+ * ensures a compile-time error if someone attempts to assign
+ * dc->props directly.
*/
Property *props_;
- /*
- * Can this device be instantiated with -device / device_add?
+ /**
+ * @user_creatable: Can user instantiate with -device / device_add?
+ *
* All devices should support instantiation with device_add, and
* this flag should not exist. But we're not there, yet. Some
* devices fail to instantiate with cryptic error messages.
@@ -117,27 +144,37 @@ typedef struct DeviceClass {
* behavior would be cruel; clearing this flag will protect them.
* It should never be cleared without a comment explaining why it
* is cleared.
+ *
* TODO remove once we're there
*/
bool user_creatable;
bool hotpluggable;
/* callbacks */
- /*
- * Reset method here is deprecated and replaced by methods in the
- * resettable class interface to implement a multi-phase reset.
+ /**
+ * @reset: deprecated device reset method pointer
+ *
+ * Modern code should use the ResettableClass interface to
+ * implement a multi-phase reset.
+ *
* TODO: remove once every reset callback is unused
*/
DeviceReset reset;
DeviceRealize realize;
DeviceUnrealize unrealize;
- /* device state */
+ /**
+ * @vmsd: device state serialisation description for
+ * migration/save/restore
+ */
const VMStateDescription *vmsd;
- /* Private to qdev / bus. */
+ /**
+ * @bus_type: bus type
+ * private: to qdev / bus.
+ */
const char *bus_type;
-} DeviceClass;
+};
typedef struct NamedGPIOList NamedGPIOList;
@@ -160,52 +197,123 @@ struct NamedClockList {
QLIST_ENTRY(NamedClockList) node;
};
+typedef struct {
+ bool engaged_in_io;
+} MemReentrancyGuard;
+
+
+typedef QLIST_HEAD(, NamedGPIOList) NamedGPIOListHead;
+typedef QLIST_HEAD(, NamedClockList) NamedClockListHead;
+typedef QLIST_HEAD(, BusState) BusStateHead;
+
/**
- * DeviceState:
- * @realized: Indicates whether the device has been fully constructed.
- * @reset: ResettableState for the device; handled by Resettable interface.
+ * struct DeviceState - common device state, accessed with qdev helpers
*
* This structure should not be accessed directly. We declare it here
* so that it can be embedded in individual device state structures.
*/
struct DeviceState {
- /*< private >*/
+ /* private: */
Object parent_obj;
- /*< public >*/
+ /* public: */
- const char *id;
+ /**
+ * @id: global device id
+ */
+ char *id;
+ /**
+ * @canonical_path: canonical path of realized device in the QOM tree
+ */
char *canonical_path;
+ /**
+ * @realized: has device been realized?
+ */
bool realized;
+ /**
+ * @pending_deleted_event: track pending deletion events during unplug
+ */
bool pending_deleted_event;
- QemuOpts *opts;
+ /**
+ * @pending_deleted_expires_ms: optional timeout for deletion events
+ */
+ int64_t pending_deleted_expires_ms;
+ /**
+ * @opts: QDict of options for the device
+ */
+ QDict *opts;
+ /**
+ * @hotplugged: was device added after PHASE_MACHINE_READY?
+ */
int hotplugged;
+ /**
+ * @allow_unplug_during_migration: can device be unplugged during migration
+ */
bool allow_unplug_during_migration;
+ /**
+ * @parent_bus: bus this device belongs to
+ */
BusState *parent_bus;
- QLIST_HEAD(, NamedGPIOList) gpios;
- QLIST_HEAD(, NamedClockList) clocks;
- QLIST_HEAD(, BusState) child_bus;
+ /**
+ * @gpios: QLIST of named GPIOs the device provides.
+ */
+ NamedGPIOListHead gpios;
+ /**
+ * @clocks: QLIST of named clocks the device provides.
+ */
+ NamedClockListHead clocks;
+ /**
+ * @child_bus: QLIST of child buses
+ */
+ BusStateHead child_bus;
+ /**
+ * @num_child_bus: number of @child_bus entries
+ */
int num_child_bus;
+ /**
+ * @instance_id_alias: device alias for handling legacy migration setups
+ */
int instance_id_alias;
+ /**
+ * @alias_required_for_version: indicates @instance_id_alias is
+ * needed for migration
+ */
int alias_required_for_version;
+ /**
+ * @reset: ResettableState for the device; handled by Resettable interface.
+ */
ResettableState reset;
+ /**
+ * @unplug_blockers: list of reasons to block unplugging of device
+ */
+ GSList *unplug_blockers;
+ /**
+ * @mem_reentrancy_guard: Is the device currently in mmio/pio/dma?
+ *
+ * Used to prevent re-entrancy confusing things.
+ */
+ MemReentrancyGuard mem_reentrancy_guard;
};
struct DeviceListener {
void (*realize)(DeviceListener *listener, DeviceState *dev);
void (*unrealize)(DeviceListener *listener, DeviceState *dev);
/*
- * This callback is called upon init of the DeviceState and allows to
- * inform qdev that a device should be hidden, depending on the device
- * opts, for example, to hide a standby device.
+ * This callback is called upon init of the DeviceState and
+ * informs qdev whether a device should be visible or hidden. We
+ * can hide a failover device depending, for example, on the
+ * device opts.
+ *
+ * On errors, it returns false and errp is set. Device creation
+ * should fail in this case.
*/
- int (*should_be_hidden)(DeviceListener *listener, QemuOpts *device_opts);
+ bool (*hide_device)(DeviceListener *listener, const QDict *device_opts,
+ bool from_json, Error **errp);
QTAILQ_ENTRY(DeviceListener) link;
};
#define TYPE_BUS "bus"
-#define BUS(obj) OBJECT_CHECK(BusState, (obj), TYPE_BUS)
-#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS)
-#define BUS_GET_CLASS(obj) OBJECT_GET_CLASS(BusClass, (obj), TYPE_BUS)
+DECLARE_OBJ_CHECKERS(BusState, BusClass,
+ BUS, TYPE_BUS)
struct BusClass {
ObjectClass parent_class;
@@ -213,13 +321,22 @@ struct BusClass {
/* FIXME first arg should be BusState */
void (*print_dev)(Monitor *mon, DeviceState *dev, int indent);
char *(*get_dev_path)(DeviceState *dev);
+
/*
* This callback is used to create Open Firmware device path in accordance
* with OF spec http://forthworks.com/standards/of1275.pdf. Individual bus
* bindings can be found at http://playground.sun.com/1275/bindings/.
*/
char *(*get_fw_dev_path)(DeviceState *dev);
- void (*reset)(BusState *bus);
+
+ /*
+ * Return whether the device can be added to @bus,
+ * based on the address that was set (via device properties)
+ * before realize. If not, on return @errp contains the
+ * human-readable error message.
+ */
+ bool (*check_address)(BusState *bus, DeviceState *dev, Error **errp);
+
BusRealize realize;
BusUnrealize unrealize;
@@ -230,6 +347,7 @@ struct BusClass {
};
typedef struct BusChild {
+ struct rcu_head rcu;
DeviceState *child;
int index;
QTAILQ_ENTRY(BusChild) sibling;
@@ -237,63 +355,50 @@ typedef struct BusChild {
#define QDEV_HOTPLUG_HANDLER_PROPERTY "hotplug-handler"
+typedef QTAILQ_HEAD(, BusChild) BusChildHead;
+typedef QLIST_ENTRY(BusState) BusStateEntry;
+
/**
- * BusState:
+ * struct BusState:
+ * @obj: parent object
+ * @parent: parent Device
+ * @name: name of bus
* @hotplug_handler: link to a hotplug handler associated with bus.
- * @reset: ResettableState for the bus; handled by Resettable interface.
+ * @max_index: max number of child buses
+ * @realized: is the bus itself realized?
+ * @full: is the bus full?
+ * @num_children: current number of child buses
*/
struct BusState {
+ /* private: */
Object obj;
+ /* public: */
DeviceState *parent;
char *name;
HotplugHandler *hotplug_handler;
int max_index;
bool realized;
+ bool full;
int num_children;
- QTAILQ_HEAD(, BusChild) children;
- QLIST_ENTRY(BusState) sibling;
- ResettableState reset;
-};
-
-/**
- * Property:
- * @set_default: true if the default value should be set from @defval,
- * in which case @info->set_default_value must not be NULL
- * (if false then no default value is set by the property system
- * and the field retains whatever value it was given by instance_init).
- * @defval: default value for the property. This is used only if @set_default
- * is true.
- */
-struct Property {
- const char *name;
- const PropertyInfo *info;
- ptrdiff_t offset;
- uint8_t bitnr;
- bool set_default;
- union {
- int64_t i;
- uint64_t u;
- } defval;
- int arrayoffset;
- const PropertyInfo *arrayinfo;
- int arrayfieldsize;
- const char *link_type;
-};
-struct PropertyInfo {
- const char *name;
- const char *description;
- const QEnumLookup *enum_table;
- int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len);
- void (*set_default_value)(ObjectProperty *op, const Property *prop);
- void (*create)(ObjectClass *oc, Property *prop);
- ObjectPropertyAccessor *get;
- ObjectPropertyAccessor *set;
- ObjectPropertyRelease *release;
+ /**
+ * @children: an RCU protected QTAILQ, thus readers must use RCU
+ * to access it, and writers must hold the big qemu lock
+ */
+ BusChildHead children;
+ /**
+ * @sibling: next bus
+ */
+ BusStateEntry sibling;
+ /**
+ * @reset: ResettableState for the bus; handled by Resettable interface.
+ */
+ ResettableState reset;
};
/**
- * GlobalProperty:
+ * typedef GlobalProperty - a global property type
+ *
* @used: Set to true if property was used when initializing a device.
* @optional: If set to true, GlobalProperty will be skipped without errors
* if the property doesn't exist.
@@ -327,17 +432,35 @@ compat_props_add(GPtrArray *arr,
* This only allocates the memory and initializes the device state
* structure, ready for the caller to set properties if they wish.
* The device still needs to be realized.
- * The returned object has a reference count of 1.
+ *
+ * Return: a derived DeviceState object with a reference count of 1.
*/
DeviceState *qdev_new(const char *name);
+
/**
* qdev_try_new: Try to create a device on the heap
* @name: device type to create
*
* This is like qdev_new(), except it returns %NULL when type @name
* does not exist, rather than asserting.
+ *
+ * Return: a derived DeviceState object with a reference count of 1 or
+ * NULL if type @name does not exist.
*/
DeviceState *qdev_try_new(const char *name);
+
+/**
+ * qdev_is_realized() - check if device is realized
+ * @dev: The device to check.
+ *
+ * Context: May be called outside big qemu lock.
+ * Return: true if the device has been fully constructed, false otherwise.
+ */
+static inline bool qdev_is_realized(DeviceState *dev)
+{
+ return qatomic_load_acquire(&dev->realized);
+}
+
/**
* qdev_realize: Realize @dev.
* @dev: device to realize
@@ -349,13 +472,14 @@ DeviceState *qdev_try_new(const char *name);
* @dev must not be plugged into a bus already.
* If @bus, plug @dev into @bus. This takes a reference to @dev.
* If @dev has no QOM parent, make one up, taking another reference.
- * On success, return true.
- * On failure, store an error through @errp and return false.
*
* If you created @dev using qdev_new(), you probably want to use
* qdev_realize_and_unref() instead.
+ *
+ * Return: true on success, else false with @errp set to the error
*/
bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp);
+
/**
* qdev_realize_and_unref: Realize @dev and drop a reference
* @dev: device to realize
@@ -379,8 +503,11 @@ bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp);
* for the only reference to the child device to be held by the parent
* via the child<> property, and so the reference-count-drop done here
* would be incorrect. For that use case you want qdev_realize().
+ *
+ * Return: true on success, else false with @errp set to the error
*/
bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp);
+
/**
* qdev_unrealize: Unrealize a device
* @dev: device to unrealize
@@ -390,7 +517,7 @@ bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp);
*
* - unrealize any child buses by calling qbus_unrealize()
* (this will recursively unrealize any devices on those buses)
- * - call the the unrealize method of @dev
+ * - call the unrealize method of @dev
*
* The device can then be freed by causing its reference count to go
* to zero.
@@ -406,16 +533,16 @@ void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id,
HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev);
HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev);
bool qdev_hotplug_allowed(DeviceState *dev, Error **errp);
+
/**
- * qdev_get_hotplug_handler: Get handler responsible for device wiring
- *
- * Find HOTPLUG_HANDLER for @dev that provides [pre|un]plug callbacks for it.
+ * qdev_get_hotplug_handler() - Get handler responsible for device wiring
+ * @dev: the device we want the HOTPLUG_HANDLER for.
*
* Note: in case @dev has a parent bus, it will be returned as handler unless
* machine handler overrides it.
*
- * Returns: pointer to object that implements TYPE_HOTPLUG_HANDLER interface
- * or NULL if there aren't any.
+ * Return: pointer to object that implements TYPE_HOTPLUG_HANDLER interface
+ * or NULL if there aren't any.
*/
HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev);
void qdev_unplug(DeviceState *dev, Error **errp);
@@ -425,6 +552,50 @@ void qdev_machine_creation_done(void);
bool qdev_machine_modified(void);
/**
+ * qdev_add_unplug_blocker: Add an unplug blocker to a device
+ *
+ * @dev: Device to be blocked from unplug
+ * @reason: Reason for blocking
+ */
+void qdev_add_unplug_blocker(DeviceState *dev, Error *reason);
+
+/**
+ * qdev_del_unplug_blocker: Remove an unplug blocker from a device
+ *
+ * @dev: Device to be unblocked
+ * @reason: Pointer to the Error used with qdev_add_unplug_blocker.
+ * Used as a handle to lookup the blocker for deletion.
+ */
+void qdev_del_unplug_blocker(DeviceState *dev, Error *reason);
+
+/**
+ * qdev_unplug_blocked: Confirm if a device is blocked from unplug
+ *
+ * @dev: Device to be tested
+ * @errp: The reasons why the device is blocked, if any
+ *
+ * Returns: true (also setting @errp) if device is blocked from unplug,
+ * false otherwise
+ */
+bool qdev_unplug_blocked(DeviceState *dev, Error **errp);
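An illustrative (not authoritative) usage pattern: the Error object doubles as the human-readable reason and as the handle used to remove the blocker later.

Error *blocker = NULL;

error_setg(&blocker, "device is busy with a long-running operation");
qdev_add_unplug_blocker(dev, blocker);
/* ... later, once the operation has finished ... */
qdev_del_unplug_blocker(dev, blocker);
error_free(blocker);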
+
+/**
+ * typedef GpioPolarity - Polarity of a GPIO line
+ *
+ * GPIO lines use either positive (active-high) logic,
+ * or negative (active-low) logic.
+ *
+ * In active-high logic (%GPIO_POLARITY_ACTIVE_HIGH), a pin is
+ * active when the voltage on the pin is high (relative to ground);
+ * whereas in active-low logic (%GPIO_POLARITY_ACTIVE_LOW), a pin
+ * is active when the voltage on the pin is low (or grounded).
+ */
+typedef enum {
+ GPIO_POLARITY_ACTIVE_LOW,
+ GPIO_POLARITY_ACTIVE_HIGH
+} GpioPolarity;
+
+/**
* qdev_get_gpio_in: Get one of a device's anonymous input GPIO lines
* @dev: Device whose GPIO we want
* @n: Number of the anonymous GPIO line (which must be in range)
@@ -441,8 +612,11 @@ bool qdev_machine_modified(void);
* connect another device's output GPIO line to this input.
*
* For named input GPIO lines, use qdev_get_gpio_in_named().
+ *
+ * Return: qemu_irq corresponding to anonymous input GPIO line
*/
qemu_irq qdev_get_gpio_in(DeviceState *dev, int n);
+
/**
* qdev_get_gpio_in_named: Get one of a device's named input GPIO lines
* @dev: Device whose GPIO we want
@@ -457,6 +631,8 @@ qemu_irq qdev_get_gpio_in(DeviceState *dev, int n);
* array); this function will assert() if passed an invalid name or index.
*
* For anonymous input GPIO lines, use qdev_get_gpio_in().
+ *
+ * Return: qemu_irq corresponding to named input GPIO line
*/
qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n);
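For example (the two device pointers and the GPIO array name are hypothetical), wiring one device's anonymous output into another device's named input looks like:

/* route controller's output line 0 into line 3 of the sink's "irq-in" array */
qdev_connect_gpio_out(controller, 0,
                      qdev_get_gpio_in_named(sink, "irq-in", 3));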
@@ -481,7 +657,7 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n);
* qemu_irqs at once, or to connect multiple outbound GPIOs to the
* same qemu_irq. (Warning: there is no assertion or other guard to
* catch this error: the model will just not do the right thing.)
- * Instead, for fan-out you can use the TYPE_IRQ_SPLIT device: connect
+ * Instead, for fan-out you can use the TYPE_SPLIT_IRQ device: connect
* a device's outbound GPIO to the splitter's input, and connect each
* of the splitter's outputs to a different device. For fan-in you
* can use the TYPE_OR_IRQ device, which is a model of a logical OR
@@ -490,12 +666,14 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n);
* For named output GPIO lines, use qdev_connect_gpio_out_named().
*/
void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin);
+
/**
- * qdev_connect_gpio_out: Connect one of a device's anonymous output GPIO lines
+ * qdev_connect_gpio_out_named: Connect one of a device's named output
+ * GPIO lines
* @dev: Device whose GPIO to connect
* @name: Name of the output GPIO array
* @n: Number of the anonymous output GPIO line (which must be in range)
- * @pin: qemu_irq to connect the output line to
+ * @input_pin: qemu_irq to connect the output line to
*
* This function connects an anonymous output GPIO line on a device
* up to an arbitrary qemu_irq, so that when the device asserts that
@@ -513,10 +691,11 @@ void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin);
* qemu_irqs at once, or to connect multiple outbound GPIOs to the
* same qemu_irq; see qdev_connect_gpio_out() for details.
*
- * For named output GPIO lines, use qdev_connect_gpio_out_named().
+ * For anonymous output GPIO lines, use qdev_connect_gpio_out().
*/
void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n,
- qemu_irq pin);
+ qemu_irq input_pin);
+
/**
* qdev_get_gpio_out_connector: Get the qemu_irq connected to an output GPIO
* @dev: Device whose output GPIO we are interested in
@@ -532,8 +711,11 @@ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n,
*
* You probably don't need to use this function -- it is used only
* by the platform-bus subsystem.
+ *
+ * Return: qemu_irq associated with GPIO or NULL if un-wired.
*/
qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n);
+
/**
* qdev_intercept_gpio_out: Intercept an existing GPIO connection
* @dev: Device to intercept the outbound GPIO line from
@@ -541,14 +723,17 @@ qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n);
* @name: Name of the output GPIO array
* @n: Number of the GPIO line in the array
*
- * This function is provided only for use by the qtest testing framework
- * and is not suitable for use in non-testing parts of QEMU.
+ * .. note::
+ * This function is provided only for use by the qtest testing framework
+ * and is not suitable for use in non-testing parts of QEMU.
*
* This function breaks an existing connection of an outbound GPIO
* line from @dev, and replaces it with the new qemu_irq @icpt, as if
* ``qdev_connect_gpio_out_named(dev, icpt, name, n)`` had been called.
* The previously connected qemu_irq is returned, so it can be restored
* by a second call to qdev_intercept_gpio_out() if desired.
+ *
+ * Return: old disconnected qemu_irq if one existed
*/
qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt,
const char *name, int n);
@@ -575,6 +760,7 @@ BusState *qdev_get_child_bus(DeviceState *dev, const char *name);
* hold of an input GPIO line to manipulate it.
*/
void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n);
+
/**
* qdev_init_gpio_out: create an array of anonymous output GPIO lines
* @dev: Device to create output GPIOs for
@@ -597,10 +783,15 @@ void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n);
*
* See qdev_connect_gpio_out() for how code that uses such a device
* can connect to one of its output GPIO lines.
+ *
+ * There is no need to release the @pins allocated array because it
+ * will be automatically released when @dev calls its instance_finalize()
+ * handler.
*/
void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n);
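A hedged sketch of a device creating its lines during instance_init (the state struct, cast macro, handler and line counts are invented):

static void mydev_gpio_set(void *opaque, int n, int level)
{
    MyDevState *s = opaque;        /* assumed device state type */
    /* react to input line n changing to level */
}

static void mydev_init(Object *obj)
{
    MyDevState *s = MY_DEV(obj);   /* MY_DEV is a hypothetical cast macro */

    qdev_init_gpio_in(DEVICE(obj), mydev_gpio_set, 4);
    qdev_init_gpio_out(DEVICE(obj), &s->irq_out, 1);
}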
+
/**
- * qdev_init_gpio_out: create an array of named output GPIO lines
+ * qdev_init_gpio_out_named: create an array of named output GPIO lines
* @dev: Device to create output GPIOs for
* @pins: Pointer to qemu_irq or qemu_irq array for the GPIO lines
* @name: Name to give this array of GPIO lines
@@ -612,10 +803,9 @@ void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n);
*/
void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins,
const char *name, int n);
+
/**
- * qdev_init_gpio_in_named_with_opaque: create an array of input GPIO lines
- * for the specified device
- *
+ * qdev_init_gpio_in_named_with_opaque() - create an array of input GPIO lines
* @dev: Device to create input GPIOs for
* @handler: Function to call when GPIO line value is set
* @opaque: Opaque data pointer to pass to @handler
@@ -628,8 +818,11 @@ void qdev_init_gpio_in_named_with_opaque(DeviceState *dev,
const char *name, int n);
/**
- * qdev_init_gpio_in_named: create an array of input GPIO lines
- * for the specified device
+ * qdev_init_gpio_in_named() - create an array of input GPIO lines
+ * @dev: device to add array to
+ * @handler: a &typedef qemu_irq_handler function to call when GPIO is set
+ * @name: Name of the GPIO input (must be unique for this device)
+ * @n: Number of GPIO lines in this input set
*
* Like qdev_init_gpio_in_named_with_opaque(), but the opaque pointer
* passed to the handler is @dev (which is the most commonly desired behaviour).
@@ -663,7 +856,7 @@ static inline void qdev_init_gpio_in_named(DeviceState *dev,
void qdev_pass_gpios(DeviceState *dev, DeviceState *container,
const char *name);
-BusState *qdev_get_parent_bus(DeviceState *dev);
+BusState *qdev_get_parent_bus(const DeviceState *dev);
/*** BUS API. ***/
@@ -673,9 +866,9 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id);
typedef int (qbus_walkerfn)(BusState *bus, void *opaque);
typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque);
-void qbus_create_inplace(void *bus, size_t size, const char *typename,
- DeviceState *parent, const char *name);
-BusState *qbus_create(const char *typename, DeviceState *parent, const char *name);
+void qbus_init(void *bus, size_t size, const char *typename,
+ DeviceState *parent, const char *name);
+BusState *qbus_new(const char *typename, DeviceState *parent, const char *name);
bool qbus_realize(BusState *bus, Error **errp);
void qbus_unrealize(BusState *bus);
@@ -692,40 +885,17 @@ int qdev_walk_children(DeviceState *dev,
void *opaque);
/**
- * @qdev_reset_all:
- * Reset @dev. See @qbus_reset_all() for more details.
- *
- * Note: This function is deprecated and will be removed when it becomes unused.
- * Please use device_cold_reset() now.
- */
-void qdev_reset_all(DeviceState *dev);
-void qdev_reset_all_fn(void *opaque);
-
-/**
- * @qbus_reset_all:
- * @bus: Bus to be reset.
+ * device_cold_reset() - perform a recursive cold reset on a device
+ * @dev: device to reset.
*
- * Reset @bus and perform a bus-level ("hard") reset of all devices connected
- * to it, including recursive processing of all buses below @bus itself. A
- * hard reset means that qbus_reset_all will reset all state of the device.
- * For PCI devices, for example, this will include the base address registers
- * or configuration space.
- *
- * Note: This function is deprecated and will be removed when it becomes unused.
- * Please use bus_cold_reset() now.
- */
-void qbus_reset_all(BusState *bus);
-void qbus_reset_all_fn(void *opaque);
-
-/**
- * device_cold_reset:
* Reset device @dev and perform a recursive processing using the resettable
* interface. It triggers a RESET_TYPE_COLD.
*/
void device_cold_reset(DeviceState *dev);
/**
- * bus_cold_reset:
+ * bus_cold_reset() - perform a recursive cold reset on a bus
+ * @bus: bus to reset
*
* Reset bus @bus and perform a recursive processing using the resettable
* interface. It triggers a RESET_TYPE_COLD.
@@ -733,14 +903,18 @@ void device_cold_reset(DeviceState *dev);
void bus_cold_reset(BusState *bus);
/**
- * device_is_in_reset:
- * Return true if the device @dev is currently being reset.
+ * device_is_in_reset() - check device reset state
+ * @dev: device to check
+ *
+ * Return: true if the device @dev is currently being reset.
*/
bool device_is_in_reset(DeviceState *dev);
/**
- * bus_is_in_reset:
- * Return true if the bus @bus is currently being reset.
+ * bus_is_in_reset() - check bus reset state
+ * @bus: bus to check
+ *
+ * Return: true if the bus @bus is currently being reset.
*/
bool bus_is_in_reset(BusState *bus);
@@ -751,35 +925,61 @@ char *qdev_get_fw_dev_path(DeviceState *dev);
char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev);
/**
- * @qdev_machine_init
+ * device_class_set_props(): add a set of properties to a device
+ * @dc: the parent DeviceClass all devices inherit
+ * @props: an array of properties, terminated by DEFINE_PROP_END_OF_LIST()
*
- * Initialize platform devices before machine init. This is a hack until full
- * support for composition is added.
+ * This will add a set of properties to the object. It will fault if
+ * you attempt to add an existing property defined by a parent class.
+ * To modify an inherited property you need to use????
*/
-void qdev_machine_init(void);
-
-/**
- * device_legacy_reset:
- *
- * Reset a single device (by calling the reset method).
- * Note: This function is deprecated and will be removed when it becomes unused.
- * Please use device_cold_reset() now.
- */
-void device_legacy_reset(DeviceState *dev);
-
void device_class_set_props(DeviceClass *dc, Property *props);
/**
- * device_class_set_parent_reset:
+ * device_class_set_parent_reset() - legacy set device reset handlers
+ * @dc: device class
+ * @dev_reset: function pointer to reset handler
+ * @parent_reset: function pointer to the parent's reset handler
+ *
+ * Modern code should use the ResettableClass interface to
+ * implement a multi-phase reset instead.
+ *
* TODO: remove the function when DeviceClass's reset method
* is not used anymore.
*/
void device_class_set_parent_reset(DeviceClass *dc,
DeviceReset dev_reset,
DeviceReset *parent_reset);
+
+/**
+ * device_class_set_parent_realize() - set up for chaining realize fns
+ * @dc: The device class
+ * @dev_realize: the device realize function
+ * @parent_realize: somewhere to save the parent's realize function
+ *
+ * This is intended to be used when the new realize function will
+ * eventually call its parent realization function during creation.
+ * This requires storing the function pointer somewhere (usually in
+ * the derived class structure) so you can eventually call
+ * dc->parent_realize(dev, errp)
+ */
void device_class_set_parent_realize(DeviceClass *dc,
DeviceRealize dev_realize,
DeviceRealize *parent_realize);
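The usual chaining pattern, sketched with hypothetical class, macro and function names (the saved pointer lives in the derived class structure here):

static void my_dev_realize(DeviceState *dev, Error **errp)
{
    MyDevClass *mdc = MY_DEV_GET_CLASS(dev);   /* hypothetical class/cast macro */

    /* device-specific setup ... */
    mdc->parent_realize(dev, errp);
}

static void my_dev_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    MyDevClass *mdc = MY_DEV_CLASS(oc);

    device_class_set_parent_realize(dc, my_dev_realize,
                                    &mdc->parent_realize);
}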
+
+
+/**
+ * device_class_set_parent_unrealize() - set up for chaining unrealize fns
+ * @dc: The device class
+ * @dev_unrealize: the device unrealize function
+ * @parent_unrealize: somewhere to save the parent's unrealize function
+ *
+ * This is intended to be used when the new unrealize function will
+ * eventually call its parent unrealization function during the
+ * unrealize phase. This requires storing the function pointer
+ * somewhere (usually in the derived class structure) so you can
+ * eventually call
+ * dc->parent_unrealize(dev);
+ */
void device_class_set_parent_unrealize(DeviceClass *dc,
DeviceUnrealize dev_unrealize,
DeviceUnrealize *parent_unrealize);
@@ -788,12 +988,26 @@ const VMStateDescription *qdev_get_vmsd(DeviceState *dev);
const char *qdev_fw_name(DeviceState *dev);
+void qdev_assert_realized_properly(void);
Object *qdev_get_machine(void);
+/**
+ * qdev_get_human_name() - Return a human-readable name for a device
+ * @dev: The device. Must be a valid and non-NULL pointer.
+ *
+ * .. note::
+ * This function is intended for user-friendly error messages.
+ *
+ * Returns: A newly allocated string containing the device id if not null,
+ * else the object canonical path.
+ *
+ * Use g_free() to free it.
+ */
+char *qdev_get_human_name(DeviceState *dev);
+
/* FIXME: make this a link<> */
-void qdev_set_parent_bus(DeviceState *dev, BusState *bus);
+bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp);
-extern bool qdev_hotplug;
extern bool qdev_hot_removed;
char *qdev_get_dev_path(DeviceState *dev);
@@ -803,20 +1017,93 @@ void qbus_set_bus_hotplug_handler(BusState *bus);
static inline bool qbus_is_hotpluggable(BusState *bus)
{
- return bus->hotplug_handler;
+ HotplugHandler *plug_handler = bus->hotplug_handler;
+ bool ret = !!plug_handler;
+
+ if (plug_handler) {
+ HotplugHandlerClass *hdc;
+
+ hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler);
+ if (hdc->is_hotpluggable_bus) {
+ ret = hdc->is_hotpluggable_bus(plug_handler, bus);
+ }
+ }
+ return ret;
+}
+
+/**
+ * qbus_mark_full: Mark this bus as full, so no more devices can be attached
+ * @bus: Bus to mark as full
+ *
+ * By default, QEMU will allow devices to be plugged into a bus up
+ * to the bus class's device count limit. Calling this function
+ * marks a particular bus as full, so that no more devices can be
+ * plugged into it. In particular this means that the bus will not
+ * be considered as a candidate for plugging in devices created by
+ * the user on the commandline or via the monitor.
+ * If a machine has multiple buses of a given type, such as I2C,
+ * where some of those buses in the real hardware are used only for
+ * internal devices and some are exposed via expansion ports, you
+ * can use this function to mark the internal-only buses as full
+ * after you have created all their internal devices. Then user
+ * created devices will appear on the expansion-port bus where
+ * guest software expects them.
+ */
+static inline void qbus_mark_full(BusState *bus)
+{
+ bus->full = true;
}
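For instance (the bus pointer is a hypothetical field owned by the machine state), a board with an internal-only I2C bus might do:

/* after creating all fixed devices on the internal bus */
qbus_mark_full(s->internal_i2c_bus);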
void device_listener_register(DeviceListener *listener);
void device_listener_unregister(DeviceListener *listener);
/**
- * @qdev_should_hide_device:
- * @opts: QemuOpts as passed on cmdline.
+ * qdev_should_hide_device() - check if device should be hidden
*
- * Check if a device should be added.
- * When a device is added via qdev_device_add() this will be called,
- * and return if the device should be added now or not.
+ * @opts: options QDict
+ * @from_json: true if @opts entries are typed, false for all strings
+ * @errp: pointer to error object
+ *
+ * When a device is added via qdev_device_add() this will be called.
+ *
+ * Return: whether the device should be added now or not.
*/
-bool qdev_should_hide_device(QemuOpts *opts);
+bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp);
+
+typedef enum MachineInitPhase {
+ /* current_machine is NULL. */
+ PHASE_NO_MACHINE,
+
+ /* current_machine is not NULL, but current_machine->accel is NULL. */
+ PHASE_MACHINE_CREATED,
+
+ /*
+ * current_machine->accel is not NULL, but the machine properties have
+ * not been validated and machine_class->init has not yet been called.
+ */
+ PHASE_ACCEL_CREATED,
+
+ /*
+ * Late backend objects have been created and initialized.
+ */
+ PHASE_LATE_BACKENDS_CREATED,
+
+ /*
+ * machine_class->init has been called, thus creating any embedded
+ * devices and validating machine properties. Devices created at
+ * this time are considered to be cold-plugged.
+ */
+ PHASE_MACHINE_INITIALIZED,
+
+ /*
+ * QEMU is ready to start CPUs and devices created at this time
+ * are considered to be hot-plugged. The monitor is not restricted
+ * to "preconfig" commands.
+ */
+ PHASE_MACHINE_READY,
+} MachineInitPhase;
+
+bool phase_check(MachineInitPhase phase);
+void phase_advance(MachineInitPhase phase);
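A small hedged example of how the phase helpers are typically consulted, for instance to distinguish cold-plug from hot-plug behaviour:

if (phase_check(PHASE_MACHINE_READY)) {
    /* machine is already running: treat this as a hot-plug */
} else {
    /* still building the machine: cold-plug path */
}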
#endif
diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h
new file mode 100644
index 0000000000..438f65389f
--- /dev/null
+++ b/include/hw/qdev-properties-system.h
@@ -0,0 +1,97 @@
+#ifndef HW_QDEV_PROPERTIES_SYSTEM_H
+#define HW_QDEV_PROPERTIES_SYSTEM_H
+
+#include "hw/qdev-properties.h"
+
+extern const PropertyInfo qdev_prop_chr;
+extern const PropertyInfo qdev_prop_macaddr;
+extern const PropertyInfo qdev_prop_reserved_region;
+extern const PropertyInfo qdev_prop_multifd_compression;
+extern const PropertyInfo qdev_prop_mig_mode;
+extern const PropertyInfo qdev_prop_granule_mode;
+extern const PropertyInfo qdev_prop_zero_page_detection;
+extern const PropertyInfo qdev_prop_losttickpolicy;
+extern const PropertyInfo qdev_prop_blockdev_on_error;
+extern const PropertyInfo qdev_prop_bios_chs_trans;
+extern const PropertyInfo qdev_prop_fdc_drive_type;
+extern const PropertyInfo qdev_prop_drive;
+extern const PropertyInfo qdev_prop_drive_iothread;
+extern const PropertyInfo qdev_prop_netdev;
+extern const PropertyInfo qdev_prop_pci_devfn;
+extern const PropertyInfo qdev_prop_blocksize;
+extern const PropertyInfo qdev_prop_pci_host_devaddr;
+extern const PropertyInfo qdev_prop_uuid;
+extern const PropertyInfo qdev_prop_audiodev;
+extern const PropertyInfo qdev_prop_off_auto_pcibar;
+extern const PropertyInfo qdev_prop_pcie_link_speed;
+extern const PropertyInfo qdev_prop_pcie_link_width;
+extern const PropertyInfo qdev_prop_cpus390entitlement;
+extern const PropertyInfo qdev_prop_iothread_vq_mapping_list;
+
+#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
+
+#define DEFINE_PROP_CHR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_chr, CharBackend)
+#define DEFINE_PROP_NETDEV(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_netdev, NICPeers)
+#define DEFINE_PROP_DRIVE(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_drive, BlockBackend *)
+#define DEFINE_PROP_DRIVE_IOTHREAD(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_drive_iothread, BlockBackend *)
+#define DEFINE_PROP_MACADDR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_macaddr, MACAddr)
+#define DEFINE_PROP_RESERVED_REGION(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_reserved_region, ReservedRegion)
+#define DEFINE_PROP_MULTIFD_COMPRESSION(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_multifd_compression, \
+ MultiFDCompression)
+#define DEFINE_PROP_MIG_MODE(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_mig_mode, \
+ MigMode)
+#define DEFINE_PROP_GRANULE_MODE(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_granule_mode, GranuleMode)
+#define DEFINE_PROP_ZERO_PAGE_DETECTION(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_zero_page_detection, \
+ ZeroPageDetection)
+#define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_losttickpolicy, \
+ LostTickPolicy)
+#define DEFINE_PROP_BLOCKDEV_ON_ERROR(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_on_error, \
+ BlockdevOnError)
+#define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int)
+#define DEFINE_PROP_BLOCKSIZE(_n, _s, _f) \
+ DEFINE_PROP_UNSIGNED(_n, _s, _f, 0, qdev_prop_blocksize, uint32_t)
+#define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress)
+#define DEFINE_PROP_OFF_AUTO_PCIBAR(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_off_auto_pcibar, \
+ OffAutoPCIBAR)
+#define DEFINE_PROP_PCIE_LINK_SPEED(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_speed, \
+ PCIExpLinkSpeed)
+#define DEFINE_PROP_PCIE_LINK_WIDTH(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_width, \
+ PCIExpLinkWidth)
+
+#define DEFINE_PROP_UUID(_name, _state, _field) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_uuid, QemuUUID, \
+ .set_default = true)
+
+#define DEFINE_PROP_AUDIODEV(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_audiodev, QEMUSoundCard)
+
+#define DEFINE_PROP_UUID_NODEFAULT(_name, _state, _field) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_uuid, QemuUUID)
+
+#define DEFINE_PROP_CPUS390ENTITLEMENT(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \
+ CpuS390Entitlement)
+
+#define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \
+ IOThreadVirtQueueMappingList *)
+
+#endif
diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h
index 8f3a98cba6..09aa04ca1e 100644
--- a/include/hw/qdev-properties.h
+++ b/include/hw/qdev-properties.h
@@ -3,113 +3,117 @@
#include "hw/qdev-core.h"
+/**
+ * Property:
+ * @set_default: true if the default value should be set from @defval,
+ * in which case @info->set_default_value must not be NULL
+ * (if false then no default value is set by the property system
+ * and the field retains whatever value it was given by instance_init).
+ * @defval: default value for the property. This is used only if @set_default
+ * is true.
+ */
+struct Property {
+ const char *name;
+ const PropertyInfo *info;
+ ptrdiff_t offset;
+ uint8_t bitnr;
+ uint64_t bitmask;
+ bool set_default;
+ union {
+ int64_t i;
+ uint64_t u;
+ } defval;
+ int arrayoffset;
+ const PropertyInfo *arrayinfo;
+ int arrayfieldsize;
+ const char *link_type;
+};
+
+struct PropertyInfo {
+ const char *name;
+ const char *description;
+ const QEnumLookup *enum_table;
+ bool realized_set_allowed; /* allow setting property on realized device */
+ int (*print)(Object *obj, Property *prop, char *dest, size_t len);
+ void (*set_default_value)(ObjectProperty *op, const Property *prop);
+ ObjectProperty *(*create)(ObjectClass *oc, const char *name,
+ Property *prop);
+ ObjectPropertyAccessor *get;
+ ObjectPropertyAccessor *set;
+ ObjectPropertyRelease *release;
+};
+
+
/*** qdev-properties.c ***/
extern const PropertyInfo qdev_prop_bit;
extern const PropertyInfo qdev_prop_bit64;
extern const PropertyInfo qdev_prop_bool;
+extern const PropertyInfo qdev_prop_enum;
extern const PropertyInfo qdev_prop_uint8;
extern const PropertyInfo qdev_prop_uint16;
extern const PropertyInfo qdev_prop_uint32;
extern const PropertyInfo qdev_prop_int32;
extern const PropertyInfo qdev_prop_uint64;
+extern const PropertyInfo qdev_prop_uint64_checkmask;
extern const PropertyInfo qdev_prop_int64;
extern const PropertyInfo qdev_prop_size;
extern const PropertyInfo qdev_prop_string;
-extern const PropertyInfo qdev_prop_chr;
-extern const PropertyInfo qdev_prop_tpm;
-extern const PropertyInfo qdev_prop_macaddr;
-extern const PropertyInfo qdev_prop_reserved_region;
extern const PropertyInfo qdev_prop_on_off_auto;
-extern const PropertyInfo qdev_prop_multifd_compression;
-extern const PropertyInfo qdev_prop_losttickpolicy;
-extern const PropertyInfo qdev_prop_blockdev_on_error;
-extern const PropertyInfo qdev_prop_bios_chs_trans;
-extern const PropertyInfo qdev_prop_fdc_drive_type;
-extern const PropertyInfo qdev_prop_drive;
-extern const PropertyInfo qdev_prop_drive_iothread;
-extern const PropertyInfo qdev_prop_netdev;
-extern const PropertyInfo qdev_prop_pci_devfn;
extern const PropertyInfo qdev_prop_size32;
-extern const PropertyInfo qdev_prop_blocksize;
-extern const PropertyInfo qdev_prop_pci_host_devaddr;
-extern const PropertyInfo qdev_prop_uuid;
-extern const PropertyInfo qdev_prop_arraylen;
-extern const PropertyInfo qdev_prop_audiodev;
+extern const PropertyInfo qdev_prop_array;
extern const PropertyInfo qdev_prop_link;
-extern const PropertyInfo qdev_prop_off_auto_pcibar;
-extern const PropertyInfo qdev_prop_pcie_link_speed;
-extern const PropertyInfo qdev_prop_pcie_link_width;
-#define DEFINE_PROP(_name, _state, _field, _prop, _type) { \
+#define DEFINE_PROP(_name, _state, _field, _prop, _type, ...) { \
.name = (_name), \
.info = &(_prop), \
.offset = offsetof(_state, _field) \
+ type_check(_type, typeof_field(_state, _field)), \
+ __VA_ARGS__ \
}
-#define DEFINE_PROP_SIGNED(_name, _state, _field, _defval, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type,typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.i = (_type)_defval, \
- }
+#define DEFINE_PROP_SIGNED(_name, _state, _field, _defval, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type, \
+ .set_default = true, \
+ .defval.i = (_type)_defval)
-#define DEFINE_PROP_SIGNED_NODEFAULT(_name, _state, _field, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type, typeof_field(_state, _field)), \
- }
+#define DEFINE_PROP_SIGNED_NODEFAULT(_name, _state, _field, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type)
-#define DEFINE_PROP_BIT(_name, _state, _field, _bit, _defval) { \
- .name = (_name), \
- .info = &(qdev_prop_bit), \
- .bitnr = (_bit), \
- .offset = offsetof(_state, _field) \
- + type_check(uint32_t,typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (bool)_defval, \
- }
+#define DEFINE_PROP_BIT(_name, _state, _field, _bit, _defval) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_bit, uint32_t, \
+ .bitnr = (_bit), \
+ .set_default = true, \
+ .defval.u = (bool)_defval)
-#define DEFINE_PROP_UNSIGNED(_name, _state, _field, _defval, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type, typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (_type)_defval, \
- }
+#define DEFINE_PROP_UNSIGNED(_name, _state, _field, _defval, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type, \
+ .set_default = true, \
+ .defval.u = (_type)_defval)
-#define DEFINE_PROP_UNSIGNED_NODEFAULT(_name, _state, _field, _prop, _type) { \
- .name = (_name), \
- .info = &(_prop), \
- .offset = offsetof(_state, _field) \
- + type_check(_type, typeof_field(_state, _field)), \
- }
+#define DEFINE_PROP_UNSIGNED_NODEFAULT(_name, _state, _field, _prop, _type) \
+ DEFINE_PROP(_name, _state, _field, _prop, _type)
-#define DEFINE_PROP_BIT64(_name, _state, _field, _bit, _defval) { \
- .name = (_name), \
- .info = &(qdev_prop_bit64), \
- .bitnr = (_bit), \
- .offset = offsetof(_state, _field) \
- + type_check(uint64_t, typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (bool)_defval, \
- }
+#define DEFINE_PROP_BIT64(_name, _state, _field, _bit, _defval) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_bit64, uint64_t, \
+ .bitnr = (_bit), \
+ .set_default = true, \
+ .defval.u = (bool)_defval)
-#define DEFINE_PROP_BOOL(_name, _state, _field, _defval) { \
- .name = (_name), \
- .info = &(qdev_prop_bool), \
- .offset = offsetof(_state, _field) \
- + type_check(bool, typeof_field(_state, _field)), \
- .set_default = true, \
- .defval.u = (bool)_defval, \
- }
+#define DEFINE_PROP_BOOL(_name, _state, _field, _defval) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_bool, bool, \
+ .set_default = true, \
+ .defval.u = (bool)_defval)
-#define PROP_ARRAY_LEN_PREFIX "len-"
+/**
+ * The DEFINE_PROP_UINT64_CHECKMASK macro checks a user-supplied value
+ * against the corresponding bitmask and rejects the value if it
+ * violates the mask. The default value is set in instance_init().
+ */
+#define DEFINE_PROP_UINT64_CHECKMASK(_name, _state, _field, _bitmask) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_uint64_checkmask, uint64_t, \
+ .bitmask = (_bitmask), \
+ .set_default = false)
/**
* DEFINE_PROP_ARRAY:
@@ -121,40 +125,30 @@ extern const PropertyInfo qdev_prop_pcie_link_width;
* @_arrayprop: PropertyInfo defining what property the array elements have
* @_arraytype: C type of the array elements
*
- * Define device properties for a variable-length array _name. A
- * static property "len-arrayname" is defined. When the device creator
- * sets this property to the desired length of array, further dynamic
- * properties "arrayname[0]", "arrayname[1]", ... are defined so the
- * device creator can set the array element values. Setting the
- * "len-arrayname" property more than once is an error.
+ * Define device properties for a variable-length array _name. The array is
+ * represented as a list in the visitor interface.
+ *
+ * @_arraytype is required to be movable with memcpy().
*
- * When the array length is set, the @_field member of the device
+ * When the array property is set, the @_field member of the device
* struct is set to the array length, and @_arrayfield is set to point
- * to (zero-initialised) memory allocated for the array. For a zero
- * length array, @_field will be set to 0 and @_arrayfield to NULL.
+ * to the memory allocated for the array.
+ *
* It is the responsibility of the device deinit code to free the
* @_arrayfield memory.
*/
#define DEFINE_PROP_ARRAY(_name, _state, _field, \
- _arrayfield, _arrayprop, _arraytype) { \
- .name = (PROP_ARRAY_LEN_PREFIX _name), \
- .info = &(qdev_prop_arraylen), \
- .set_default = true, \
- .defval.u = 0, \
- .offset = offsetof(_state, _field) \
- + type_check(uint32_t, typeof_field(_state, _field)), \
- .arrayinfo = &(_arrayprop), \
- .arrayfieldsize = sizeof(_arraytype), \
- .arrayoffset = offsetof(_state, _arrayfield), \
- }
+ _arrayfield, _arrayprop, _arraytype) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_array, uint32_t, \
+ .set_default = true, \
+ .defval.u = 0, \
+ .arrayinfo = &(_arrayprop), \
+ .arrayfieldsize = sizeof(_arraytype), \
+ .arrayoffset = offsetof(_state, _arrayfield))
-#define DEFINE_PROP_LINK(_name, _state, _field, _type, _ptr_type) { \
- .name = (_name), \
- .info = &(qdev_prop_link), \
- .offset = offsetof(_state, _field) \
- + type_check(_ptr_type, typeof_field(_state, _field)), \
- .link_type = _type, \
- }
+#define DEFINE_PROP_LINK(_name, _state, _field, _type, _ptr_type) \
+ DEFINE_PROP(_name, _state, _field, qdev_prop_link, _ptr_type, \
+ .link_type = _type)
#define DEFINE_PROP_UINT8(_n, _s, _f, _d) \
DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_uint8, uint8_t)
@@ -170,74 +164,20 @@ extern const PropertyInfo qdev_prop_pcie_link_width;
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_int64, int64_t)
#define DEFINE_PROP_SIZE(_n, _s, _f, _d) \
DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size, uint64_t)
-#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
-
-#define DEFINE_PROP_CHR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_chr, CharBackend)
#define DEFINE_PROP_STRING(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, qdev_prop_string, char*)
-#define DEFINE_PROP_NETDEV(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_netdev, NICPeers)
-#define DEFINE_PROP_DRIVE(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_drive, BlockBackend *)
-#define DEFINE_PROP_DRIVE_IOTHREAD(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_drive_iothread, BlockBackend *)
-#define DEFINE_PROP_MACADDR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_macaddr, MACAddr)
-#define DEFINE_PROP_RESERVED_REGION(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_reserved_region, ReservedRegion)
#define DEFINE_PROP_ON_OFF_AUTO(_n, _s, _f, _d) \
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_on_off_auto, OnOffAuto)
-#define DEFINE_PROP_MULTIFD_COMPRESSION(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_multifd_compression, \
- MultiFDCompression)
-#define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_losttickpolicy, \
- LostTickPolicy)
-#define DEFINE_PROP_BLOCKDEV_ON_ERROR(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_on_error, \
- BlockdevOnError)
-#define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int)
#define DEFINE_PROP_SIZE32(_n, _s, _f, _d) \
DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size32, uint32_t)
-#define DEFINE_PROP_BLOCKSIZE(_n, _s, _f) \
- DEFINE_PROP_UNSIGNED(_n, _s, _f, 0, qdev_prop_blocksize, uint32_t)
-#define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress)
-#define DEFINE_PROP_OFF_AUTO_PCIBAR(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_off_auto_pcibar, \
- OffAutoPCIBAR)
-#define DEFINE_PROP_PCIE_LINK_SPEED(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_speed, \
- PCIExpLinkSpeed)
-#define DEFINE_PROP_PCIE_LINK_WIDTH(_n, _s, _f, _d) \
- DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pcie_link_width, \
- PCIExpLinkWidth)
-
-#define DEFINE_PROP_UUID(_name, _state, _field) { \
- .name = (_name), \
- .info = &qdev_prop_uuid, \
- .offset = offsetof(_state, _field) \
- + type_check(QemuUUID, typeof_field(_state, _field)), \
- .set_default = true, \
- }
-#define DEFINE_PROP_AUDIODEV(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_audiodev, QEMUSoundCard)
-
-#define DEFINE_PROP_UUID_NODEFAULT(_name, _state, _field) { \
- .name = (_name), \
- .info = &qdev_prop_uuid, \
- .offset = offsetof(_state, _field) \
- + type_check(QemuUUID, typeof_field(_state, _field)), \
- }
#define DEFINE_PROP_END_OF_LIST() \
{}
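Putting the macros together, a sketch of a device's static property table and how it is attached in class_init (the type, fields and defaults are made up for illustration):

static Property my_dev_properties[] = {
    DEFINE_PROP_UINT32("buf-size", MyDevState, buf_size, 256),
    DEFINE_PROP_BOOL("enabled", MyDevState, enabled, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void my_dev_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_props(dc, my_dev_properties);
}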
/*
* Set properties between creation and realization.
+ *
+ * Returns: %true on success, %false on error.
*/
bool qdev_prop_set_drive_err(DeviceState *dev, const char *name,
BlockBackend *value, Error **errp);
@@ -261,15 +201,18 @@ void qdev_prop_set_macaddr(DeviceState *dev, const char *name,
const uint8_t *value);
void qdev_prop_set_enum(DeviceState *dev, const char *name, int value);
-void *qdev_get_prop_ptr(DeviceState *dev, Property *prop);
+/* Takes ownership of @values */
+void qdev_prop_set_array(DeviceState *dev, const char *name, QList *values);
+
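
Since qdev_prop_set_array() takes ownership of @values, a caller builds a fresh QList and hands it over. A minimal sketch, assuming the qobject/qlist.h helpers; the "foo-list" property name is illustrative:

static void set_foo_list(DeviceState *dev)
{
    QList *values = qlist_new();

    qlist_append_int(values, 0);
    qlist_append_int(values, 1);
    /* Ownership of @values passes to the property code here. */
    qdev_prop_set_array(dev, "foo-list", values);
}
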
+void *object_field_prop_ptr(Object *obj, Property *prop);
void qdev_prop_register_global(GlobalProperty *prop);
-const GlobalProperty *qdev_find_global_prop(DeviceState *dev,
+const GlobalProperty *qdev_find_global_prop(Object *obj,
const char *name);
int qdev_prop_check_globals(void);
void qdev_prop_set_globals(DeviceState *dev);
-void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev,
- Property *prop, const char *value);
+void error_set_from_qdev_prop_error(Error **errp, int ret, Object *obj,
+ const char *name, const char *value);
/**
* qdev_property_add_static:
@@ -287,8 +230,8 @@ void qdev_property_add_static(DeviceState *dev, Property *prop);
* @target: Device which has properties to be aliased
* @source: Object to add alias properties to
*
- * Add alias properties to the @source object for all qdev properties on
- * the @target DeviceState.
+ * Add alias properties to the @source object for all properties on the @target
+ * DeviceState.
*
* This is useful when @target is an internal implementation object
* owned by @source, and you want to expose all the properties of that
diff --git a/include/hw/rdma/rdma.h b/include/hw/rdma/rdma.h
deleted file mode 100644
index 68290fb58c..0000000000
--- a/include/hw/rdma/rdma.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * RDMA device interface
- *
- * Copyright (C) 2019 Oracle
- * Copyright (C) 2019 Red Hat Inc
- *
- * Authors:
- * Yuval Shaia <yuval.shaia@oracle.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef RDMA_H
-#define RDMA_H
-
-#include "qom/object.h"
-
-#define INTERFACE_RDMA_PROVIDER "rdma"
-
-#define INTERFACE_RDMA_PROVIDER_CLASS(klass) \
- OBJECT_CLASS_CHECK(RdmaProviderClass, (klass), \
- INTERFACE_RDMA_PROVIDER)
-#define RDMA_PROVIDER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(RdmaProviderClass, (obj), \
- INTERFACE_RDMA_PROVIDER)
-#define RDMA_PROVIDER(obj) \
- INTERFACE_CHECK(RdmaProvider, (obj), \
- INTERFACE_RDMA_PROVIDER)
-
-typedef struct RdmaProvider RdmaProvider;
-
-typedef struct RdmaProviderClass {
- InterfaceClass parent;
-
- void (*print_statistics)(Monitor *mon, RdmaProvider *obj);
-} RdmaProviderClass;
-
-#endif
diff --git a/include/hw/register.h b/include/hw/register.h
index 5d2c565ae0..6a076cfcdf 100644
--- a/include/hw/register.h
+++ b/include/hw/register.h
@@ -14,6 +14,7 @@
#include "hw/qdev-core.h"
#include "exec/memory.h"
#include "hw/registerfields.h"
+#include "qom/object.h"
typedef struct RegisterInfo RegisterInfo;
typedef struct RegisterAccessInfo RegisterAccessInfo;
@@ -86,8 +87,9 @@ struct RegisterInfo {
void *opaque;
};
-#define TYPE_REGISTER "qemu,register"
-#define REGISTER(obj) OBJECT_CHECK(RegisterInfo, (obj), TYPE_REGISTER)
+#define TYPE_REGISTER "qemu-register"
+DECLARE_INSTANCE_CHECKER(RegisterInfo, REGISTER,
+ TYPE_REGISTER)
/**
* This structure is used to group all of the individual registers which are
@@ -181,6 +183,7 @@ uint64_t register_read_memory(void *opaque, hwaddr addr, unsigned size);
* @data: Array to use for register data, must already be allocated
* @ops: Memory region ops to access registers.
 * @debug_enabled: turn on/off verbose debug information
+ * @memory_size: Size of the memory region
* returns: A structure containing all of the registers and an initialized
* memory region (r_array->mem) the caller should add to a container.
*/
@@ -201,6 +204,14 @@ RegisterInfoArray *register_init_block32(DeviceState *owner,
bool debug_enabled,
uint64_t memory_size);
+RegisterInfoArray *register_init_block64(DeviceState *owner,
+ const RegisterAccessInfo *rae,
+ int num, RegisterInfo *ri,
+ uint64_t *data,
+ const MemoryRegionOps *ops,
+ bool debug_enabled,
+ uint64_t memory_size);
+
/**
* This function should be called to cleanup the registers that were initialized
* when calling register_init_block32(). This function should only be called
diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h
index 93fa4a84c2..1330ca77de 100644
--- a/include/hw/registerfields.h
+++ b/include/hw/registerfields.h
@@ -30,6 +30,10 @@
enum { A_ ## reg = (addr) }; \
enum { R_ ## reg = (addr) / 2 };
+#define REG64(reg, addr) \
+ enum { A_ ## reg = (addr) }; \
+ enum { R_ ## reg = (addr) / 8 };
+
/* Define SHIFT, LENGTH and MASK constants for a field within a register */
/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH
@@ -55,9 +59,24 @@
extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX8(storage, reg, field) \
+ sextract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX16(storage, reg, field) \
+ sextract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX32(storage, reg, field) \
+ sextract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_SEX64(storage, reg, field) \
+ sextract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+
/* Extract a field from an array of registers */
#define ARRAY_FIELD_EX32(regs, reg, field) \
FIELD_EX32((regs)[R_ ## reg], reg, field)
+#define ARRAY_FIELD_EX64(regs, reg, field) \
+ FIELD_EX64((regs)[R_ ## reg], reg, field)
/* Deposit a register field.
* Assigning values larger then the target field will result in
@@ -89,7 +108,40 @@
_d; })
#define FIELD_DP64(storage, reg, field, val) ({ \
struct { \
- unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ uint64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint64_t _d; \
+ _d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+
+#define FIELD_SDP8(storage, reg, field, val) ({ \
+ struct { \
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint8_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_SDP16(storage, reg, field, val) ({ \
+ struct { \
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint16_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_SDP32(storage, reg, field, val) ({ \
+ struct { \
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint32_t _d; \
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
+ _d; })
+#define FIELD_SDP64(storage, reg, field, val) ({ \
+ struct { \
+ int64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
} _v = { .v = val }; \
uint64_t _d; \
_d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
@@ -99,5 +151,77 @@
/* Deposit a field to array of registers. */
#define ARRAY_FIELD_DP32(regs, reg, field, val) \
(regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val);
+#define ARRAY_FIELD_DP64(regs, reg, field, val) \
+ (regs)[R_ ## reg] = FIELD_DP64((regs)[R_ ## reg], reg, field, val);
+
+
+/*
+ * These macros can be used for defining and extracting fields that have the
+ * same bit position across multiple registers.
+ */
+
+/* Define shared SHIFT, LENGTH, and MASK constants */
+#define SHARED_FIELD(name, shift, length) \
+ enum { name ## _ ## SHIFT = (shift)}; \
+ enum { name ## _ ## LENGTH = (length)}; \
+ enum { name ## _ ## MASK = MAKE_64BIT_MASK(shift, length)};
+
+/* Extract a shared field */
+#define SHARED_FIELD_EX8(storage, field) \
+ extract8((storage), field ## _SHIFT, field ## _LENGTH)
+
+#define SHARED_FIELD_EX16(storage, field) \
+ extract16((storage), field ## _SHIFT, field ## _LENGTH)
+
+#define SHARED_FIELD_EX32(storage, field) \
+ extract32((storage), field ## _SHIFT, field ## _LENGTH)
+
+#define SHARED_FIELD_EX64(storage, field) \
+ extract64((storage), field ## _SHIFT, field ## _LENGTH)
+
+/* Extract a shared field from a register array */
+#define SHARED_ARRAY_FIELD_EX32(regs, offset, field) \
+ SHARED_FIELD_EX32((regs)[(offset)], field)
+#define SHARED_ARRAY_FIELD_EX64(regs, offset, field) \
+ SHARED_FIELD_EX64((regs)[(offset)], field)
+
+/* Deposit a shared field */
+#define SHARED_FIELD_DP8(storage, field, val) ({ \
+ struct { \
+ unsigned int v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint8_t _d; \
+ _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+#define SHARED_FIELD_DP16(storage, field, val) ({ \
+ struct { \
+ unsigned int v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint16_t _d; \
+ _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+#define SHARED_FIELD_DP32(storage, field, val) ({ \
+ struct { \
+ unsigned int v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint32_t _d; \
+ _d = deposit32((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+#define SHARED_FIELD_DP64(storage, field, val) ({ \
+ struct { \
+ uint64_t v:field ## _LENGTH; \
+ } _v = { .v = val }; \
+ uint64_t _d; \
+ _d = deposit64((storage), field ## _SHIFT, field ## _LENGTH, _v.v); \
+ _d; })
+
+/* Deposit a shared field to a register array */
+#define SHARED_ARRAY_FIELD_DP32(regs, offset, field, val) \
+ (regs)[(offset)] = SHARED_FIELD_DP32((regs)[(offset)], field, val);
+#define SHARED_ARRAY_FIELD_DP64(regs, offset, field, val) \
+ (regs)[(offset)] = SHARED_FIELD_DP64((regs)[(offset)], field, val);
#endif
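
As a usage sketch for the field macros in this header (register and field names are made up for illustration; REG32() and FIELD() come from earlier in the same file):

REG32(CTRL, 0x00)
    FIELD(CTRL, ENABLE, 0, 1)   /* bit 0 */
    FIELD(CTRL, MODE,   1, 3)   /* bits 1..3 */

SHARED_FIELD(CHAN_ID, 4, 4)     /* bits 4..7, same position in all registers */

static uint32_t update_ctrl(uint32_t ctrl, unsigned mode, unsigned chan)
{
    ctrl = FIELD_DP32(ctrl, CTRL, MODE, mode);       /* deposit a field */
    ctrl = SHARED_FIELD_DP32(ctrl, CHAN_ID, chan);   /* deposit a shared field */
    return ctrl;
}

static unsigned read_mode(uint32_t ctrl)
{
    return FIELD_EX32(ctrl, CTRL, MODE);             /* extract it again */
}
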
diff --git a/include/hw/remote/iohub.h b/include/hw/remote/iohub.h
new file mode 100644
index 0000000000..6a8444f9a9
--- /dev/null
+++ b/include/hw/remote/iohub.h
@@ -0,0 +1,42 @@
+/*
+ * IO Hub for remote device
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_IOHUB_H
+#define REMOTE_IOHUB_H
+
+#include "hw/pci/pci_device.h"
+#include "qemu/event_notifier.h"
+#include "qemu/thread-posix.h"
+#include "hw/remote/mpqemu-link.h"
+
+#define REMOTE_IOHUB_NB_PIRQS PCI_DEVFN_MAX
+
+typedef struct ResampleToken {
+ void *iohub;
+ int pirq;
+} ResampleToken;
+
+typedef struct RemoteIOHubState {
+ PCIDevice d;
+ EventNotifier irqfds[REMOTE_IOHUB_NB_PIRQS];
+ EventNotifier resamplefds[REMOTE_IOHUB_NB_PIRQS];
+ unsigned int irq_level[REMOTE_IOHUB_NB_PIRQS];
+ ResampleToken token[REMOTE_IOHUB_NB_PIRQS];
+ QemuMutex irq_level_lock[REMOTE_IOHUB_NB_PIRQS];
+} RemoteIOHubState;
+
+int remote_iohub_map_irq(PCIDevice *pci_dev, int intx);
+void remote_iohub_set_irq(void *opaque, int pirq, int level);
+void process_set_irqfd_msg(PCIDevice *pci_dev, MPQemuMsg *msg);
+
+void remote_iohub_init(RemoteIOHubState *iohub);
+void remote_iohub_finalize(RemoteIOHubState *iohub);
+
+#endif
diff --git a/include/hw/remote/iommu.h b/include/hw/remote/iommu.h
new file mode 100644
index 0000000000..33b68a8f4b
--- /dev/null
+++ b/include/hw/remote/iommu.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright © 2022 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_IOMMU_H
+#define REMOTE_IOMMU_H
+
+#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci.h"
+
+#ifndef INT2VOIDP
+#define INT2VOIDP(i) (void *)(uintptr_t)(i)
+#endif
+
+typedef struct RemoteIommuElem {
+ MemoryRegion *mr;
+
+ AddressSpace as;
+} RemoteIommuElem;
+
+#define TYPE_REMOTE_IOMMU "x-remote-iommu"
+OBJECT_DECLARE_SIMPLE_TYPE(RemoteIommu, REMOTE_IOMMU)
+
+struct RemoteIommu {
+ Object parent;
+
+ GHashTable *elem_by_devfn;
+
+ QemuMutex lock;
+};
+
+void remote_iommu_setup(PCIBus *pci_bus);
+
+void remote_iommu_unplug_dev(PCIDevice *pci_dev);
+
+#endif
diff --git a/include/hw/remote/machine.h b/include/hw/remote/machine.h
new file mode 100644
index 0000000000..ac32fda387
--- /dev/null
+++ b/include/hw/remote/machine.h
@@ -0,0 +1,42 @@
+/*
+ * Remote machine configuration
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_MACHINE_H
+#define REMOTE_MACHINE_H
+
+#include "qom/object.h"
+#include "hw/boards.h"
+#include "hw/pci-host/remote.h"
+#include "io/channel.h"
+#include "hw/remote/iohub.h"
+
+struct RemoteMachineState {
+ MachineState parent_obj;
+
+ RemotePCIHost *host;
+ RemoteIOHubState iohub;
+
+ bool vfio_user;
+
+ bool auto_shutdown;
+};
+
+/* Used to pass the device and ioc to the coroutine. */
+typedef struct RemoteCommDev {
+ PCIDevice *dev;
+ QIOChannel *ioc;
+} RemoteCommDev;
+
+#define TYPE_REMOTE_MACHINE "x-remote-machine"
+OBJECT_DECLARE_SIMPLE_TYPE(RemoteMachineState, REMOTE_MACHINE)
+
+void coroutine_fn mpqemu_remote_msg_loop_co(void *data);
+
+#endif
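
A hedged sketch of how a device/channel pair might be handed to the message loop declared above; the allocation pattern and helper name are illustrative:

static void start_remote_loop(PCIDevice *dev, QIOChannel *ioc)
{
    /* In this sketch, the coroutine is assumed to free comdev when done. */
    RemoteCommDev *comdev = g_new0(RemoteCommDev, 1);
    Coroutine *co;

    *comdev = (RemoteCommDev) {
        .dev = dev,
        .ioc = ioc,
    };
    co = qemu_coroutine_create(mpqemu_remote_msg_loop_co, comdev);
    qemu_coroutine_enter(co);
}
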
diff --git a/include/hw/remote/memory.h b/include/hw/remote/memory.h
new file mode 100644
index 0000000000..bc2e30945f
--- /dev/null
+++ b/include/hw/remote/memory.h
@@ -0,0 +1,19 @@
+/*
+ * Memory manager for remote device
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef REMOTE_MEMORY_H
+#define REMOTE_MEMORY_H
+
+#include "exec/hwaddr.h"
+#include "hw/remote/mpqemu-link.h"
+
+void remote_sysmem_reconfig(MPQemuMsg *msg, Error **errp);
+
+#endif
diff --git a/include/hw/remote/mpqemu-link.h b/include/hw/remote/mpqemu-link.h
new file mode 100644
index 0000000000..4ec0915885
--- /dev/null
+++ b/include/hw/remote/mpqemu-link.h
@@ -0,0 +1,99 @@
+/*
+ * Communication channel between QEMU and remote device process
+ *
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MPQEMU_LINK_H
+#define MPQEMU_LINK_H
+
+#include "qom/object.h"
+#include "qemu/thread.h"
+#include "io/channel.h"
+#include "exec/hwaddr.h"
+#include "io/channel-socket.h"
+#include "hw/remote/proxy.h"
+
+#define REMOTE_MAX_FDS 8
+
+#define MPQEMU_MSG_HDR_SIZE offsetof(MPQemuMsg, data.u64)
+
+/**
+ * MPQemuCmd:
+ *
+ * Enum type specifying the command to be executed on the remote
+ * device.
+ *
+ * This uses a private protocol between QEMU and the remote process. The
+ * vfio-user protocol is expected to supersede it in the future.
+ *
+ */
+typedef enum {
+ MPQEMU_CMD_SYNC_SYSMEM,
+ MPQEMU_CMD_RET,
+ MPQEMU_CMD_PCI_CFGWRITE,
+ MPQEMU_CMD_PCI_CFGREAD,
+ MPQEMU_CMD_BAR_WRITE,
+ MPQEMU_CMD_BAR_READ,
+ MPQEMU_CMD_SET_IRQFD,
+ MPQEMU_CMD_DEVICE_RESET,
+ MPQEMU_CMD_MAX,
+} MPQemuCmd;
+
+typedef struct {
+ hwaddr gpas[REMOTE_MAX_FDS];
+ uint64_t sizes[REMOTE_MAX_FDS];
+ off_t offsets[REMOTE_MAX_FDS];
+} SyncSysmemMsg;
+
+typedef struct {
+ uint32_t addr;
+ uint32_t val;
+ int len;
+} PciConfDataMsg;
+
+typedef struct {
+ hwaddr addr;
+ uint64_t val;
+ unsigned size;
+ bool memory;
+} BarAccessMsg;
+
+/**
+ * MPQemuMsg:
+ * @cmd: The remote command
+ * @size: Size of the data to be shared
+ * @data: Structured data
+ * @fds: File descriptors to be shared with remote device
+ *
+ * MPQemuMsg is the format of the message sent from QEMU to the remote device.
+ *
+ */
+
+typedef struct {
+ int cmd;
+ size_t size;
+
+ union {
+ uint64_t u64;
+ PciConfDataMsg pci_conf_data;
+ SyncSysmemMsg sync_sysmem;
+ BarAccessMsg bar_access;
+ } data;
+
+ int fds[REMOTE_MAX_FDS];
+ int num_fds;
+} MPQemuMsg;
+
+bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
+bool mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
+
+uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
+ Error **errp);
+bool mpqemu_msg_valid(MPQemuMsg *msg);
+
+#endif
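
A hedged sketch of composing one of these messages on the QEMU side, here a BAR write; the channel is assumed to be already connected and the helper name is illustrative:

static bool send_bar_write(QIOChannel *ioc, hwaddr addr, uint64_t val,
                           unsigned size, Error **errp)
{
    MPQemuMsg msg = {
        .cmd = MPQEMU_CMD_BAR_WRITE,
        .size = sizeof(BarAccessMsg),
        .data.bar_access = {
            .addr = addr,
            .val = val,
            .size = size,
            .memory = true,     /* MMIO rather than I/O port space */
        },
    };

    return mpqemu_msg_send(&msg, ioc, errp);
}
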
diff --git a/include/hw/remote/proxy-memory-listener.h b/include/hw/remote/proxy-memory-listener.h
new file mode 100644
index 0000000000..c4f3efb928
--- /dev/null
+++ b/include/hw/remote/proxy-memory-listener.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PROXY_MEMORY_LISTENER_H
+#define PROXY_MEMORY_LISTENER_H
+
+#include "exec/memory.h"
+#include "io/channel.h"
+
+typedef struct ProxyMemoryListener {
+ MemoryListener listener;
+
+ int n_mr_sections;
+ MemoryRegionSection *mr_sections;
+
+ QIOChannel *ioc;
+} ProxyMemoryListener;
+
+void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener,
+ QIOChannel *ioc);
+void proxy_memory_listener_deconfigure(ProxyMemoryListener *proxy_listener);
+
+#endif
diff --git a/include/hw/remote/proxy.h b/include/hw/remote/proxy.h
new file mode 100644
index 0000000000..0cfd9665be
--- /dev/null
+++ b/include/hw/remote/proxy.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2018, 2021 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PROXY_H
+#define PROXY_H
+
+#include "hw/pci/pci_device.h"
+#include "io/channel.h"
+#include "hw/remote/proxy-memory-listener.h"
+#include "qemu/event_notifier.h"
+
+#define TYPE_PCI_PROXY_DEV "x-pci-proxy-dev"
+OBJECT_DECLARE_SIMPLE_TYPE(PCIProxyDev, PCI_PROXY_DEV)
+
+typedef struct ProxyMemoryRegion {
+ PCIProxyDev *dev;
+ MemoryRegion mr;
+ bool memory;
+ bool present;
+ uint8_t type;
+} ProxyMemoryRegion;
+
+struct PCIProxyDev {
+ PCIDevice parent_dev;
+ char *fd;
+
+ /*
+     * Mutex used to protect the QIOChannel fd from
+     * concurrent access by the vCPUs, since the proxy
+     * blocks while awaiting replies from the remote
+     * process.
+ */
+ QemuMutex io_mutex;
+ QIOChannel *ioc;
+ Error *migration_blocker;
+ ProxyMemoryListener proxy_listener;
+ int virq;
+ EventNotifier intr;
+ EventNotifier resample;
+ ProxyMemoryRegion region[PCI_NUM_REGIONS];
+};
+
+#endif /* PROXY_H */
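
A hedged sketch of the proxy side forwarding a config-space read to the remote process using the link helpers; error handling is reduced to the essentials and the function name is illustrative:

static uint32_t proxy_cfg_read(PCIProxyDev *pdev, uint32_t addr, int len)
{
    Error *local_err = NULL;
    MPQemuMsg msg = {
        .cmd = MPQEMU_CMD_PCI_CFGREAD,
        .size = sizeof(PciConfDataMsg),
        .data.pci_conf_data = { .addr = addr, .len = len },
    };
    uint64_t ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err);

    if (local_err) {
        error_report_err(local_err);
    }
    return (uint32_t)ret;
}
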
diff --git a/include/hw/remote/vfio-user-obj.h b/include/hw/remote/vfio-user-obj.h
new file mode 100644
index 0000000000..87ab78b875
--- /dev/null
+++ b/include/hw/remote/vfio-user-obj.h
@@ -0,0 +1,6 @@
+#ifndef VFIO_USER_OBJ_H
+#define VFIO_USER_OBJ_H
+
+void vfu_object_set_bus_irq(PCIBus *pci_bus);
+
+#endif
diff --git a/include/hw/resettable.h b/include/hw/resettable.h
index f4c4bab0ef..bdcd1276b6 100644
--- a/include/hw/resettable.h
+++ b/include/hw/resettable.h
@@ -17,11 +17,10 @@
#define TYPE_RESETTABLE_INTERFACE "resettable"
-#define RESETTABLE_CLASS(class) \
- OBJECT_CLASS_CHECK(ResettableClass, (class), TYPE_RESETTABLE_INTERFACE)
+typedef struct ResettableClass ResettableClass;
+DECLARE_CLASS_CHECKERS(ResettableClass, RESETTABLE,
+ TYPE_RESETTABLE_INTERFACE)
-#define RESETTABLE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ResettableClass, (obj), TYPE_RESETTABLE_INTERFACE)
typedef struct ResettableState ResettableState;
@@ -119,7 +118,7 @@ typedef struct ResettablePhases {
ResettableHoldPhase hold;
ResettableExitPhase exit;
} ResettablePhases;
-typedef struct ResettableClass {
+struct ResettableClass {
InterfaceClass parent_class;
/* Phase methods */
@@ -133,7 +132,7 @@ typedef struct ResettableClass {
/* Hierarchy handling method */
ResettableChildForeach child_foreach;
-} ResettableClass;
+};
/**
* ResettableState:
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index 451338780a..a2e4ae9cb0 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -22,25 +22,45 @@
#include "exec/cpu-defs.h"
#include "hw/loader.h"
+#include "hw/riscv/riscv_hart.h"
-void riscv_find_and_load_firmware(MachineState *machine,
- const char *default_machine_firmware,
- hwaddr firmware_load_addr,
- symbol_fn_t sym_cb);
-char *riscv_find_firmware(const char *firmware_filename);
+#define RISCV32_BIOS_BIN "opensbi-riscv32-generic-fw_dynamic.bin"
+#define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin"
+
+bool riscv_is_32bit(RISCVHartArrayState *harts);
+
+char *riscv_plic_hart_config_string(int hart_count);
+
+target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts,
+ target_ulong firmware_end_addr);
+target_ulong riscv_find_and_load_firmware(MachineState *machine,
+ const char *default_machine_firmware,
+ hwaddr firmware_load_addr,
+ symbol_fn_t sym_cb);
+const char *riscv_default_firmware_name(RISCVHartArrayState *harts);
+char *riscv_find_firmware(const char *firmware_filename,
+ const char *default_machine_firmware);
target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr,
symbol_fn_t sym_cb);
-target_ulong riscv_load_kernel(const char *kernel_filename,
+target_ulong riscv_load_kernel(MachineState *machine,
+ RISCVHartArrayState *harts,
+ target_ulong firmware_end_addr,
+ bool load_initrd,
symbol_fn_t sym_cb);
-hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size,
- uint64_t kernel_entry, hwaddr *start);
-uint32_t riscv_load_fdt(hwaddr dram_start, uint64_t dram_size, void *fdt);
-void riscv_setup_rom_reset_vec(hwaddr saddr, hwaddr rom_base,
- hwaddr rom_size, uint64_t kernel_entry,
- uint32_t fdt_load_addr, void *fdt);
-void riscv_rom_copy_firmware_info(hwaddr rom_base, hwaddr rom_size,
+uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
+ MachineState *ms);
+void riscv_load_fdt(hwaddr fdt_addr, void *fdt);
+void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts,
+ hwaddr saddr,
+ hwaddr rom_base, hwaddr rom_size,
+ uint64_t kernel_entry,
+ uint64_t fdt_load_addr);
+void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base,
+ hwaddr rom_size,
uint32_t reset_vec_size,
uint64_t kernel_entry);
+void riscv_setup_direct_kernel(hwaddr kernel_addr, hwaddr fdt_addr);
+void riscv_setup_firmware_boot(MachineState *machine);
#endif /* RISCV_BOOT_H */
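
A hedged sketch of how a board model might chain the reworked boot helpers; the DRAM base address and the hart-array pointer are illustrative assumptions:

static void my_board_boot(MachineState *machine, RISCVHartArrayState *harts)
{
    const char *fw_name = riscv_default_firmware_name(harts);
    target_ulong fw_end, kernel_entry;
    uint64_t fdt_addr;

    fw_end = riscv_find_and_load_firmware(machine, fw_name,
                                          0x80000000 /* DRAM base */, NULL);
    kernel_entry = riscv_load_kernel(machine, harts, fw_end,
                                     true /* load initrd */, NULL);
    fdt_addr = riscv_compute_fdt_addr(0x80000000, machine->ram_size, machine);
    riscv_load_fdt(fdt_addr, machine->fdt);
    riscv_setup_direct_kernel(kernel_entry, fdt_addr);
}
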
diff --git a/include/hw/riscv/boot_opensbi.h b/include/hw/riscv/boot_opensbi.h
index 0d5ddd6c3d..1b749663dc 100644
--- a/include/hw/riscv/boot_opensbi.h
+++ b/include/hw/riscv/boot_opensbi.h
@@ -4,8 +4,11 @@
*
* Based on include/sbi/{fw_dynamic.h,sbi_scratch.h} from the OpenSBI project.
*/
-#ifndef OPENSBI_H
-#define OPENSBI_H
+
+#ifndef RISCV_BOOT_OPENSBI_H
+#define RISCV_BOOT_OPENSBI_H
+
+#include "exec/cpu-defs.h"
/** Expected value of info magic ('OSBI' ascii string in hex) */
#define FW_DYNAMIC_INFO_MAGIC_VALUE 0x4942534f
diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h
new file mode 100644
index 0000000000..daef086da6
--- /dev/null
+++ b/include/hw/riscv/microchip_pfsoc.h
@@ -0,0 +1,168 @@
+/*
+ * Microchip PolarFire SoC machine interface
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_MICROCHIP_PFSOC_H
+#define HW_MICROCHIP_PFSOC_H
+
+#include "hw/boards.h"
+#include "hw/char/mchp_pfsoc_mmuart.h"
+#include "hw/cpu/cluster.h"
+#include "hw/dma/sifive_pdma.h"
+#include "hw/misc/mchp_pfsoc_dmc.h"
+#include "hw/misc/mchp_pfsoc_ioscb.h"
+#include "hw/misc/mchp_pfsoc_sysreg.h"
+#include "hw/net/cadence_gem.h"
+#include "hw/sd/cadence_sdhci.h"
+#include "hw/riscv/riscv_hart.h"
+
+typedef struct MicrochipPFSoCState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ CPUClusterState e_cluster;
+ CPUClusterState u_cluster;
+ RISCVHartArrayState e_cpus;
+ RISCVHartArrayState u_cpus;
+ DeviceState *plic;
+ MchpPfSoCDdrSgmiiPhyState ddr_sgmii_phy;
+ MchpPfSoCDdrCfgState ddr_cfg;
+ MchpPfSoCIoscbState ioscb;
+ MchpPfSoCMMUartState *serial0;
+ MchpPfSoCMMUartState *serial1;
+ MchpPfSoCMMUartState *serial2;
+ MchpPfSoCMMUartState *serial3;
+ MchpPfSoCMMUartState *serial4;
+ MchpPfSoCSysregState sysreg;
+ SiFivePDMAState dma;
+ CadenceGEMState gem0;
+ CadenceGEMState gem1;
+ CadenceSDHCIState sdhci;
+} MicrochipPFSoCState;
+
+#define TYPE_MICROCHIP_PFSOC "microchip.pfsoc"
+#define MICROCHIP_PFSOC(obj) \
+ OBJECT_CHECK(MicrochipPFSoCState, (obj), TYPE_MICROCHIP_PFSOC)
+
+typedef struct MicrochipIcicleKitState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /*< public >*/
+ MicrochipPFSoCState soc;
+} MicrochipIcicleKitState;
+
+#define TYPE_MICROCHIP_ICICLE_KIT_MACHINE \
+ MACHINE_TYPE_NAME("microchip-icicle-kit")
+#define MICROCHIP_ICICLE_KIT_MACHINE(obj) \
+ OBJECT_CHECK(MicrochipIcicleKitState, (obj), \
+ TYPE_MICROCHIP_ICICLE_KIT_MACHINE)
+
+enum {
+ MICROCHIP_PFSOC_RSVD0,
+ MICROCHIP_PFSOC_DEBUG,
+ MICROCHIP_PFSOC_E51_DTIM,
+ MICROCHIP_PFSOC_BUSERR_UNIT0,
+ MICROCHIP_PFSOC_BUSERR_UNIT1,
+ MICROCHIP_PFSOC_BUSERR_UNIT2,
+ MICROCHIP_PFSOC_BUSERR_UNIT3,
+ MICROCHIP_PFSOC_BUSERR_UNIT4,
+ MICROCHIP_PFSOC_CLINT,
+ MICROCHIP_PFSOC_L2CC,
+ MICROCHIP_PFSOC_DMA,
+ MICROCHIP_PFSOC_L2LIM,
+ MICROCHIP_PFSOC_PLIC,
+ MICROCHIP_PFSOC_MMUART0,
+ MICROCHIP_PFSOC_WDOG0,
+ MICROCHIP_PFSOC_SYSREG,
+ MICROCHIP_PFSOC_AXISW,
+ MICROCHIP_PFSOC_MPUCFG,
+ MICROCHIP_PFSOC_FMETER,
+ MICROCHIP_PFSOC_DDR_SGMII_PHY,
+ MICROCHIP_PFSOC_EMMC_SD,
+ MICROCHIP_PFSOC_DDR_CFG,
+ MICROCHIP_PFSOC_MMUART1,
+ MICROCHIP_PFSOC_MMUART2,
+ MICROCHIP_PFSOC_MMUART3,
+ MICROCHIP_PFSOC_MMUART4,
+ MICROCHIP_PFSOC_WDOG1,
+ MICROCHIP_PFSOC_WDOG2,
+ MICROCHIP_PFSOC_WDOG3,
+ MICROCHIP_PFSOC_WDOG4,
+ MICROCHIP_PFSOC_SPI0,
+ MICROCHIP_PFSOC_SPI1,
+ MICROCHIP_PFSOC_I2C0,
+ MICROCHIP_PFSOC_I2C1,
+ MICROCHIP_PFSOC_CAN0,
+ MICROCHIP_PFSOC_CAN1,
+ MICROCHIP_PFSOC_GEM0,
+ MICROCHIP_PFSOC_GEM1,
+ MICROCHIP_PFSOC_GPIO0,
+ MICROCHIP_PFSOC_GPIO1,
+ MICROCHIP_PFSOC_GPIO2,
+ MICROCHIP_PFSOC_RTC,
+ MICROCHIP_PFSOC_ENVM_CFG,
+ MICROCHIP_PFSOC_ENVM_DATA,
+ MICROCHIP_PFSOC_USB,
+ MICROCHIP_PFSOC_QSPI_XIP,
+ MICROCHIP_PFSOC_IOSCB,
+ MICROCHIP_PFSOC_FABRIC_FIC0,
+ MICROCHIP_PFSOC_FABRIC_FIC1,
+ MICROCHIP_PFSOC_FABRIC_FIC3,
+ MICROCHIP_PFSOC_DRAM_LO,
+ MICROCHIP_PFSOC_DRAM_LO_ALIAS,
+ MICROCHIP_PFSOC_DRAM_HI,
+ MICROCHIP_PFSOC_DRAM_HI_ALIAS
+};
+
+enum {
+ MICROCHIP_PFSOC_DMA_IRQ0 = 5,
+ MICROCHIP_PFSOC_DMA_IRQ1 = 6,
+ MICROCHIP_PFSOC_DMA_IRQ2 = 7,
+ MICROCHIP_PFSOC_DMA_IRQ3 = 8,
+ MICROCHIP_PFSOC_DMA_IRQ4 = 9,
+ MICROCHIP_PFSOC_DMA_IRQ5 = 10,
+ MICROCHIP_PFSOC_DMA_IRQ6 = 11,
+ MICROCHIP_PFSOC_DMA_IRQ7 = 12,
+ MICROCHIP_PFSOC_GEM0_IRQ = 64,
+ MICROCHIP_PFSOC_GEM1_IRQ = 70,
+ MICROCHIP_PFSOC_EMMC_SD_IRQ = 88,
+ MICROCHIP_PFSOC_MMUART0_IRQ = 90,
+ MICROCHIP_PFSOC_MMUART1_IRQ = 91,
+ MICROCHIP_PFSOC_MMUART2_IRQ = 92,
+ MICROCHIP_PFSOC_MMUART3_IRQ = 93,
+ MICROCHIP_PFSOC_MMUART4_IRQ = 94,
+ MICROCHIP_PFSOC_MAILBOX_IRQ = 96,
+};
+
+#define MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT 1
+#define MICROCHIP_PFSOC_COMPUTE_CPU_COUNT 4
+
+#define MICROCHIP_PFSOC_PLIC_NUM_SOURCES 187
+#define MICROCHIP_PFSOC_PLIC_NUM_PRIORITIES 7
+#define MICROCHIP_PFSOC_PLIC_PRIORITY_BASE 0x00
+#define MICROCHIP_PFSOC_PLIC_PENDING_BASE 0x1000
+#define MICROCHIP_PFSOC_PLIC_ENABLE_BASE 0x2000
+#define MICROCHIP_PFSOC_PLIC_ENABLE_STRIDE 0x80
+#define MICROCHIP_PFSOC_PLIC_CONTEXT_BASE 0x200000
+#define MICROCHIP_PFSOC_PLIC_CONTEXT_STRIDE 0x1000
+
+#endif /* HW_MICROCHIP_PFSOC_H */
diff --git a/include/hw/riscv/numa.h b/include/hw/riscv/numa.h
new file mode 100644
index 0000000000..8f5280211d
--- /dev/null
+++ b/include/hw/riscv/numa.h
@@ -0,0 +1,114 @@
+/*
+ * QEMU RISC-V NUMA Helper
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_NUMA_H
+#define RISCV_NUMA_H
+
+#include "hw/boards.h"
+#include "hw/sysbus.h"
+#include "sysemu/numa.h"
+
+/**
+ * riscv_socket_count:
+ * @ms: pointer to machine state
+ *
+ * Returns: number of sockets for a NUMA system and 1 for a non-NUMA system
+ */
+int riscv_socket_count(const MachineState *ms);
+
+/**
+ * riscv_socket_first_hartid:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: first hartid for a valid socket and -1 for an invalid socket
+ */
+int riscv_socket_first_hartid(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_last_hartid:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: last hartid for a valid socket and -1 for an invalid socket
+ */
+int riscv_socket_last_hartid(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_hart_count:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: number of harts for a valid socket and -1 for an invalid socket
+ */
+int riscv_socket_hart_count(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_mem_offset:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: offset of ram belonging to given socket
+ */
+uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_mem_size:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: size of ram belonging to given socket
+ */
+uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_check_hartids:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Returns: true if hartids belonging to given socket are contiguous else false
+ */
+bool riscv_socket_check_hartids(const MachineState *ms, int socket_id);
+
+/**
+ * riscv_socket_fdt_write_id:
+ * @ms: pointer to machine state
+ * @node_name: device tree node name
+ * @socket_id: socket index
+ *
+ * Write NUMA node-id FDT property in MachineState->fdt
+ */
+void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name,
+ int socket_id);
+
+/**
+ * riscv_socket_fdt_write_distance_matrix:
+ * @ms: pointer to machine state
+ * @socket_id: socket index
+ *
+ * Write NUMA distance matrix in MachineState->fdt
+ */
+void riscv_socket_fdt_write_distance_matrix(const MachineState *ms);
+
+CpuInstanceProperties
+riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index);
+
+int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx);
+
+const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms);
+
+#endif /* RISCV_NUMA_H */
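
A hedged sketch of iterating the sockets with the helpers above, for example to total up the RAM assigned to valid sockets; the function name is illustrative:

static uint64_t total_socket_ram(const MachineState *ms)
{
    uint64_t total = 0;

    for (int i = 0; i < riscv_socket_count(ms); i++) {
        if (riscv_socket_hart_count(ms, i) < 0 ||
            !riscv_socket_check_hartids(ms, i)) {
            continue;               /* skip invalid sockets */
        }
        total += riscv_socket_mem_size(ms, i);
    }
    return total;
}
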
diff --git a/include/hw/riscv/opentitan.h b/include/hw/riscv/opentitan.h
index 8f29b9cbbf..609473d07b 100644
--- a/include/hw/riscv/opentitan.h
+++ b/include/hw/riscv/opentitan.h
@@ -20,65 +20,103 @@
#define HW_OPENTITAN_H
#include "hw/riscv/riscv_hart.h"
-#include "hw/intc/ibex_plic.h"
+#include "hw/intc/sifive_plic.h"
#include "hw/char/ibex_uart.h"
+#include "hw/timer/ibex_timer.h"
+#include "hw/ssi/ibex_spi_host.h"
+#include "hw/boards.h"
+#include "qom/object.h"
#define TYPE_RISCV_IBEX_SOC "riscv.lowrisc.ibex.soc"
-#define RISCV_IBEX_SOC(obj) \
- OBJECT_CHECK(LowRISCIbexSoCState, (obj), TYPE_RISCV_IBEX_SOC)
+OBJECT_DECLARE_SIMPLE_TYPE(LowRISCIbexSoCState, RISCV_IBEX_SOC)
-typedef struct LowRISCIbexSoCState {
+enum {
+ OPENTITAN_SPI_HOST0,
+ OPENTITAN_SPI_HOST1,
+ OPENTITAN_NUM_SPI_HOSTS,
+};
+
+struct LowRISCIbexSoCState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
RISCVHartArrayState cpus;
- IbexPlicState plic;
+ SiFivePLICState plic;
IbexUartState uart;
+ IbexTimerState timer;
+ IbexSPIHostState spi_host[OPENTITAN_NUM_SPI_HOSTS];
+
+ uint32_t resetvec;
MemoryRegion flash_mem;
MemoryRegion rom;
-} LowRISCIbexSoCState;
+ MemoryRegion flash_alias;
+};
+
+#define TYPE_OPENTITAN_MACHINE MACHINE_TYPE_NAME("opentitan")
+OBJECT_DECLARE_SIMPLE_TYPE(OpenTitanState, OPENTITAN_MACHINE)
typedef struct OpenTitanState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent_obj;
/*< public >*/
LowRISCIbexSoCState soc;
} OpenTitanState;
enum {
- IBEX_ROM,
- IBEX_RAM,
- IBEX_FLASH,
- IBEX_UART,
- IBEX_GPIO,
- IBEX_SPI,
- IBEX_FLASH_CTRL,
- IBEX_RV_TIMER,
- IBEX_AES,
- IBEX_HMAC,
- IBEX_PLIC,
- IBEX_PWRMGR,
- IBEX_RSTMGR,
- IBEX_CLKMGR,
- IBEX_PINMUX,
- IBEX_ALERT_HANDLER,
- IBEX_NMI_GEN,
- IBEX_USBDEV,
- IBEX_PADCTRL,
+ IBEX_DEV_ROM,
+ IBEX_DEV_RAM,
+ IBEX_DEV_FLASH,
+ IBEX_DEV_FLASH_VIRTUAL,
+ IBEX_DEV_UART,
+ IBEX_DEV_SPI_DEVICE,
+ IBEX_DEV_SPI_HOST0,
+ IBEX_DEV_SPI_HOST1,
+ IBEX_DEV_GPIO,
+ IBEX_DEV_I2C,
+ IBEX_DEV_PATTGEN,
+ IBEX_DEV_TIMER,
+ IBEX_DEV_SENSOR_CTRL,
+ IBEX_DEV_OTP_CTRL,
+ IBEX_DEV_LC_CTRL,
+ IBEX_DEV_PWRMGR,
+ IBEX_DEV_RSTMGR,
+ IBEX_DEV_CLKMGR,
+ IBEX_DEV_PINMUX,
+ IBEX_DEV_AON_TIMER,
+ IBEX_DEV_USBDEV,
+ IBEX_DEV_FLASH_CTRL,
+ IBEX_DEV_PLIC,
+ IBEX_DEV_AES,
+ IBEX_DEV_HMAC,
+ IBEX_DEV_KMAC,
+ IBEX_DEV_KEYMGR,
+ IBEX_DEV_CSRNG,
+ IBEX_DEV_ENTROPY,
+ IBEX_DEV_EDNO,
+ IBEX_DEV_EDN1,
+ IBEX_DEV_ALERT_HANDLER,
+ IBEX_DEV_SRAM_CTRL,
+ IBEX_DEV_OTBN,
+ IBEX_DEV_IBEX_CFG,
};
enum {
- IBEX_UART_RX_PARITY_ERR_IRQ = 0x28,
- IBEX_UART_RX_TIMEOUT_IRQ = 0x27,
- IBEX_UART_RX_BREAK_ERR_IRQ = 0x26,
- IBEX_UART_RX_FRAME_ERR_IRQ = 0x25,
- IBEX_UART_RX_OVERFLOW_IRQ = 0x24,
- IBEX_UART_TX_EMPTY_IRQ = 0x23,
- IBEX_UART_RX_WATERMARK_IRQ = 0x22,
- IBEX_UART_TX_WATERMARK_IRQ = 0x21,
+ IBEX_UART0_TX_WATERMARK_IRQ = 1,
+ IBEX_UART0_RX_WATERMARK_IRQ = 2,
+ IBEX_UART0_TX_EMPTY_IRQ = 3,
+ IBEX_UART0_RX_OVERFLOW_IRQ = 4,
+ IBEX_UART0_RX_FRAME_ERR_IRQ = 5,
+ IBEX_UART0_RX_BREAK_ERR_IRQ = 6,
+ IBEX_UART0_RX_TIMEOUT_IRQ = 7,
+ IBEX_UART0_RX_PARITY_ERR_IRQ = 8,
+ IBEX_TIMER_TIMEREXPIRED0_0 = 124,
+ IBEX_SPI_HOST0_ERR_IRQ = 131,
+ IBEX_SPI_HOST0_SPI_EVENT_IRQ = 132,
+ IBEX_SPI_HOST1_ERR_IRQ = 133,
+ IBEX_SPI_HOST1_SPI_EVENT_IRQ = 134,
};
#endif
diff --git a/include/hw/riscv/riscv_hart.h b/include/hw/riscv/riscv_hart.h
index c75856fa73..912b4a2682 100644
--- a/include/hw/riscv/riscv_hart.h
+++ b/include/hw/riscv/riscv_hart.h
@@ -3,7 +3,7 @@
*
* Copyright (c) 2017 SiFive, Inc.
*
- * Holds the state of a heterogenous array of RISC-V harts
+ * Holds the state of a heterogeneous array of RISC-V harts
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -23,13 +23,13 @@
#include "hw/sysbus.h"
#include "target/riscv/cpu.h"
+#include "qom/object.h"
#define TYPE_RISCV_HART_ARRAY "riscv.hart_array"
-#define RISCV_HART_ARRAY(obj) \
- OBJECT_CHECK(RISCVHartArrayState, (obj), TYPE_RISCV_HART_ARRAY)
+OBJECT_DECLARE_SIMPLE_TYPE(RISCVHartArrayState, RISCV_HART_ARRAY)
-typedef struct RISCVHartArrayState {
+struct RISCVHartArrayState {
/*< private >*/
SysBusDevice parent_obj;
@@ -37,7 +37,8 @@ typedef struct RISCVHartArrayState {
uint32_t num_harts;
uint32_t hartid_base;
char *cpu_type;
+ uint64_t resetvec;
RISCVCPU *harts;
-} RISCVHartArrayState;
+};
#endif
diff --git a/include/hw/riscv/shakti_c.h b/include/hw/riscv/shakti_c.h
new file mode 100644
index 0000000000..539fe1156d
--- /dev/null
+++ b/include/hw/riscv/shakti_c.h
@@ -0,0 +1,75 @@
+/*
+ * Shakti C-class SoC emulation
+ *
+ * Copyright (c) 2021 Vijai Kumar K <vijai@behindbytes.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SHAKTI_C_H
+#define HW_SHAKTI_C_H
+
+#include "hw/riscv/riscv_hart.h"
+#include "hw/boards.h"
+#include "hw/char/shakti_uart.h"
+
+#define TYPE_RISCV_SHAKTI_SOC "riscv.shakti.cclass.soc"
+#define RISCV_SHAKTI_SOC(obj) \
+ OBJECT_CHECK(ShaktiCSoCState, (obj), TYPE_RISCV_SHAKTI_SOC)
+
+typedef struct ShaktiCSoCState {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ RISCVHartArrayState cpus;
+ DeviceState *plic;
+ ShaktiUartState uart;
+ MemoryRegion rom;
+
+} ShaktiCSoCState;
+
+#define TYPE_RISCV_SHAKTI_MACHINE MACHINE_TYPE_NAME("shakti_c")
+#define RISCV_SHAKTI_MACHINE(obj) \
+ OBJECT_CHECK(ShaktiCMachineState, (obj), TYPE_RISCV_SHAKTI_MACHINE)
+typedef struct ShaktiCMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /*< public >*/
+ ShaktiCSoCState soc;
+} ShaktiCMachineState;
+
+enum {
+ SHAKTI_C_ROM,
+ SHAKTI_C_RAM,
+ SHAKTI_C_UART,
+ SHAKTI_C_GPIO,
+ SHAKTI_C_PLIC,
+ SHAKTI_C_CLINT,
+ SHAKTI_C_I2C,
+};
+
+#define SHAKTI_C_PLIC_HART_CONFIG "MS"
+/* Including Interrupt ID 0 (no interrupt) */
+#define SHAKTI_C_PLIC_NUM_SOURCES 28
+/* Excluding Priority 0 */
+#define SHAKTI_C_PLIC_NUM_PRIORITIES 2
+#define SHAKTI_C_PLIC_PRIORITY_BASE 0x00
+#define SHAKTI_C_PLIC_PENDING_BASE 0x1000
+#define SHAKTI_C_PLIC_ENABLE_BASE 0x2000
+#define SHAKTI_C_PLIC_ENABLE_STRIDE 0x80
+#define SHAKTI_C_PLIC_CONTEXT_BASE 0x200000
+#define SHAKTI_C_PLIC_CONTEXT_STRIDE 0x1000
+
+#endif
diff --git a/include/hw/riscv/sifive_clint.h b/include/hw/riscv/sifive_clint.h
deleted file mode 100644
index 4a720bfece..0000000000
--- a/include/hw/riscv/sifive_clint.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * SiFive CLINT (Core Local Interruptor) interface
- *
- * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
- * Copyright (c) 2017 SiFive, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2 or later, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef HW_SIFIVE_CLINT_H
-#define HW_SIFIVE_CLINT_H
-
-#include "hw/sysbus.h"
-
-#define TYPE_SIFIVE_CLINT "riscv.sifive.clint"
-
-#define SIFIVE_CLINT(obj) \
- OBJECT_CHECK(SiFiveCLINTState, (obj), TYPE_SIFIVE_CLINT)
-
-typedef struct SiFiveCLINTState {
- /*< private >*/
- SysBusDevice parent_obj;
-
- /*< public >*/
- MemoryRegion mmio;
- uint32_t num_harts;
- uint32_t sip_base;
- uint32_t timecmp_base;
- uint32_t time_base;
- uint32_t aperture_size;
-} SiFiveCLINTState;
-
-DeviceState *sifive_clint_create(hwaddr addr, hwaddr size, uint32_t num_harts,
- uint32_t sip_base, uint32_t timecmp_base, uint32_t time_base,
- bool provide_rdtime);
-
-enum {
- SIFIVE_SIP_BASE = 0x0,
- SIFIVE_TIMECMP_BASE = 0x4000,
- SIFIVE_TIME_BASE = 0xBFF8
-};
-
-enum {
- SIFIVE_CLINT_TIMEBASE_FREQ = 10000000
-};
-
-#endif
diff --git a/include/hw/riscv/sifive_e.h b/include/hw/riscv/sifive_e.h
index 637414130b..31180a680e 100644
--- a/include/hw/riscv/sifive_e.h
+++ b/include/hw/riscv/sifive_e.h
@@ -21,7 +21,9 @@
#include "hw/riscv/riscv_hart.h"
#include "hw/riscv/sifive_cpu.h"
-#include "hw/riscv/sifive_gpio.h"
+#include "hw/gpio/sifive_gpio.h"
+#include "hw/misc/sifive_e_aon.h"
+#include "hw/boards.h"
#define TYPE_RISCV_E_SOC "riscv.sifive.e.soc"
#define RISCV_E_SOC(obj) \
@@ -34,6 +36,7 @@ typedef struct SiFiveESoCState {
/*< public >*/
RISCVHartArrayState cpus;
DeviceState *plic;
+ SiFiveEAONState aon;
SIFIVEGPIOState gpio;
MemoryRegion xip_mem;
MemoryRegion mask_rom;
@@ -41,7 +44,7 @@ typedef struct SiFiveESoCState {
typedef struct SiFiveEState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent_obj;
/*< public >*/
SiFiveESoCState soc;
@@ -53,37 +56,43 @@ typedef struct SiFiveEState {
OBJECT_CHECK(SiFiveEState, (obj), TYPE_RISCV_E_MACHINE)
enum {
- SIFIVE_E_DEBUG,
- SIFIVE_E_MROM,
- SIFIVE_E_OTP,
- SIFIVE_E_CLINT,
- SIFIVE_E_PLIC,
- SIFIVE_E_AON,
- SIFIVE_E_PRCI,
- SIFIVE_E_OTP_CTRL,
- SIFIVE_E_GPIO0,
- SIFIVE_E_UART0,
- SIFIVE_E_QSPI0,
- SIFIVE_E_PWM0,
- SIFIVE_E_UART1,
- SIFIVE_E_QSPI1,
- SIFIVE_E_PWM1,
- SIFIVE_E_QSPI2,
- SIFIVE_E_PWM2,
- SIFIVE_E_XIP,
- SIFIVE_E_DTIM
+ SIFIVE_E_DEV_DEBUG,
+ SIFIVE_E_DEV_MROM,
+ SIFIVE_E_DEV_OTP,
+ SIFIVE_E_DEV_CLINT,
+ SIFIVE_E_DEV_PLIC,
+ SIFIVE_E_DEV_AON,
+ SIFIVE_E_DEV_PRCI,
+ SIFIVE_E_DEV_OTP_CTRL,
+ SIFIVE_E_DEV_GPIO0,
+ SIFIVE_E_DEV_UART0,
+ SIFIVE_E_DEV_QSPI0,
+ SIFIVE_E_DEV_PWM0,
+ SIFIVE_E_DEV_UART1,
+ SIFIVE_E_DEV_QSPI1,
+ SIFIVE_E_DEV_PWM1,
+ SIFIVE_E_DEV_QSPI2,
+ SIFIVE_E_DEV_PWM2,
+ SIFIVE_E_DEV_XIP,
+ SIFIVE_E_DEV_DTIM
};
enum {
- SIFIVE_E_UART0_IRQ = 3,
- SIFIVE_E_UART1_IRQ = 4,
- SIFIVE_E_GPIO0_IRQ0 = 8
+ SIFIVE_E_AON_WDT_IRQ = 1,
+ SIFIVE_E_UART0_IRQ = 3,
+ SIFIVE_E_UART1_IRQ = 4,
+ SIFIVE_E_GPIO0_IRQ0 = 8
};
#define SIFIVE_E_PLIC_HART_CONFIG "M"
-#define SIFIVE_E_PLIC_NUM_SOURCES 127
+/*
+ * Freedom E310 G002 and G003 support 52 interrupt sources while
+ * Freedom E310 G000 supports 51 interrupt sources. We use the value
+ * of G002 and G003, so it is 53 (including interrupt source 0).
+ */
+#define SIFIVE_E_PLIC_NUM_SOURCES 53
#define SIFIVE_E_PLIC_NUM_PRIORITIES 7
-#define SIFIVE_E_PLIC_PRIORITY_BASE 0x04
+#define SIFIVE_E_PLIC_PRIORITY_BASE 0x00
#define SIFIVE_E_PLIC_PENDING_BASE 0x1000
#define SIFIVE_E_PLIC_ENABLE_BASE 0x2000
#define SIFIVE_E_PLIC_ENABLE_STRIDE 0x80
diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h
index aba4d0181f..0696f85942 100644
--- a/include/hw/riscv/sifive_u.h
+++ b/include/hw/riscv/sifive_u.h
@@ -19,12 +19,17 @@
#ifndef HW_SIFIVE_U_H
#define HW_SIFIVE_U_H
+#include "hw/boards.h"
+#include "hw/cpu/cluster.h"
+#include "hw/dma/sifive_pdma.h"
#include "hw/net/cadence_gem.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/riscv/sifive_cpu.h"
-#include "hw/riscv/sifive_gpio.h"
-#include "hw/riscv/sifive_u_prci.h"
-#include "hw/riscv/sifive_u_otp.h"
+#include "hw/gpio/sifive_gpio.h"
+#include "hw/misc/sifive_u_otp.h"
+#include "hw/misc/sifive_u_prci.h"
+#include "hw/ssi/sifive_spi.h"
+#include "hw/timer/sifive_pwm.h"
#define TYPE_RISCV_U_SOC "riscv.sifive.u.soc"
#define RISCV_U_SOC(obj) \
@@ -43,9 +48,14 @@ typedef struct SiFiveUSoCState {
SiFiveUPRCIState prci;
SIFIVEGPIOState gpio;
SiFiveUOTPState otp;
+ SiFivePDMAState dma;
+ SiFiveSPIState spi0;
+ SiFiveSPIState spi2;
CadenceGEMState gem;
+ SiFivePwmState pwm[2];
uint32_t serial;
+ char *cpu_type;
} SiFiveUSoCState;
#define TYPE_RISCV_U_MACHINE MACHINE_TYPE_NAME("sifive_u")
@@ -58,8 +68,6 @@ typedef struct SiFiveUState {
/*< public >*/
SiFiveUSoCState soc;
-
- void *fdt;
int fdt_size;
bool start_in_flash;
@@ -68,26 +76,36 @@ typedef struct SiFiveUState {
} SiFiveUState;
enum {
- SIFIVE_U_DEBUG,
- SIFIVE_U_MROM,
- SIFIVE_U_CLINT,
- SIFIVE_U_L2LIM,
- SIFIVE_U_PLIC,
- SIFIVE_U_PRCI,
- SIFIVE_U_UART0,
- SIFIVE_U_UART1,
- SIFIVE_U_GPIO,
- SIFIVE_U_OTP,
- SIFIVE_U_DMC,
- SIFIVE_U_FLASH0,
- SIFIVE_U_DRAM,
- SIFIVE_U_GEM,
- SIFIVE_U_GEM_MGMT
+ SIFIVE_U_DEV_DEBUG,
+ SIFIVE_U_DEV_MROM,
+ SIFIVE_U_DEV_CLINT,
+ SIFIVE_U_DEV_L2CC,
+ SIFIVE_U_DEV_PDMA,
+ SIFIVE_U_DEV_L2LIM,
+ SIFIVE_U_DEV_PLIC,
+ SIFIVE_U_DEV_PRCI,
+ SIFIVE_U_DEV_UART0,
+ SIFIVE_U_DEV_UART1,
+ SIFIVE_U_DEV_GPIO,
+ SIFIVE_U_DEV_QSPI0,
+ SIFIVE_U_DEV_QSPI2,
+ SIFIVE_U_DEV_OTP,
+ SIFIVE_U_DEV_DMC,
+ SIFIVE_U_DEV_FLASH0,
+ SIFIVE_U_DEV_DRAM,
+ SIFIVE_U_DEV_GEM,
+ SIFIVE_U_DEV_GEM_MGMT,
+ SIFIVE_U_DEV_PWM0,
+ SIFIVE_U_DEV_PWM1
};
enum {
+ SIFIVE_U_L2CC_IRQ0 = 1,
+ SIFIVE_U_L2CC_IRQ1 = 2,
+ SIFIVE_U_L2CC_IRQ2 = 3,
SIFIVE_U_UART0_IRQ = 4,
SIFIVE_U_UART1_IRQ = 5,
+ SIFIVE_U_QSPI2_IRQ = 6,
SIFIVE_U_GPIO_IRQ0 = 7,
SIFIVE_U_GPIO_IRQ1 = 8,
SIFIVE_U_GPIO_IRQ2 = 9,
@@ -104,7 +122,24 @@ enum {
SIFIVE_U_GPIO_IRQ13 = 20,
SIFIVE_U_GPIO_IRQ14 = 21,
SIFIVE_U_GPIO_IRQ15 = 22,
- SIFIVE_U_GEM_IRQ = 0x35
+ SIFIVE_U_PDMA_IRQ0 = 23,
+ SIFIVE_U_PDMA_IRQ1 = 24,
+ SIFIVE_U_PDMA_IRQ2 = 25,
+ SIFIVE_U_PDMA_IRQ3 = 26,
+ SIFIVE_U_PDMA_IRQ4 = 27,
+ SIFIVE_U_PDMA_IRQ5 = 28,
+ SIFIVE_U_PDMA_IRQ6 = 29,
+ SIFIVE_U_PDMA_IRQ7 = 30,
+ SIFIVE_U_PWM0_IRQ0 = 42,
+ SIFIVE_U_PWM0_IRQ1 = 43,
+ SIFIVE_U_PWM0_IRQ2 = 44,
+ SIFIVE_U_PWM0_IRQ3 = 45,
+ SIFIVE_U_PWM1_IRQ0 = 46,
+ SIFIVE_U_PWM1_IRQ1 = 47,
+ SIFIVE_U_PWM1_IRQ2 = 48,
+ SIFIVE_U_PWM1_IRQ3 = 49,
+ SIFIVE_U_QSPI0_IRQ = 51,
+ SIFIVE_U_GEM_IRQ = 53
};
enum {
@@ -121,10 +156,9 @@ enum {
#define SIFIVE_U_MANAGEMENT_CPU_COUNT 1
#define SIFIVE_U_COMPUTE_CPU_COUNT 4
-#define SIFIVE_U_PLIC_HART_CONFIG "MS"
#define SIFIVE_U_PLIC_NUM_SOURCES 54
#define SIFIVE_U_PLIC_NUM_PRIORITIES 7
-#define SIFIVE_U_PLIC_PRIORITY_BASE 0x04
+#define SIFIVE_U_PLIC_PRIORITY_BASE 0x00
#define SIFIVE_U_PLIC_PENDING_BASE 0x1000
#define SIFIVE_U_PLIC_ENABLE_BASE 0x2000
#define SIFIVE_U_PLIC_ENABLE_STRIDE 0x80
diff --git a/include/hw/riscv/spike.h b/include/hw/riscv/spike.h
index 1cd72b85d6..0c2a223763 100644
--- a/include/hw/riscv/spike.h
+++ b/include/hw/riscv/spike.h
@@ -19,29 +19,31 @@
#ifndef HW_RISCV_SPIKE_H
#define HW_RISCV_SPIKE_H
+#include "hw/boards.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/sysbus.h"
-typedef struct {
+#define SPIKE_CPUS_MAX 8
+#define SPIKE_SOCKETS_MAX 8
+
+#define TYPE_SPIKE_MACHINE MACHINE_TYPE_NAME("spike")
+typedef struct SpikeState SpikeState;
+DECLARE_INSTANCE_CHECKER(SpikeState, SPIKE_MACHINE,
+ TYPE_SPIKE_MACHINE)
+
+struct SpikeState {
/*< private >*/
- SysBusDevice parent_obj;
+ MachineState parent;
/*< public >*/
- RISCVHartArrayState soc;
- void *fdt;
- int fdt_size;
-} SpikeState;
+ RISCVHartArrayState soc[SPIKE_SOCKETS_MAX];
+};
enum {
SPIKE_MROM,
+ SPIKE_HTIF,
SPIKE_CLINT,
SPIKE_DRAM
};
-#if defined(TARGET_RISCV32)
-#define SPIKE_V1_10_0_CPU TYPE_RISCV_CPU_BASE32
-#elif defined(TARGET_RISCV64)
-#define SPIKE_V1_10_0_CPU TYPE_RISCV_CPU_BASE64
-#endif
-
#endif
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
index e69355efaf..3db839160f 100644
--- a/include/hw/riscv/virt.h
+++ b/include/hw/riscv/virt.h
@@ -19,26 +19,50 @@
#ifndef HW_RISCV_VIRT_H
#define HW_RISCV_VIRT_H
+#include "hw/boards.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/sysbus.h"
#include "hw/block/flash.h"
+#include "hw/intc/riscv_imsic.h"
+
+#define VIRT_CPUS_MAX_BITS 9
+#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
+#define VIRT_SOCKETS_MAX_BITS 2
+#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
-#define RISCV_VIRT_MACHINE(obj) \
- OBJECT_CHECK(RISCVVirtState, (obj), TYPE_RISCV_VIRT_MACHINE)
+typedef struct RISCVVirtState RISCVVirtState;
+DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
+ TYPE_RISCV_VIRT_MACHINE)
+
+typedef enum RISCVVirtAIAType {
+ VIRT_AIA_TYPE_NONE = 0,
+ VIRT_AIA_TYPE_APLIC,
+ VIRT_AIA_TYPE_APLIC_IMSIC,
+} RISCVVirtAIAType;
-typedef struct {
+struct RISCVVirtState {
/*< private >*/
MachineState parent;
/*< public >*/
- RISCVHartArrayState soc;
- DeviceState *plic;
+ Notifier machine_done;
+ DeviceState *platform_bus_dev;
+ RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
+ DeviceState *irqchip[VIRT_SOCKETS_MAX];
PFlashCFI01 *flash[2];
+ FWCfgState *fw_cfg;
- void *fdt;
int fdt_size;
-} RISCVVirtState;
+ bool have_aclint;
+ RISCVVirtAIAType aia_type;
+ int aia_guests;
+ char *oem_id;
+ char *oem_table_id;
+ OnOffAuto acpi;
+ const MemMapEntry *memmap;
+ struct GPEXHost *gpex_host;
+};
enum {
VIRT_DEBUG,
@@ -46,13 +70,20 @@ enum {
VIRT_TEST,
VIRT_RTC,
VIRT_CLINT,
+ VIRT_ACLINT_SSWI,
VIRT_PLIC,
+ VIRT_APLIC_M,
+ VIRT_APLIC_S,
VIRT_UART0,
VIRT_VIRTIO,
+ VIRT_FW_CFG,
+ VIRT_IMSIC_M,
+ VIRT_IMSIC_S,
VIRT_FLASH,
VIRT_DRAM,
VIRT_PCIE_MMIO,
VIRT_PCIE_PIO,
+ VIRT_PLATFORM_BUS,
VIRT_PCIE_ECAM
};
@@ -62,30 +93,64 @@ enum {
VIRTIO_IRQ = 1, /* 1 to 8 */
VIRTIO_COUNT = 8,
PCIE_IRQ = 0x20, /* 32 to 35 */
- VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
+ VIRT_PLATFORM_BUS_IRQ = 64, /* 64 to 95 */
};
-#define VIRT_PLIC_HART_CONFIG "MS"
-#define VIRT_PLIC_NUM_SOURCES 127
-#define VIRT_PLIC_NUM_PRIORITIES 7
-#define VIRT_PLIC_PRIORITY_BASE 0x04
+#define VIRT_PLATFORM_BUS_NUM_IRQS 32
+
+#define VIRT_IRQCHIP_NUM_MSIS 255
+#define VIRT_IRQCHIP_NUM_SOURCES 96
+#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
+#define VIRT_IRQCHIP_MAX_GUESTS_BITS 3
+#define VIRT_IRQCHIP_MAX_GUESTS ((1U << VIRT_IRQCHIP_MAX_GUESTS_BITS) - 1U)
+
+#define VIRT_PLIC_PRIORITY_BASE 0x00
#define VIRT_PLIC_PENDING_BASE 0x1000
#define VIRT_PLIC_ENABLE_BASE 0x2000
#define VIRT_PLIC_ENABLE_STRIDE 0x80
#define VIRT_PLIC_CONTEXT_BASE 0x200000
#define VIRT_PLIC_CONTEXT_STRIDE 0x1000
+#define VIRT_PLIC_SIZE(__num_context) \
+ (VIRT_PLIC_CONTEXT_BASE + (__num_context) * VIRT_PLIC_CONTEXT_STRIDE)
#define FDT_PCI_ADDR_CELLS 3
#define FDT_PCI_INT_CELLS 1
#define FDT_PLIC_ADDR_CELLS 0
#define FDT_PLIC_INT_CELLS 1
-#define FDT_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + 1 + \
- FDT_PLIC_ADDR_CELLS + FDT_PLIC_INT_CELLS)
+#define FDT_APLIC_INT_CELLS 2
+#define FDT_IMSIC_INT_CELLS 0
+#define FDT_MAX_INT_CELLS 2
+#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
+ 1 + FDT_MAX_INT_CELLS)
+#define FDT_PLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
+ 1 + FDT_PLIC_INT_CELLS)
+#define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
+ 1 + FDT_APLIC_INT_CELLS)
+
+bool virt_is_acpi_enabled(RISCVVirtState *s);
+void virt_acpi_setup(RISCVVirtState *vms);
+uint32_t imsic_num_bits(uint32_t count);
+
+/*
+ * The virt machine physical address space used by some of the devices,
+ * namely ACLINT, PLIC, APLIC, and IMSIC, depends on the number of sockets,
+ * the number of CPUs, and the number of IMSIC guest files.
+ *
+ * The various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
+ * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
+ * of the virt machine physical address space.
+ */
+
+#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
+#if VIRT_IMSIC_GROUP_MAX_SIZE < \
+ IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
+#error "Can't accommodate single IMSIC group in address space"
+#endif
-#if defined(TARGET_RISCV32)
-#define VIRT_CPU TYPE_RISCV_CPU_BASE32
-#elif defined(TARGET_RISCV64)
-#define VIRT_CPU TYPE_RISCV_CPU_BASE64
+#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
+ VIRT_IMSIC_GROUP_MAX_SIZE)
+#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
+#error "Can't accommodate all IMSIC groups in address space"
#endif
#endif
diff --git a/include/hw/rtc/allwinner-rtc.h b/include/hw/rtc/allwinner-rtc.h
index 7893f74795..bf415431cd 100644
--- a/include/hw/rtc/allwinner-rtc.h
+++ b/include/hw/rtc/allwinner-rtc.h
@@ -60,19 +60,14 @@
* @{
*/
-#define AW_RTC(obj) \
- OBJECT_CHECK(AwRtcState, (obj), TYPE_AW_RTC)
-#define AW_RTC_CLASS(klass) \
- OBJECT_CLASS_CHECK(AwRtcClass, (klass), TYPE_AW_RTC)
-#define AW_RTC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AwRtcClass, (obj), TYPE_AW_RTC)
+OBJECT_DECLARE_TYPE(AwRtcState, AwRtcClass, AW_RTC)
/** @} */
/**
* Allwinner RTC per-object instance state.
*/
-typedef struct AwRtcState {
+struct AwRtcState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -92,7 +87,7 @@ typedef struct AwRtcState {
/** Array of hardware registers */
uint32_t regs[AW_RTC_REGS_NUM];
-} AwRtcState;
+};
/**
* Allwinner RTC class-level struct.
@@ -101,7 +96,7 @@ typedef struct AwRtcState {
* such that the generic code can use this struct to support
* all devices.
*/
-typedef struct AwRtcClass {
+struct AwRtcClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
@@ -129,6 +124,6 @@ typedef struct AwRtcClass {
*/
bool (*write)(AwRtcState *s, uint32_t offset, uint32_t data);
-} AwRtcClass;
+};
#endif /* HW_MISC_ALLWINNER_RTC_H */
diff --git a/include/hw/rtc/aspeed_rtc.h b/include/hw/rtc/aspeed_rtc.h
index b94a710268..596dfebb46 100644
--- a/include/hw/rtc/aspeed_rtc.h
+++ b/include/hw/rtc/aspeed_rtc.h
@@ -9,19 +9,20 @@
#define HW_RTC_ASPEED_RTC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
-typedef struct AspeedRtcState {
+struct AspeedRtcState {
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq;
uint32_t reg[0x18];
- int offset;
+ int64_t offset;
-} AspeedRtcState;
+};
#define TYPE_ASPEED_RTC "aspeed.rtc"
-#define ASPEED_RTC(obj) OBJECT_CHECK(AspeedRtcState, (obj), TYPE_ASPEED_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedRtcState, ASPEED_RTC)
#endif /* HW_RTC_ASPEED_RTC_H */
diff --git a/include/hw/rtc/goldfish_rtc.h b/include/hw/rtc/goldfish_rtc.h
index 9bd8924f5f..162be33863 100644
--- a/include/hw/rtc/goldfish_rtc.h
+++ b/include/hw/rtc/goldfish_rtc.h
@@ -23,12 +23,12 @@
#define HW_RTC_GOLDFISH_RTC_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_GOLDFISH_RTC "goldfish_rtc"
-#define GOLDFISH_RTC(obj) \
- OBJECT_CHECK(GoldfishRTCState, (obj), TYPE_GOLDFISH_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(GoldfishRTCState, GOLDFISH_RTC)
-typedef struct GoldfishRTCState {
+struct GoldfishRTCState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -42,6 +42,8 @@ typedef struct GoldfishRTCState {
uint32_t irq_pending;
uint32_t irq_enabled;
uint32_t time_high;
-} GoldfishRTCState;
+
+ bool big_endian;
+};
#endif
diff --git a/include/hw/rtc/m48t59.h b/include/hw/rtc/m48t59.h
index e7ea4e8761..c14937476c 100644
--- a/include/hw/rtc/m48t59.h
+++ b/include/hw/rtc/m48t59.h
@@ -31,27 +31,20 @@
#define TYPE_NVRAM "nvram"
-#define NVRAM_CLASS(klass) \
- OBJECT_CLASS_CHECK(NvramClass, (klass), TYPE_NVRAM)
-#define NVRAM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(NvramClass, (obj), TYPE_NVRAM)
+typedef struct NvramClass NvramClass;
+DECLARE_CLASS_CHECKERS(NvramClass, NVRAM,
+ TYPE_NVRAM)
#define NVRAM(obj) \
INTERFACE_CHECK(Nvram, (obj), TYPE_NVRAM)
typedef struct Nvram Nvram;
-typedef struct NvramClass {
+struct NvramClass {
InterfaceClass parent;
uint32_t (*read)(Nvram *obj, uint32_t addr);
void (*write)(Nvram *obj, uint32_t addr, uint32_t val);
void (*toggle_lock)(Nvram *obj, int lock);
-} NvramClass;
+};
-Nvram *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size,
- int base_year, int type);
-Nvram *m48t59_init(qemu_irq IRQ, hwaddr mem_base,
- uint32_t io_base, uint16_t size, int base_year,
- int type);
-
-#endif /* HW_M48T59_H */
+#endif /* HW_RTC_M48T59_H */
diff --git a/include/hw/rtc/mc146818rtc.h b/include/hw/rtc/mc146818rtc.h
index 3713181b56..97cec0b3e8 100644
--- a/include/hw/rtc/mc146818rtc.h
+++ b/include/hw/rtc/mc146818rtc.h
@@ -9,21 +9,24 @@
#ifndef HW_RTC_MC146818RTC_H
#define HW_RTC_MC146818RTC_H
-#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-machine.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "hw/isa/isa.h"
+#include "qom/object.h"
#define TYPE_MC146818_RTC "mc146818rtc"
-#define MC146818_RTC(obj) OBJECT_CHECK(RTCState, (obj), TYPE_MC146818_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(MC146818RtcState, MC146818_RTC)
-typedef struct RTCState {
+struct MC146818RtcState {
ISADevice parent_obj;
MemoryRegion io;
MemoryRegion coalesced_io;
uint8_t cmos_data[128];
uint8_t cmos_index;
+ uint8_t isairq;
+ uint16_t io_base;
int32_t base_year;
uint64_t base_rtc;
uint64_t last_update;
@@ -43,15 +46,15 @@ typedef struct RTCState {
Notifier clock_reset_notifier;
LostTickPolicy lost_tick_policy;
Notifier suspend_notifier;
- QLIST_ENTRY(RTCState) link;
-} RTCState;
+ QLIST_ENTRY(MC146818RtcState) link;
+};
#define RTC_ISA_IRQ 8
-#define RTC_ISA_BASE 0x70
-ISADevice *mc146818_rtc_init(ISABus *bus, int base_year,
- qemu_irq intercept_irq);
-void rtc_set_memory(ISADevice *dev, int addr, int val);
-int rtc_get_memory(ISADevice *dev, int addr);
+MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year,
+ qemu_irq intercept_irq);
+void mc146818rtc_set_cmos_data(MC146818RtcState *s, int addr, int val);
+int mc146818rtc_get_cmos_data(MC146818RtcState *s, int addr);
+void qmp_rtc_reset_reinjection(Error **errp);
-#endif /* MC146818RTC_H */
+#endif /* HW_RTC_MC146818RTC_H */
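
A hedged sketch of the renamed RTC API in use; the base year, helper name, and CMOS offset are illustrative (offset 0x0f is conventionally the shutdown status byte):

static void wire_up_rtc(ISABus *isa_bus)
{
    MC146818RtcState *rtc = mc146818_rtc_init(isa_bus, 2000, NULL);

    /* Clear the CMOS shutdown status byte via the renamed accessor. */
    mc146818rtc_set_cmos_data(rtc, 0x0f, 0x00);
}
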
diff --git a/include/hw/rtc/pl031.h b/include/hw/rtc/pl031.h
index e3cb1d646f..9fd4be1abb 100644
--- a/include/hw/rtc/pl031.h
+++ b/include/hw/rtc/pl031.h
@@ -16,11 +16,12 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define TYPE_PL031 "pl031"
-#define PL031(obj) OBJECT_CHECK(PL031State, (obj), TYPE_PL031)
+OBJECT_DECLARE_SIMPLE_TYPE(PL031State, PL031)
-typedef struct PL031State {
+struct PL031State {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -42,6 +43,6 @@ typedef struct PL031State {
uint32_t cr;
uint32_t im;
uint32_t is;
-} PL031State;
+};
#endif
diff --git a/include/hw/rtc/sun4v-rtc.h b/include/hw/rtc/sun4v-rtc.h
index fd868f6ed2..26a9eb6196 100644
--- a/include/hw/rtc/sun4v-rtc.h
+++ b/include/hw/rtc/sun4v-rtc.h
@@ -5,12 +5,12 @@
*
* Copyright (c) 2016 Artyom Tarasenko
*
- * This code is licensed under the GNU GPL v3 or (at your option) any later
+ * This code is licensed under the GNU GPL v2 or (at your option) any later
* version.
*/
-#ifndef HW_RTC_SUN4V
-#define HW_RTC_SUN4V
+#ifndef HW_RTC_SUN4V_RTC_H
+#define HW_RTC_SUN4V_RTC_H
#include "exec/hwaddr.h"
diff --git a/include/hw/rtc/xlnx-zynqmp-rtc.h b/include/hw/rtc/xlnx-zynqmp-rtc.h
index 6fa1cb2f43..f0c6a2d78a 100644
--- a/include/hw/rtc/xlnx-zynqmp-rtc.h
+++ b/include/hw/rtc/xlnx-zynqmp-rtc.h
@@ -24,16 +24,16 @@
* THE SOFTWARE.
*/
-#ifndef HW_RTC_XLNX_ZYNQMP_H
-#define HW_RTC_XLNX_ZYNQMP_H
+#ifndef HW_RTC_XLNX_ZYNQMP_RTC_H
+#define HW_RTC_XLNX_ZYNQMP_RTC_H
#include "hw/register.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_XLNX_ZYNQMP_RTC "xlnx-zynmp.rtc"
-#define XLNX_ZYNQMP_RTC(obj) \
- OBJECT_CHECK(XlnxZynqMPRTC, (obj), TYPE_XLNX_ZYNQMP_RTC)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPRTC, XLNX_ZYNQMP_RTC)
REG32(SET_TIME_WRITE, 0x0)
REG32(SET_TIME_READ, 0x4)
@@ -77,7 +77,7 @@ REG32(SAFETY_CHK, 0x50)
#define XLNX_ZYNQMP_RTC_R_MAX (R_SAFETY_CHK + 1)
-typedef struct XlnxZynqMPRTC {
+struct XlnxZynqMPRTC {
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq_rtc_int;
@@ -87,6 +87,6 @@ typedef struct XlnxZynqMPRTC {
uint32_t regs[XLNX_ZYNQMP_RTC_R_MAX];
RegisterInfo regs_info[XLNX_ZYNQMP_RTC_R_MAX];
-} XlnxZynqMPRTC;
+};
#endif
diff --git a/include/hw/rx/rx62n.h b/include/hw/rx/rx62n.h
index aa94758c27..766fe0e435 100644
--- a/include/hw/rx/rx62n.h
+++ b/include/hw/rx/rx62n.h
@@ -21,18 +21,20 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef HW_RX_RX62N_MCU_H
-#define HW_RX_RX62N_MCU_H
+#ifndef HW_RX_RX62N_H
+#define HW_RX_RX62N_H
#include "target/rx/cpu.h"
#include "hw/intc/rx_icu.h"
#include "hw/timer/renesas_tmr.h"
#include "hw/timer/renesas_cmt.h"
#include "hw/char/renesas_sci.h"
-#include "qemu/units.h"
+#include "qom/object.h"
#define TYPE_RX62N_MCU "rx62n-mcu"
-#define RX62N_MCU(obj) OBJECT_CHECK(RX62NState, (obj), TYPE_RX62N_MCU)
+typedef struct RX62NState RX62NState;
+DECLARE_INSTANCE_CHECKER(RX62NState, RX62N_MCU,
+ TYPE_RX62N_MCU)
#define TYPE_R5F562N7_MCU "r5f562n7-mcu"
#define TYPE_R5F562N8_MCU "r5f562n8-mcu"
@@ -45,7 +47,7 @@
#define RX62N_NR_CMT 2
#define RX62N_NR_SCI 6
-typedef struct RX62NState {
+struct RX62NState {
/*< private >*/
DeviceState parent_obj;
/*< public >*/
@@ -65,12 +67,11 @@ typedef struct RX62NState {
MemoryRegion iomem2;
MemoryRegion iomem3;
MemoryRegion c_flash;
- qemu_irq irq[NR_IRQS];
/* Input Clock (XTAL) frequency */
uint32_t xtal_freq_hz;
/* Peripheral Module Clock frequency */
uint32_t pclk_freq_hz;
-} RX62NState;
+};
#endif
diff --git a/include/hw/s390x/3270-ccw.h b/include/hw/s390x/3270-ccw.h
index 9d1d18e2bd..1439882294 100644
--- a/include/hw/s390x/3270-ccw.h
+++ b/include/hw/s390x/3270-ccw.h
@@ -16,6 +16,7 @@
#include "hw/sysbus.h"
#include "hw/s390x/css.h"
#include "hw/s390x/ccw-device.h"
+#include "qom/object.h"
#define EMULATED_CCW_3270_CU_TYPE 0x3270
#define EMULATED_CCW_3270_CHPID_TYPE 0x1a
@@ -30,23 +31,18 @@
#define TC_EWRITEA 0x0d /* Erase write alternate */
#define TC_WRITESF 0x11 /* Write structured field */
-#define EMULATED_CCW_3270(obj) \
- OBJECT_CHECK(EmulatedCcw3270Device, (obj), TYPE_EMULATED_CCW_3270)
-#define EMULATED_CCW_3270_CLASS(klass) \
- OBJECT_CLASS_CHECK(EmulatedCcw3270Class, (klass), TYPE_EMULATED_CCW_3270)
-#define EMULATED_CCW_3270_GET_CLASS(obj) \
- OBJECT_GET_CLASS(EmulatedCcw3270Class, (obj), TYPE_EMULATED_CCW_3270)
+OBJECT_DECLARE_TYPE(EmulatedCcw3270Device, EmulatedCcw3270Class, EMULATED_CCW_3270)
-typedef struct EmulatedCcw3270Device {
+struct EmulatedCcw3270Device {
CcwDevice parent_obj;
-} EmulatedCcw3270Device;
+};
-typedef struct EmulatedCcw3270Class {
+struct EmulatedCcw3270Class {
CCWDeviceClass parent_class;
void (*init)(EmulatedCcw3270Device *, Error **);
int (*read_payload_3270)(EmulatedCcw3270Device *);
int (*write_payload_3270)(EmulatedCcw3270Device *, uint8_t);
-} EmulatedCcw3270Class;
+};
#endif
diff --git a/include/hw/s390x/ap-device.h b/include/hw/s390x/ap-device.h
index 8df9cd2954..e502745de5 100644
--- a/include/hw/s390x/ap-device.h
+++ b/include/hw/s390x/ap-device.h
@@ -12,14 +12,16 @@
#define HW_S390X_AP_DEVICE_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
-#define AP_DEVICE_TYPE "ap-device"
+#define TYPE_AP_DEVICE "ap-device"
-typedef struct APDevice {
+struct APDevice {
DeviceState parent_obj;
-} APDevice;
+};
+typedef struct APDevice APDevice;
-#define AP_DEVICE(obj) \
- OBJECT_CHECK(APDevice, (obj), AP_DEVICE_TYPE)
+DECLARE_INSTANCE_CHECKER(APDevice, AP_DEVICE,
+ TYPE_AP_DEVICE)
#endif /* HW_S390X_AP_DEVICE_H */
diff --git a/include/hw/s390x/cpu-topology.h b/include/hw/s390x/cpu-topology.h
new file mode 100644
index 0000000000..c064f427e9
--- /dev/null
+++ b/include/hw/s390x/cpu-topology.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU Topology
+ *
+ * Copyright IBM Corp. 2022, 2023
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+#ifndef HW_S390X_CPU_TOPOLOGY_H
+#define HW_S390X_CPU_TOPOLOGY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "qemu/queue.h"
+#include "hw/boards.h"
+#include "qapi/qapi-types-machine-target.h"
+
+#define S390_TOPOLOGY_CPU_IFL 0x03
+
+typedef struct S390TopologyId {
+ uint8_t sentinel;
+ uint8_t drawer;
+ uint8_t book;
+ uint8_t socket;
+ uint8_t type;
+ uint8_t vertical:1;
+ uint8_t entitlement:2;
+ uint8_t dedicated;
+ uint8_t origin;
+} S390TopologyId;
+
+typedef struct S390TopologyEntry {
+ QTAILQ_ENTRY(S390TopologyEntry) next;
+ S390TopologyId id;
+ uint64_t mask;
+} S390TopologyEntry;
+
+typedef struct S390Topology {
+ uint8_t *cores_per_socket;
+ CpuS390Polarization polarization;
+} S390Topology;
+
+typedef QTAILQ_HEAD(, S390TopologyEntry) S390TopologyList;
+
+#ifdef CONFIG_KVM
+bool s390_has_topology(void);
+void s390_topology_setup_cpu(MachineState *ms, S390CPU *cpu, Error **errp);
+void s390_topology_reset(void);
+#else
+static inline bool s390_has_topology(void)
+{
+ return false;
+}
+static inline void s390_topology_setup_cpu(MachineState *ms,
+ S390CPU *cpu,
+ Error **errp) {}
+static inline void s390_topology_reset(void)
+{
+ /* Unreachable, CPU topology not implemented for TCG */
+ assert(false);
+}
+#endif
+
+extern S390Topology s390_topology;
+
+static inline int s390_std_socket(int n, CpuTopology *smp)
+{
+ return (n / smp->cores) % smp->sockets;
+}
+
+static inline int s390_std_book(int n, CpuTopology *smp)
+{
+ return (n / (smp->cores * smp->sockets)) % smp->books;
+}
+
+static inline int s390_std_drawer(int n, CpuTopology *smp)
+{
+ return (n / (smp->cores * smp->sockets * smp->books)) % smp->drawers;
+}
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif
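The s390_std_socket/book/drawer helpers added above derive a CPU's topology coordinates purely from its index and the SMP geometry: each level divides the index by the number of cores contained in one unit of that level and wraps with a modulo. A minimal standalone sketch of that arithmetic, using a made-up geometry of 4 cores per socket and 2 sockets per book (values chosen only for illustration):

    #include <assert.h>

    /* Stand-in for the CpuTopology fields the helpers above consume. */
    struct geometry { int cores, sockets, books, drawers; };

    static int std_socket(int n, const struct geometry *smp)
    {
        return (n / smp->cores) % smp->sockets;                /* as s390_std_socket() */
    }

    static int std_book(int n, const struct geometry *smp)
    {
        return (n / (smp->cores * smp->sockets)) % smp->books; /* as s390_std_book() */
    }

    int main(void)
    {
        struct geometry smp = { .cores = 4, .sockets = 2, .books = 2, .drawers = 1 };

        /* CPU 13 sits in socket (13 / 4) % 2 = 1 and book (13 / 8) % 2 = 1. */
        assert(std_socket(13, &smp) == 1);
        assert(std_book(13, &smp) == 1);
        return 0;
    }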
diff --git a/include/hw/s390x/css-bridge.h b/include/hw/s390x/css-bridge.h
index f7ed2d9a03..deb606d71f 100644
--- a/include/hw/s390x/css-bridge.h
+++ b/include/hw/s390x/css-bridge.h
@@ -17,23 +17,21 @@
#include "hw/sysbus.h"
/* virtual css bridge */
-typedef struct VirtualCssBridge {
+struct VirtualCssBridge {
SysBusDevice sysbus_dev;
bool css_dev_path;
-} VirtualCssBridge;
+};
#define TYPE_VIRTUAL_CSS_BRIDGE "virtual-css-bridge"
-#define VIRTUAL_CSS_BRIDGE(obj) \
- OBJECT_CHECK(VirtualCssBridge, (obj), TYPE_VIRTUAL_CSS_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtualCssBridge, VIRTUAL_CSS_BRIDGE)
/* virtual css bus type */
-typedef struct VirtualCssBus {
+struct VirtualCssBus {
BusState parent_obj;
-} VirtualCssBus;
+};
#define TYPE_VIRTUAL_CSS_BUS "virtual-css-bus"
-#define VIRTUAL_CSS_BUS(obj) \
- OBJECT_CHECK(VirtualCssBus, (obj), TYPE_VIRTUAL_CSS_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtualCssBus, VIRTUAL_CSS_BUS)
VirtualCssBus *virtual_css_bus_init(void);
#endif
diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h
index 08c869ab0a..ba72ee3dd2 100644
--- a/include/hw/s390x/css.h
+++ b/include/hw/s390x/css.h
@@ -12,7 +12,6 @@
#ifndef CSS_H
#define CSS_H
-#include "cpu.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
@@ -133,19 +132,22 @@ struct SubchDev {
bool ccw_fmt_1;
bool thinint_active;
uint8_t ccw_no_data_cnt;
- uint16_t migrated_schid; /* used for missmatch detection */
+ uint16_t migrated_schid; /* used for mismatch detection */
CcwDataStream cds;
/* transport-provided data: */
int (*ccw_cb) (SubchDev *, CCW1);
void (*disable_cb)(SubchDev *);
IOInstEnding (*do_subchannel_work) (SubchDev *);
+ void (*irb_cb)(SubchDev *, IRB *);
SenseId id;
void *driver_data;
+ ESW esw;
};
static inline void sch_gen_unit_exception(SubchDev *sch)
{
- sch->curr_status.scsw.ctrl &= ~SCSW_ACTL_START_PEND;
+ sch->curr_status.scsw.ctrl &= ~(SCSW_ACTL_DEVICE_ACTIVE |
+ SCSW_ACTL_SUBCH_ACTIVE);
sch->curr_status.scsw.ctrl |= SCSW_STCTL_PRIMARY |
SCSW_STCTL_SECONDARY |
SCSW_STCTL_ALERT |
@@ -202,6 +204,7 @@ int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id);
unsigned int css_find_free_chpid(uint8_t cssid);
uint16_t css_build_subchannel_id(SubchDev *sch);
void copy_scsw_to_guest(SCSW *dest, const SCSW *src);
+void copy_esw_to_guest(ESW *dest, const ESW *src);
void css_inject_io_interrupt(SubchDev *sch);
void css_reset(void);
void css_reset_sch(SubchDev *sch);
@@ -216,6 +219,8 @@ void css_clear_sei_pending(void);
IOInstEnding s390_ccw_cmd_request(SubchDev *sch);
IOInstEnding do_subchannel_work_virtual(SubchDev *sub);
IOInstEnding do_subchannel_work_passthrough(SubchDev *sub);
+void build_irb_passthrough(SubchDev *sch, IRB *irb);
+void build_irb_virtual(SubchDev *sch, IRB *irb);
int s390_ccw_halt(SubchDev *sch);
int s390_ccw_clear(SubchDev *sch);
@@ -228,17 +233,11 @@ typedef enum {
} CssIoAdapterType;
void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc);
-int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode);
+int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode);
uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc);
void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
uint8_t flags, Error **errp);
-#ifndef CONFIG_KVM
-#define S390_ADAPTER_SUPPRESSIBLE 0x01
-#else
-#define S390_ADAPTER_SUPPRESSIBLE KVM_S390_ADAPTER_SUPPRESSIBLE
-#endif
-
#ifndef CONFIG_USER_ONLY
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
uint16_t schid);
diff --git a/include/hw/s390x/event-facility.h b/include/hw/s390x/event-facility.h
index 700a610f33..3ffd575d8f 100644
--- a/include/hw/s390x/event-facility.h
+++ b/include/hw/s390x/event-facility.h
@@ -18,6 +18,7 @@
#include "qemu/thread.h"
#include "hw/qdev-core.h"
#include "hw/s390x/sclp.h"
+#include "qom/object.h"
/* SCLP event types */
#define SCLP_EVENT_OPRTNS_COMMAND 0x01
@@ -41,12 +42,8 @@
#define SCLP_SELECTIVE_READ 0x01
#define TYPE_SCLP_EVENT "s390-sclp-event-type"
-#define SCLP_EVENT(obj) \
- OBJECT_CHECK(SCLPEvent, (obj), TYPE_SCLP_EVENT)
-#define SCLP_EVENT_CLASS(klass) \
- OBJECT_CLASS_CHECK(SCLPEventClass, (klass), TYPE_SCLP_EVENT)
-#define SCLP_EVENT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SCLPEventClass, (obj), TYPE_SCLP_EVENT)
+OBJECT_DECLARE_TYPE(SCLPEvent, SCLPEventClass,
+ SCLP_EVENT)
#define TYPE_SCLP_CPU_HOTPLUG "sclp-cpu-hotplug"
#define TYPE_SCLP_QUIESCE "sclpquiesce"
@@ -169,13 +166,13 @@ typedef struct ReadEventData {
};
} QEMU_PACKED ReadEventData;
-typedef struct SCLPEvent {
+struct SCLPEvent {
DeviceState qdev;
bool event_pending;
char *name;
-} SCLPEvent;
+};
-typedef struct SCLPEventClass {
+struct SCLPEventClass {
DeviceClass parent_class;
int (*init)(SCLPEvent *event);
@@ -192,23 +189,19 @@ typedef struct SCLPEventClass {
/* can we handle this event type? */
bool (*can_handle_event)(uint8_t type);
-} SCLPEventClass;
+};
#define TYPE_SCLP_EVENT_FACILITY "s390-sclp-event-facility"
-#define EVENT_FACILITY(obj) \
- OBJECT_CHECK(SCLPEventFacility, (obj), TYPE_SCLP_EVENT_FACILITY)
-#define EVENT_FACILITY_CLASS(klass) \
- OBJECT_CLASS_CHECK(SCLPEventFacilityClass, (klass), \
- TYPE_SCLP_EVENT_FACILITY)
-#define EVENT_FACILITY_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SCLPEventFacilityClass, (obj), \
- TYPE_SCLP_EVENT_FACILITY)
-
-typedef struct SCLPEventFacilityClass {
+typedef struct SCLPEventFacility SCLPEventFacility;
+typedef struct SCLPEventFacilityClass SCLPEventFacilityClass;
+DECLARE_OBJ_CHECKERS(SCLPEventFacility, SCLPEventFacilityClass,
+ EVENT_FACILITY, TYPE_SCLP_EVENT_FACILITY)
+
+struct SCLPEventFacilityClass {
SysBusDeviceClass parent_class;
void (*command_handler)(SCLPEventFacility *ef, SCCB *sccb, uint64_t code);
bool (*event_pending)(SCLPEventFacility *ef);
-} SCLPEventFacilityClass;
+};
BusState *sclp_get_event_facility_bus(void);
diff --git a/include/hw/s390x/ioinst.h b/include/hw/s390x/ioinst.h
index c6737a30d4..ea8d0f2444 100644
--- a/include/hw/s390x/ioinst.h
+++ b/include/hw/s390x/ioinst.h
@@ -107,7 +107,7 @@ QEMU_BUILD_BUG_MSG(sizeof(PMCW) != 28, "size of PMCW is wrong");
#define PMCW_FLAGS_MASK_MP 0x0004
#define PMCW_FLAGS_MASK_TF 0x0002
#define PMCW_FLAGS_MASK_DNV 0x0001
-#define PMCW_FLAGS_MASK_INVALID 0x0700
+#define PMCW_FLAGS_MASK_INVALID 0xc300
#define PMCW_CHARS_MASK_ST 0x00e00000
#define PMCW_CHARS_MASK_MBFC 0x00000004
@@ -123,10 +123,20 @@ typedef struct SCHIB {
uint8_t mda[4];
} QEMU_PACKED SCHIB;
+/* format-0 extended-status word */
+typedef struct ESW {
+ uint32_t word0; /* subchannel logout for format 0 */
+ uint32_t erw;
+ uint64_t word2; /* failing-storage address for format 0 */
+ uint32_t word4; /* secondary-CCW address for format 0 */
+} QEMU_PACKED ESW;
+
+#define ESW_ERW_SENSE 0x01000000
+
/* interruption response block */
typedef struct IRB {
SCSW scsw;
- uint32_t esw[5];
+ ESW esw;
uint32_t ecw[8];
uint32_t emw[8];
} IRB;
diff --git a/include/hw/s390x/pv.h b/include/hw/s390x/pv.h
deleted file mode 100644
index aee758bc2d..0000000000
--- a/include/hw/s390x/pv.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Protected Virtualization header
- *
- * Copyright IBM Corp. 2020
- * Author(s):
- * Janosch Frank <frankja@linux.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or (at
- * your option) any later version. See the COPYING file in the top-level
- * directory.
- */
-#ifndef HW_S390_PV_H
-#define HW_S390_PV_H
-
-#ifdef CONFIG_KVM
-#include "cpu.h"
-#include "hw/s390x/s390-virtio-ccw.h"
-
-static inline bool s390_is_pv(void)
-{
- static S390CcwMachineState *ccw;
- Object *obj;
-
- if (ccw) {
- return ccw->pv;
- }
-
- /* we have to bail out for the "none" machine */
- obj = object_dynamic_cast(qdev_get_machine(),
- TYPE_S390_CCW_MACHINE);
- if (!obj) {
- return false;
- }
- ccw = S390_CCW_MACHINE(obj);
- return ccw->pv;
-}
-
-int s390_pv_vm_enable(void);
-void s390_pv_vm_disable(void);
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length);
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak);
-void s390_pv_prep_reset(void);
-int s390_pv_verify(void);
-void s390_pv_unshare(void);
-void s390_pv_inject_reset_error(CPUState *cs);
-#else /* CONFIG_KVM */
-static inline bool s390_is_pv(void) { return false; }
-static inline int s390_pv_vm_enable(void) { return 0; }
-static inline void s390_pv_vm_disable(void) {}
-static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; }
-static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; }
-static inline void s390_pv_prep_reset(void) {}
-static inline int s390_pv_verify(void) { return 0; }
-static inline void s390_pv_unshare(void) {}
-static inline void s390_pv_inject_reset_error(CPUState *cs) {};
-#endif /* CONFIG_KVM */
-
-#endif /* HW_S390_PV_H */
diff --git a/include/hw/s390x/s390-ccw.h b/include/hw/s390x/s390-ccw.h
index d8e08b5f4c..2c807ee3a1 100644
--- a/include/hw/s390x/s390-ccw.h
+++ b/include/hw/s390x/s390-ccw.h
@@ -14,23 +14,22 @@
#define HW_S390_CCW_H
#include "hw/s390x/ccw-device.h"
+#include "qom/object.h"
#define TYPE_S390_CCW "s390-ccw"
-#define S390_CCW_DEVICE(obj) \
- OBJECT_CHECK(S390CCWDevice, (obj), TYPE_S390_CCW)
-#define S390_CCW_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390CCWDeviceClass, (klass), TYPE_S390_CCW)
-#define S390_CCW_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390CCWDeviceClass, (obj), TYPE_S390_CCW)
+typedef struct S390CCWDevice S390CCWDevice;
+typedef struct S390CCWDeviceClass S390CCWDeviceClass;
+DECLARE_OBJ_CHECKERS(S390CCWDevice, S390CCWDeviceClass,
+ S390_CCW_DEVICE, TYPE_S390_CCW)
-typedef struct S390CCWDevice {
+struct S390CCWDevice {
CcwDevice parent_obj;
CssDevId hostid;
char *mdevid;
int32_t bootindex;
-} S390CCWDevice;
+};
-typedef struct S390CCWDeviceClass {
+struct S390CCWDeviceClass {
CCWDeviceClass parent_class;
void (*realize)(S390CCWDevice *dev, char *sysfsdev, Error **errp);
void (*unrealize)(S390CCWDevice *dev);
@@ -38,6 +37,6 @@ typedef struct S390CCWDeviceClass {
int (*handle_halt) (SubchDev *sch);
int (*handle_clear) (SubchDev *sch);
IOInstEnding (*handle_store) (SubchDev *sch);
-} S390CCWDeviceClass;
+};
#endif
diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h
new file mode 100644
index 0000000000..2c43ea123f
--- /dev/null
+++ b/include/hw/s390x/s390-pci-bus.h
@@ -0,0 +1,406 @@
+/*
+ * s390 PCI BUS definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_BUS_H
+#define HW_S390_PCI_BUS_H
+
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/css.h"
+#include "hw/s390x/s390-pci-clp.h"
+#include "qom/object.h"
+
+#define TYPE_S390_PCI_HOST_BRIDGE "s390-pcihost"
+#define TYPE_S390_PCI_BUS "s390-pcibus"
+#define TYPE_S390_PCI_DEVICE "zpci"
+#define TYPE_S390_PCI_IOMMU "s390-pci-iommu"
+#define TYPE_S390_IOMMU_MEMORY_REGION "s390-iommu-memory-region"
+#define FH_MASK_ENABLE 0x80000000
+#define FH_MASK_INSTANCE 0x7f000000
+#define FH_MASK_SHM 0x00ff0000
+#define FH_MASK_INDEX 0x0000ffff
+#define FH_SHM_VFIO 0x00010000
+#define FH_SHM_EMUL 0x00020000
+#define ZPCI_MAX_FID 0xffffffff
+#define ZPCI_MAX_UID 0xffff
+#define UID_UNDEFINED 0
+#define UID_CHECKING_ENABLED 0x01
+#define ZPCI_DTSM 0x40
+
+/* zPCI Function Types */
+#define ZPCI_PFT_ISM 5
+
+OBJECT_DECLARE_SIMPLE_TYPE(S390pciState, S390_PCI_HOST_BRIDGE)
+OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBus, S390_PCI_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(S390PCIBusDevice, S390_PCI_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(S390PCIIOMMU, S390_PCI_IOMMU)
+
+#define HP_EVENT_TO_CONFIGURED 0x0301
+#define HP_EVENT_RESERVED_TO_STANDBY 0x0302
+#define HP_EVENT_DECONFIGURE_REQUEST 0x0303
+#define HP_EVENT_CONFIGURED_TO_STBRES 0x0304
+#define HP_EVENT_STANDBY_TO_RESERVED 0x0308
+
+#define ERR_EVENT_INVALAS 0x1
+#define ERR_EVENT_OORANGE 0x2
+#define ERR_EVENT_INVALTF 0x3
+#define ERR_EVENT_TPROTE 0x4
+#define ERR_EVENT_APROTE 0x5
+#define ERR_EVENT_KEYE 0x6
+#define ERR_EVENT_INVALTE 0x7
+#define ERR_EVENT_INVALTL 0x8
+#define ERR_EVENT_TT 0x9
+#define ERR_EVENT_INVALMS 0xa
+#define ERR_EVENT_SERR 0xb
+#define ERR_EVENT_NOMSI 0x10
+#define ERR_EVENT_INVALBV 0x11
+#define ERR_EVENT_AIBV 0x12
+#define ERR_EVENT_AIRERR 0x13
+#define ERR_EVENT_FMBA 0x2a
+#define ERR_EVENT_FMBUP 0x2b
+#define ERR_EVENT_FMBPRO 0x2c
+#define ERR_EVENT_CCONF 0x30
+#define ERR_EVENT_SERVAC 0x3a
+#define ERR_EVENT_PERMERR 0x3b
+
+#define ERR_EVENT_Q_BIT 0x2
+#define ERR_EVENT_MVN_OFFSET 16
+
+#define ZPCI_MSI_VEC_BITS 11
+#define ZPCI_MSI_VEC_MASK 0x7ff
+
+#define ZPCI_MSI_ADDR 0xfe00000000000000ULL
+#define ZPCI_SDMA_ADDR 0x100000000ULL
+#define ZPCI_EDMA_ADDR 0x1ffffffffffffffULL
+
+#define PAGE_DEFAULT_ACC 0
+#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
+
+/* I/O Translation Anchor (IOTA) */
+enum ZpciIoatDtype {
+ ZPCI_IOTA_STO = 0,
+ ZPCI_IOTA_RTTO = 1,
+ ZPCI_IOTA_RSTO = 2,
+ ZPCI_IOTA_RFTO = 3,
+ ZPCI_IOTA_PFAA = 4,
+ ZPCI_IOTA_IOPFAA = 5,
+ ZPCI_IOTA_IOPTO = 7
+};
+
+#define ZPCI_IOTA_IOT_ENABLED 0x800ULL
+#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
+#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
+#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
+#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
+#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
+#define ZPCI_IOTA_FS_4K 0
+#define ZPCI_IOTA_FS_1M 1
+#define ZPCI_IOTA_FS_2G 2
+#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
+#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
+#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY |\
+ ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
+
+/* I/O Region and segment tables */
+#define ZPCI_INDEX_MASK 0x7ffULL
+
+#define ZPCI_TABLE_TYPE_MASK 0xc
+#define ZPCI_TABLE_TYPE_RFX 0xc
+#define ZPCI_TABLE_TYPE_RSX 0x8
+#define ZPCI_TABLE_TYPE_RTX 0x4
+#define ZPCI_TABLE_TYPE_SX 0x0
+
+#define ZPCI_TABLE_LEN_RFX 0x3
+#define ZPCI_TABLE_LEN_RSX 0x3
+#define ZPCI_TABLE_LEN_RTX 0x3
+
+#define ZPCI_TABLE_OFFSET_MASK 0xc0
+#define ZPCI_TABLE_SIZE 0x4000
+#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
+#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
+#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+
+#define ZPCI_TABLE_BITS 11
+#define ZPCI_PT_BITS 8
+#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + TARGET_PAGE_BITS)
+#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+
+#define ZPCI_RTE_FLAG_MASK 0x3fffULL
+#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
+#define ZPCI_STE_FLAG_MASK 0x7ffULL
+#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
+
+#define ZPCI_SFAA_MASK (~((1ULL << 20) - 1))
+
+/* I/O Page tables */
+#define ZPCI_PTE_VALID_MASK 0x400
+#define ZPCI_PTE_INVALID 0x400
+#define ZPCI_PTE_VALID 0x000
+#define ZPCI_PT_SIZE 0x800
+#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
+#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
+
+#define ZPCI_PTE_FLAG_MASK 0xfffULL
+#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
+
+/* Shared bits */
+#define ZPCI_TABLE_VALID 0x00
+#define ZPCI_TABLE_INVALID 0x20
+#define ZPCI_TABLE_PROTECTED 0x200
+#define ZPCI_TABLE_UNPROTECTED 0x000
+#define ZPCI_TABLE_FC 0x400
+
+#define ZPCI_TABLE_VALID_MASK 0x20
+#define ZPCI_TABLE_PROT_MASK 0x200
+
+#define ZPCI_ETT_RT 1
+#define ZPCI_ETT_ST 0
+#define ZPCI_ETT_PT -1
+
+/* PCI Function States
+ *
+ * reserved: default; device has just been plugged or is in progress of being
+ * unplugged
+ * standby: device is present but not configured; transition from any
+ * configured state/to this state via sclp configure/deconfigure
+ *
+ * The following states make up the "configured" meta-state:
+ * disabled: device is configured but not enabled; transition between this
+ * state and enabled via clp enable/disable
+ * enabled: device is ready for use; transition to disabled via clp disable;
+ * may enter an error state
+ * blocked: ignore all DMA and interrupts; transition back to enabled or from
+ * error state via mpcifc
+ * error: an error occurred; transition back to enabled via mpcifc
+ * permanent error: an unrecoverable error occurred; transition to standby via
+ * sclp deconfigure
+ */
+typedef enum {
+ ZPCI_FS_RESERVED,
+ ZPCI_FS_STANDBY,
+ ZPCI_FS_DISABLED,
+ ZPCI_FS_ENABLED,
+ ZPCI_FS_BLOCKED,
+ ZPCI_FS_ERROR,
+ ZPCI_FS_PERMANENT_ERROR,
+} ZpciState;
+
+typedef struct SeiContainer {
+ QTAILQ_ENTRY(SeiContainer) link;
+ uint32_t fid;
+ uint32_t fh;
+ uint8_t cc;
+ uint16_t pec;
+ uint64_t faddr;
+ uint32_t e;
+} SeiContainer;
+
+typedef struct PciCcdfErr {
+ uint32_t reserved1;
+ uint32_t fh;
+ uint32_t fid;
+ uint32_t e;
+ uint64_t faddr;
+ uint32_t reserved3;
+ uint16_t reserved4;
+ uint16_t pec;
+} QEMU_PACKED PciCcdfErr;
+
+typedef struct PciCcdfAvail {
+ uint32_t reserved1;
+ uint32_t fh;
+ uint32_t fid;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint16_t reserved6;
+ uint16_t pec;
+} QEMU_PACKED PciCcdfAvail;
+
+typedef struct ChscSeiNt2Res {
+ uint16_t length;
+ uint16_t code;
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t nt;
+ uint8_t flags;
+ uint8_t reserved3;
+ uint8_t reserved4;
+ uint8_t cc;
+ uint32_t reserved5[13];
+ uint8_t ccdf[4016];
+} QEMU_PACKED ChscSeiNt2Res;
+
+typedef struct S390MsixInfo {
+ uint8_t table_bar;
+ uint8_t pba_bar;
+ uint16_t entries;
+ uint32_t table_offset;
+ uint32_t pba_offset;
+} S390MsixInfo;
+
+typedef struct S390IOTLBEntry {
+ uint64_t iova;
+ uint64_t translated_addr;
+ uint64_t len;
+ uint64_t perm;
+} S390IOTLBEntry;
+
+typedef struct S390PCIDMACount {
+ int id;
+ int users;
+ uint32_t avail;
+ QTAILQ_ENTRY(S390PCIDMACount) link;
+} S390PCIDMACount;
+
+struct S390PCIIOMMU {
+ Object parent_obj;
+ S390PCIBusDevice *pbdev;
+ AddressSpace as;
+ MemoryRegion mr;
+ IOMMUMemoryRegion iommu_mr;
+ bool enabled;
+ uint64_t g_iota;
+ uint64_t pba;
+ uint64_t pal;
+ uint64_t max_dma_limit;
+ GHashTable *iotlb;
+ S390PCIDMACount *dma_limit;
+};
+
+typedef struct S390PCIIOMMUTable {
+ uint64_t key;
+ S390PCIIOMMU *iommu[PCI_SLOT_MAX];
+} S390PCIIOMMUTable;
+
+/* Function Measurement Block */
+#define DEFAULT_MUI 4000
+#define UPDATE_U_BIT 0x1ULL
+#define FMBK_MASK 0xfULL
+
+typedef struct ZpciFmbFmt0 {
+ uint64_t dma_rbytes;
+ uint64_t dma_wbytes;
+} ZpciFmbFmt0;
+
+#define ZPCI_FMB_CNT_LD 0
+#define ZPCI_FMB_CNT_ST 1
+#define ZPCI_FMB_CNT_STB 2
+#define ZPCI_FMB_CNT_RPCIT 3
+#define ZPCI_FMB_CNT_MAX 4
+
+#define ZPCI_FMB_FORMAT 0
+
+typedef struct ZpciFmb {
+ uint32_t format;
+ uint32_t sample;
+ uint64_t last_update;
+ uint64_t counter[ZPCI_FMB_CNT_MAX];
+ ZpciFmbFmt0 fmt0;
+} ZpciFmb;
+QEMU_BUILD_BUG_MSG(offsetof(ZpciFmb, fmt0) != 48, "padding in ZpciFmb");
+
+#define ZPCI_DEFAULT_FN_GRP 0xFF
+#define ZPCI_SIM_GRP_START 0xF0
+typedef struct S390PCIGroup {
+ ClpRspQueryPciGrp zpci_group;
+ int id;
+ int host_id;
+ QTAILQ_ENTRY(S390PCIGroup) link;
+} S390PCIGroup;
+S390PCIGroup *s390_group_create(int id, int host_id);
+S390PCIGroup *s390_group_find(int id);
+S390PCIGroup *s390_group_find_host_sim(int host_id);
+
+struct S390PCIBusDevice {
+ DeviceState qdev;
+ PCIDevice *pdev;
+ ZpciState state;
+ char *target;
+ uint16_t uid;
+ uint32_t idx;
+ uint32_t fh;
+ uint32_t fid;
+ bool fid_defined;
+ uint64_t fmb_addr;
+ ZpciFmb fmb;
+ QEMUTimer *fmb_timer;
+ uint8_t isc;
+ uint16_t noi;
+ uint16_t maxstbl;
+ uint8_t sum;
+ uint8_t pft;
+ S390PCIGroup *pci_group;
+ ClpRspQueryPci zpci_fn;
+ S390MsixInfo msix;
+ AdapterRoutes routes;
+ S390PCIIOMMU *iommu;
+ MemoryRegion msix_notify_mr;
+ IndAddr *summary_ind;
+ IndAddr *indicator;
+ Notifier shutdown_notifier;
+ bool pci_unplug_request_processed;
+ bool unplug_requested;
+ bool interp;
+ bool forwarding_assist;
+ bool aif;
+ QTAILQ_ENTRY(S390PCIBusDevice) link;
+};
+
+struct S390PCIBus {
+ BusState qbus;
+};
+
+struct S390pciState {
+ PCIHostState parent_obj;
+ uint32_t next_idx;
+ int bus_no;
+ S390PCIBus *bus;
+ GHashTable *iommu_table;
+ GHashTable *zpci_table;
+ QTAILQ_HEAD(, SeiContainer) pending_sei;
+ QTAILQ_HEAD(, S390PCIBusDevice) zpci_devs;
+ QTAILQ_HEAD(, S390PCIDMACount) zpci_dma_limit;
+ QTAILQ_HEAD(, S390PCIGroup) zpci_groups;
+ uint8_t next_sim_grp;
+};
+
+S390pciState *s390_get_phb(void);
+int pci_chsc_sei_nt2_get_event(void *res);
+int pci_chsc_sei_nt2_have_event(void);
+void s390_pci_sclp_configure(SCCB *sccb);
+void s390_pci_sclp_deconfigure(SCCB *sccb);
+void s390_pci_iommu_enable(S390PCIIOMMU *iommu);
+void s390_pci_iommu_disable(S390PCIIOMMU *iommu);
+void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
+ uint64_t faddr, uint32_t e);
+uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
+ S390IOTLBEntry *entry);
+S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx);
+S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh);
+S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid);
+S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
+ const char *target);
+S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
+ S390PCIBusDevice *pbdev);
+void s390_pci_ism_reset(void);
+
+#endif
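The FH_MASK_* constants near the top of this new header split a 32-bit zPCI function handle into an enable bit, an instance number, a shared-memory discriminator and a device index. A small standalone sketch of decoding a handle with those masks; the sample handle value is invented purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define FH_MASK_ENABLE   0x80000000
    #define FH_MASK_INSTANCE 0x7f000000
    #define FH_MASK_SHM      0x00ff0000
    #define FH_MASK_INDEX    0x0000ffff
    #define FH_SHM_VFIO      0x00010000

    int main(void)
    {
        uint32_t fh = 0x80010005;  /* arbitrary example handle */

        printf("enabled:   %d\n", !!(fh & FH_MASK_ENABLE));            /* 1 */
        printf("instance:  %u\n", (fh & FH_MASK_INSTANCE) >> 24);      /* 0 */
        printf("vfio shm:  %d\n", (fh & FH_MASK_SHM) == FH_SHM_VFIO);  /* 1 */
        printf("index:     %u\n", fh & FH_MASK_INDEX);                 /* 5 */
        return 0;
    }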
diff --git a/include/hw/s390x/s390-pci-clp.h b/include/hw/s390x/s390-pci-clp.h
new file mode 100644
index 0000000000..03b7f9ba5f
--- /dev/null
+++ b/include/hw/s390x/s390-pci-clp.h
@@ -0,0 +1,216 @@
+/*
+ * s390 CLP instruction definitions
+ *
+ * Copyright 2019 IBM Corp.
+ * Author(s): Pierre Morel <pmorel@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_CLP_H
+#define HW_S390_PCI_CLP_H
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE 4096
+#define PCI_BAR_COUNT 6
+#define PCI_MAX_FUNCTIONS 4096
+
+typedef struct ClpReqHdr {
+ uint16_t len;
+ uint16_t cmd;
+} QEMU_PACKED ClpReqHdr;
+
+typedef struct ClpRspHdr {
+ uint16_t len;
+ uint16_t rsp;
+} QEMU_PACKED ClpRspHdr;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request successfully */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+typedef struct ClpFhListEntry {
+ uint16_t device_id;
+ uint16_t vendor_id;
+#define CLP_FHLIST_MASK_CONFIG 0x80000000
+ uint32_t config;
+ uint32_t fid;
+ uint32_t fh;
+} QEMU_PACKED ClpFhListEntry;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(ClpFhListEntry))
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+#define CLP_PFIP_NR_SEGMENTS 4
+
+#define CLP_MASK_FMT 0xf0000000
+
+/* List PCI functions request */
+typedef struct ClpReqListPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint64_t reserved2;
+} QEMU_PACKED ClpReqListPci;
+
+/* List PCI functions response */
+typedef struct ClpRspListPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint32_t mdd;
+ uint16_t max_fn;
+ uint8_t flags;
+ uint8_t entry_size;
+ ClpFhListEntry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} QEMU_PACKED ClpRspListPci;
+
+/* Query PCI function request */
+typedef struct ClpReqQueryPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved2;
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqQueryPci;
+
+/* Query PCI function response */
+typedef struct ClpRspQueryPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint16_t vfn; /* virtual fn number */
+#define CLP_RSP_QPCI_MASK_UTIL 0x01
+ uint8_t flags;
+ uint8_t pfgid;
+ uint32_t fid; /* pci function id */
+ uint8_t bar_size[PCI_BAR_COUNT];
+ uint16_t pchid;
+ uint32_t bar[PCI_BAR_COUNT];
+ uint8_t pfip[CLP_PFIP_NR_SEGMENTS];
+ uint16_t reserved2;
+ uint8_t fmbl;
+ uint8_t pft;
+ uint64_t sdma; /* start dma as */
+ uint64_t edma; /* end dma as */
+ uint32_t reserved3[11];
+ uint32_t uid;
+ uint8_t util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} QEMU_PACKED ClpRspQueryPci;
+
+/* Query PCI function group request */
+typedef struct ClpReqQueryPciGrp {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint8_t reserved2[3];
+ uint8_t g;
+ uint32_t reserved3;
+ uint64_t reserved4;
+} QEMU_PACKED ClpReqQueryPciGrp;
+
+/* Query PCI function group response */
+typedef struct ClpRspQueryPciGrp {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+#define CLP_RSP_QPCIG_MASK_NOI 0xfff
+ uint16_t i;
+ uint8_t version;
+#define CLP_RSP_QPCIG_MASK_FRAME 0x2
+#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
+ uint8_t fr;
+ uint16_t maxstbl;
+ uint16_t mui;
+ uint8_t dtsm;
+ uint8_t reserved3[7];
+ uint64_t dasm; /* dma address space mask */
+ uint64_t msia; /* MSI address */
+ uint64_t reserved4;
+ uint64_t reserved5;
+} QEMU_PACKED ClpRspQueryPciGrp;
+
+/* Set PCI function request */
+typedef struct ClpReqSetPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint16_t reserved2;
+ uint8_t oc; /* operation controls */
+ uint8_t ndas; /* number of dma spaces */
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqSetPci;
+
+/* Set PCI function response */
+typedef struct ClpRspSetPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved3;
+ uint64_t reserved4;
+} QEMU_PACKED ClpRspSetPci;
+
+typedef struct ClpReqRspListPci {
+ ClpReqListPci request;
+ ClpRspListPci response;
+} QEMU_PACKED ClpReqRspListPci;
+
+typedef struct ClpReqRspSetPci {
+ ClpReqSetPci request;
+ ClpRspSetPci response;
+} QEMU_PACKED ClpReqRspSetPci;
+
+typedef struct ClpReqRspQueryPci {
+ ClpReqQueryPci request;
+ ClpRspQueryPci response;
+} QEMU_PACKED ClpReqRspQueryPci;
+
+typedef struct ClpReqRspQueryPciGrp {
+ ClpReqQueryPciGrp request;
+ ClpRspQueryPciGrp response;
+} QEMU_PACKED ClpReqRspQueryPciGrp;
+
+#endif
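CLP_FH_LIST_NR_ENTRIES above is the number of function-handle entries that fit in one 4 KiB CLP block after subtracting two 32-byte headers; with the 16-byte packed ClpFhListEntry that is (4096 - 2 * 32) / 16 = 252. A small standalone check of that arithmetic under the layout shown above:

    #include <assert.h>
    #include <stdint.h>

    /* Packed layout matching ClpFhListEntry above: 2 + 2 + 4 + 4 + 4 = 16 bytes. */
    struct fh_list_entry {
        uint16_t device_id;
        uint16_t vendor_id;
        uint32_t config;
        uint32_t fid;
        uint32_t fh;
    } __attribute__((packed));

    int main(void)
    {
        /* (CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) / sizeof(ClpFhListEntry) */
        int nr_entries = (4096 - 2 * 32) / (int)sizeof(struct fh_list_entry);

        assert(sizeof(struct fh_list_entry) == 16);
        assert(nr_entries == 252);
        return 0;
    }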
diff --git a/include/hw/s390x/s390-pci-inst.h b/include/hw/s390x/s390-pci-inst.h
new file mode 100644
index 0000000000..a55c448aad
--- /dev/null
+++ b/include/hw/s390x/s390-pci-inst.h
@@ -0,0 +1,119 @@
+/*
+ * s390 PCI instruction definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_INST_H
+#define HW_S390_PCI_INST_H
+
+#include "s390-pci-bus.h"
+#include "sysemu/dma.h"
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Modify PCI status codes */
+#define ZPCI_MOD_ST_RES_NOT_AVAIL 4
+#define ZPCI_MOD_ST_INSUF_RES 16
+#define ZPCI_MOD_ST_SEQUENCE 24
+#define ZPCI_MOD_ST_DMAAS_INVAL 28
+#define ZPCI_MOD_ST_FRAME_INVAL 32
+#define ZPCI_MOD_ST_ERROR_RECOVER 40
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* Store PCI Function Controls status codes */
+#define ZPCI_STPCIFC_ST_PERM_ERROR 8
+#define ZPCI_STPCIFC_ST_INVAL_DMAAS 28
+#define ZPCI_STPCIFC_ST_ERROR_RECOVER 40
+
+/* Refresh PCI Translations status codes */
+#define ZPCI_RPCIT_ST_INSUFF_RES 16
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+typedef struct ZpciFib {
+ uint8_t fmt; /* format */
+ uint8_t reserved1[7];
+ uint8_t fc; /* function controls */
+ uint8_t reserved2;
+ uint16_t reserved3;
+ uint32_t reserved4;
+ uint64_t pba; /* PCI base address */
+ uint64_t pal; /* PCI address limit */
+ uint64_t iota; /* I/O Translation Anchor */
+#define FIB_DATA_ISC(x) (((x) >> 28) & 0x7)
+#define FIB_DATA_NOI(x) (((x) >> 16) & 0xfff)
+#define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
+#define FIB_DATA_SUM(x) (((x) >> 7) & 0x1)
+#define FIB_DATA_AISBO(x) ((x) & 0x3f)
+ uint32_t data;
+ uint32_t reserved5;
+ uint64_t aibv; /* Adapter int bit vector address */
+ uint64_t aisb; /* Adapter int summary bit address */
+ uint64_t fmb_addr; /* Function measurement address and key */
+ uint32_t reserved6;
+ uint32_t gd;
+} QEMU_PACKED ZpciFib;
+
+int pci_dereg_irqs(S390PCIBusDevice *pbdev);
+void pci_dereg_ioat(S390PCIIOMMU *iommu);
+int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra);
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
+ uint8_t ar, uintptr_t ra);
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra);
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra);
+void fmb_timer_free(S390PCIBusDevice *pbdev);
+
+#define ZPCI_IO_BAR_MIN 0
+#define ZPCI_IO_BAR_MAX 5
+#define ZPCI_CONFIG_BAR 15
+
+#endif
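The FIB_DATA_* macros embedded in ZpciFib unpack the interrupt parameters (ISC, number of interrupts, AIBV/AISB offsets and the summary bit) from the 32-bit data word. A standalone sketch that round-trips a sample word through those exact macros; the field values are arbitrary and purely illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define FIB_DATA_ISC(x)   (((x) >> 28) & 0x7)
    #define FIB_DATA_NOI(x)   (((x) >> 16) & 0xfff)
    #define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
    #define FIB_DATA_SUM(x)   (((x) >> 7) & 0x1)
    #define FIB_DATA_AISBO(x) ((x) & 0x3f)

    int main(void)
    {
        /* ISC 3, 8 interrupts, AIBV offset 0, summary bit set, AISB offset 2 */
        uint32_t data = (3u << 28) | (8u << 16) | (0u << 8) | (1u << 7) | 2u;

        assert(FIB_DATA_ISC(data) == 3);
        assert(FIB_DATA_NOI(data) == 8);
        assert(FIB_DATA_AIBVO(data) == 0);
        assert(FIB_DATA_SUM(data) == 1);
        assert(FIB_DATA_AISBO(data) == 2);
        return 0;
    }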
diff --git a/include/hw/s390x/s390-pci-kvm.h b/include/hw/s390x/s390-pci-kvm.h
new file mode 100644
index 0000000000..933814a402
--- /dev/null
+++ b/include/hw/s390x/s390-pci-kvm.h
@@ -0,0 +1,38 @@
+/*
+ * s390 PCI KVM interfaces
+ *
+ * Copyright 2022 IBM Corp.
+ * Author(s): Matthew Rosato <mjrosato@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_KVM_H
+#define HW_S390_PCI_KVM_H
+
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/s390-pci-inst.h"
+
+#ifdef CONFIG_KVM
+bool s390_pci_kvm_interp_allowed(void);
+int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib, bool assist);
+int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev);
+#else
+static inline bool s390_pci_kvm_interp_allowed(void)
+{
+ return false;
+}
+static inline int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib,
+ bool assist)
+{
+ return -EINVAL;
+}
+static inline int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/include/hw/s390x/s390-pci-vfio.h b/include/hw/s390x/s390-pci-vfio.h
new file mode 100644
index 0000000000..ae1b126ff7
--- /dev/null
+++ b/include/hw/s390x/s390-pci-vfio.h
@@ -0,0 +1,44 @@
+/*
+ * s390 vfio-pci interfaces
+ *
+ * Copyright 2020 IBM Corp.
+ * Author(s): Matthew Rosato <mjrosato@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_VFIO_H
+#define HW_S390_PCI_VFIO_H
+
+#include "hw/s390x/s390-pci-bus.h"
+#include CONFIG_DEVICES
+
+#ifdef CONFIG_VFIO
+bool s390_pci_update_dma_avail(int fd, unsigned int *avail);
+S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
+ S390PCIBusDevice *pbdev);
+void s390_pci_end_dma_count(S390pciState *s, S390PCIDMACount *cnt);
+bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh);
+void s390_pci_get_clp_info(S390PCIBusDevice *pbdev);
+#else
+static inline bool s390_pci_update_dma_avail(int fd, unsigned int *avail)
+{
+ return false;
+}
+static inline S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
+ S390PCIBusDevice *pbdev)
+{
+ return NULL;
+}
+static inline void s390_pci_end_dma_count(S390pciState *s,
+ S390PCIDMACount *cnt) { }
+static inline bool s390_pci_get_host_fh(S390PCIBusDevice *pbdev, uint32_t *fh)
+{
+ return false;
+}
+static inline void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) { }
+#endif
+
+#endif
diff --git a/include/hw/s390x/s390-virtio-ccw.h b/include/hw/s390x/s390-virtio-ccw.h
index cd1dccc6e3..c1d46e78af 100644
--- a/include/hw/s390x/s390-virtio-ccw.h
+++ b/include/hw/s390x/s390-virtio-ccw.h
@@ -12,16 +12,14 @@
#define HW_S390X_S390_VIRTIO_CCW_H
#include "hw/boards.h"
+#include "qom/object.h"
#define TYPE_S390_CCW_MACHINE "s390-ccw-machine"
-#define S390_CCW_MACHINE(obj) \
- OBJECT_CHECK(S390CcwMachineState, (obj), TYPE_S390_CCW_MACHINE)
+OBJECT_DECLARE_TYPE(S390CcwMachineState, S390CcwMachineClass, S390_CCW_MACHINE)
-#define S390_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390CcwMachineClass, (klass), TYPE_S390_CCW_MACHINE)
-typedef struct S390CcwMachineState {
+struct S390CcwMachineState {
/*< private >*/
MachineState parent_obj;
@@ -30,9 +28,15 @@ typedef struct S390CcwMachineState {
bool dea_key_wrap;
bool pv;
uint8_t loadparm[8];
-} S390CcwMachineState;
+};
-typedef struct S390CcwMachineClass {
+#define S390_PTF_REASON_NONE (0x00 << 8)
+#define S390_PTF_REASON_DONE (0x01 << 8)
+#define S390_PTF_REASON_BUSY (0x02 << 8)
+#define S390_TOPO_FC_MASK 0xffUL
+void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra);
+
+struct S390CcwMachineClass {
/*< private >*/
MachineClass parent_class;
@@ -41,7 +45,8 @@ typedef struct S390CcwMachineClass {
bool cpu_model_allowed;
bool css_migration_enabled;
bool hpage_1m_allowed;
-} S390CcwMachineClass;
+ int max_threads;
+};
/* runtime-instrumentation allowed by the machine */
bool ri_allowed(void);
diff --git a/include/hw/s390x/s390_flic.h b/include/hw/s390x/s390_flic.h
index 4687ecfe83..3907a13d07 100644
--- a/include/hw/s390x/s390_flic.h
+++ b/include/hw/s390x/s390_flic.h
@@ -17,6 +17,7 @@
#include "hw/s390x/adapter.h"
#include "hw/virtio/virtio.h"
#include "qemu/queue.h"
+#include "qom/object.h"
/*
* Reserve enough gsis to accommodate all virtio devices.
@@ -38,22 +39,18 @@ extern const VMStateDescription vmstate_adapter_routes;
VMSTATE_STRUCT(_f, _s, 1, vmstate_adapter_routes, AdapterRoutes)
#define TYPE_S390_FLIC_COMMON "s390-flic"
-#define S390_FLIC_COMMON(obj) \
- OBJECT_CHECK(S390FLICState, (obj), TYPE_S390_FLIC_COMMON)
+OBJECT_DECLARE_TYPE(S390FLICState, S390FLICStateClass,
+ S390_FLIC_COMMON)
-typedef struct S390FLICState {
+struct S390FLICState {
SysBusDevice parent_obj;
/* to limit AdapterRoutes.num_routes for compat */
uint32_t adapter_routes_max_batch;
bool ais_supported;
-} S390FLICState;
+};
-#define S390_FLIC_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390FLICStateClass, (klass), TYPE_S390_FLIC_COMMON)
-#define S390_FLIC_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390FLICStateClass, (obj), TYPE_S390_FLIC_COMMON)
-typedef struct S390FLICStateClass {
+struct S390FLICStateClass {
DeviceClass parent_class;
int (*register_io_adapter)(S390FLICState *fs, uint32_t id, uint8_t isc,
@@ -72,15 +69,15 @@ typedef struct S390FLICStateClass {
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word);
void (*inject_crw_mchk)(S390FLICState *fs);
-} S390FLICStateClass;
+};
#define TYPE_KVM_S390_FLIC "s390-flic-kvm"
-#define KVM_S390_FLIC(obj) \
- OBJECT_CHECK(KVMS390FLICState, (obj), TYPE_KVM_S390_FLIC)
+typedef struct KVMS390FLICState KVMS390FLICState;
+DECLARE_INSTANCE_CHECKER(KVMS390FLICState, KVM_S390_FLIC,
+ TYPE_KVM_S390_FLIC)
#define TYPE_QEMU_S390_FLIC "s390-flic-qemu"
-#define QEMU_S390_FLIC(obj) \
- OBJECT_CHECK(QEMUS390FLICState, (obj), TYPE_QEMU_S390_FLIC)
+OBJECT_DECLARE_SIMPLE_TYPE(QEMUS390FLICState, QEMU_S390_FLIC)
#define SIC_IRQ_MODE_ALL 0
#define SIC_IRQ_MODE_SINGLE 1
@@ -114,14 +111,14 @@ typedef struct QEMUS390FlicIO {
QLIST_ENTRY(QEMUS390FlicIO) next;
} QEMUS390FlicIO;
-typedef struct QEMUS390FLICState {
+struct QEMUS390FLICState {
S390FLICState parent_obj;
uint32_t pending;
uint32_t service_param;
uint8_t simm;
uint8_t nimm;
QLIST_HEAD(, QEMUS390FlicIO) io[8];
-} QEMUS390FLICState;
+};
uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic);
QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic,
@@ -137,6 +134,9 @@ void s390_flic_init(void);
S390FLICState *s390_get_flic(void);
QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs);
S390FLICStateClass *s390_get_flic_class(S390FLICState *fs);
+void s390_crw_mchk(void);
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+ uint32_t io_int_parm, uint32_t io_int_word);
bool ais_needed(void *opaque);
#endif /* HW_S390_FLIC_H */
diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h
index 822eff4396..b405a387b6 100644
--- a/include/hw/s390x/sclp.h
+++ b/include/hw/s390x/sclp.h
@@ -16,6 +16,7 @@
#include "hw/sysbus.h"
#include "target/s390x/cpu-qom.h"
+#include "qom/object.h"
#define SCLP_CMD_CODE_MASK 0xffff00ff
@@ -37,10 +38,8 @@
#define MAX_STORAGE_INCREMENTS 1020
/* CPU hotplug SCLP codes */
-#define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL
+#define SCLP_HAS_CPU_INFO 0x0800000000000000ULL
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
-#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
-#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
/* SCLP PCI codes */
#define SCLP_HAS_IOA_RECONFIG 0x0000000040000000ULL
@@ -86,7 +85,7 @@
* - we work on a private copy of the SCCB, since there are several length
* fields, that would cause a security nightmare if we allow the guest to
* alter the structure while we parse it. We cannot use ldl_p and friends
- * either without doing pointer arithmetics
+ * either without doing pointer arithmetic
* So we have to double check that all users of sclp data structures use the
* right endianness wrappers.
*/
@@ -110,11 +109,14 @@ typedef struct CPUEntry {
uint8_t reserved1;
} QEMU_PACKED CPUEntry;
+#define SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET 128
+#define SCLP_READ_SCP_INFO_MNEST 4
typedef struct ReadInfo {
SCCBHeader h;
uint16_t rnmax;
uint8_t rnsize;
- uint8_t _reserved1[16 - 11]; /* 11-15 */
+ uint8_t _reserved1[15 - 11]; /* 11-14 */
+ uint8_t stsi_parm; /* 15-15 */
uint16_t entries_cpu; /* 16-17 */
uint16_t offset_cpu; /* 18-19 */
uint8_t _reserved2[24 - 20]; /* 20-23 */
@@ -132,7 +134,15 @@ typedef struct ReadInfo {
uint16_t highest_cpu;
uint8_t _reserved5[124 - 122]; /* 122-123 */
uint32_t hmfai;
+ uint8_t _reserved7[134 - 128]; /* 128-133 */
+ uint8_t fac134;
+ uint8_t _reserved8[144 - 135]; /* 135-143 */
struct CPUEntry entries[];
+ /*
+ * When the Extended-Length SCCB (ELS) feature is enabled the
+ * start of the entries field begins at an offset denoted by the
+ * offset_cpu field, otherwise it's at an offset of 128.
+ */
} QEMU_PACKED ReadInfo;
typedef struct ReadCpuInfo {
@@ -177,26 +187,25 @@ typedef struct IoaCfgSccb {
typedef struct SCCB {
SCCBHeader h;
- char data[SCCB_DATA_LEN];
+ char data[];
} QEMU_PACKED SCCB;
#define TYPE_SCLP "sclp"
-#define SCLP(obj) OBJECT_CHECK(SCLPDevice, (obj), TYPE_SCLP)
-#define SCLP_CLASS(oc) OBJECT_CLASS_CHECK(SCLPDeviceClass, (oc), TYPE_SCLP)
-#define SCLP_GET_CLASS(obj) OBJECT_GET_CLASS(SCLPDeviceClass, (obj), TYPE_SCLP)
+OBJECT_DECLARE_TYPE(SCLPDevice, SCLPDeviceClass,
+ SCLP)
-typedef struct SCLPEventFacility SCLPEventFacility;
+struct SCLPEventFacility;
-typedef struct SCLPDevice {
+struct SCLPDevice {
/* private */
DeviceState parent_obj;
- SCLPEventFacility *event_facility;
+ struct SCLPEventFacility *event_facility;
int increment_size;
/* public */
-} SCLPDevice;
+};
-typedef struct SCLPDeviceClass {
+struct SCLPDeviceClass {
/* private */
DeviceClass parent_class;
void (*read_SCP_info)(SCLPDevice *sclp, SCCB *sccb);
@@ -205,7 +214,7 @@ typedef struct SCLPDeviceClass {
/* public */
void (*execute)(SCLPDevice *sclp, SCCB *sccb, uint32_t code);
void (*service_interrupt)(SCLPDevice *sclp, uint32_t sccb);
-} SCLPDeviceClass;
+};
static inline int sccb_data_len(SCCB *sccb)
{
@@ -216,8 +225,7 @@ static inline int sccb_data_len(SCCB *sccb)
void s390_sclp_init(void);
void sclp_service_interrupt(uint32_t sccb);
void raise_irq_cpu_hotplug(void);
-int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
-int sclp_service_call_protected(CPUS390XState *env, uint64_t sccb,
- uint32_t code);
+int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code);
+int sclp_service_call_protected(S390CPU *cpu, uint64_t sccb, uint32_t code);
#endif
diff --git a/include/hw/s390x/storage-attributes.h b/include/hw/s390x/storage-attributes.h
index 4f7c6c0877..8921a04d51 100644
--- a/include/hw/s390x/storage-attributes.h
+++ b/include/hw/s390x/storage-attributes.h
@@ -14,26 +14,22 @@
#include "hw/qdev-core.h"
#include "monitor/monitor.h"
+#include "qom/object.h"
#define TYPE_S390_STATTRIB "s390-storage_attributes"
#define TYPE_QEMU_S390_STATTRIB "s390-storage_attributes-qemu"
#define TYPE_KVM_S390_STATTRIB "s390-storage_attributes-kvm"
-#define S390_STATTRIB(obj) \
- OBJECT_CHECK(S390StAttribState, (obj), TYPE_S390_STATTRIB)
+OBJECT_DECLARE_TYPE(S390StAttribState, S390StAttribClass, S390_STATTRIB)
-typedef struct S390StAttribState {
+struct S390StAttribState {
DeviceState parent_obj;
uint64_t migration_cur_gfn;
bool migration_enabled;
-} S390StAttribState;
+};
-#define S390_STATTRIB_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390StAttribClass, (klass), TYPE_S390_STATTRIB)
-#define S390_STATTRIB_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390StAttribClass, (obj), TYPE_S390_STATTRIB)
-typedef struct S390StAttribClass {
+struct S390StAttribClass {
DeviceClass parent_class;
/* Return value: < 0 on error, or new count */
int (*get_stattr)(S390StAttribState *sa, uint64_t *start_gfn,
@@ -43,26 +39,28 @@ typedef struct S390StAttribClass {
int (*set_stattr)(S390StAttribState *sa, uint64_t start_gfn,
uint32_t count, uint8_t *values);
void (*synchronize)(S390StAttribState *sa);
- int (*set_migrationmode)(S390StAttribState *sa, bool value);
+ int (*set_migrationmode)(S390StAttribState *sa, bool value, Error **errp);
int (*get_active)(S390StAttribState *sa);
long long (*get_dirtycount)(S390StAttribState *sa);
-} S390StAttribClass;
+};
-#define QEMU_S390_STATTRIB(obj) \
- OBJECT_CHECK(QEMUS390StAttribState, (obj), TYPE_QEMU_S390_STATTRIB)
+typedef struct QEMUS390StAttribState QEMUS390StAttribState;
+DECLARE_INSTANCE_CHECKER(QEMUS390StAttribState, QEMU_S390_STATTRIB,
+ TYPE_QEMU_S390_STATTRIB)
-typedef struct QEMUS390StAttribState {
+struct QEMUS390StAttribState {
S390StAttribState parent_obj;
-} QEMUS390StAttribState;
+};
-#define KVM_S390_STATTRIB(obj) \
- OBJECT_CHECK(KVMS390StAttribState, (obj), TYPE_KVM_S390_STATTRIB)
+typedef struct KVMS390StAttribState KVMS390StAttribState;
+DECLARE_INSTANCE_CHECKER(KVMS390StAttribState, KVM_S390_STATTRIB,
+ TYPE_KVM_S390_STATTRIB)
-typedef struct KVMS390StAttribState {
+struct KVMS390StAttribState {
S390StAttribState parent_obj;
uint64_t still_dirty;
uint8_t *incoming_buffer;
-} KVMS390StAttribState;
+};
void s390_stattrib_init(void);
diff --git a/include/hw/s390x/storage-keys.h b/include/hw/s390x/storage-keys.h
index 3f1ae7e778..aa2ec2aae5 100644
--- a/include/hw/s390x/storage-keys.h
+++ b/include/hw/s390x/storage-keys.h
@@ -14,41 +14,101 @@
#include "hw/qdev-core.h"
#include "monitor/monitor.h"
+#include "qom/object.h"
#define TYPE_S390_SKEYS "s390-skeys"
-#define S390_SKEYS(obj) \
- OBJECT_CHECK(S390SKeysState, (obj), TYPE_S390_SKEYS)
+OBJECT_DECLARE_TYPE(S390SKeysState, S390SKeysClass, S390_SKEYS)
-typedef struct S390SKeysState {
+struct S390SKeysState {
DeviceState parent_obj;
bool migration_enabled;
-} S390SKeysState;
+};
-#define S390_SKEYS_CLASS(klass) \
- OBJECT_CLASS_CHECK(S390SKeysClass, (klass), TYPE_S390_SKEYS)
-#define S390_SKEYS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(S390SKeysClass, (obj), TYPE_S390_SKEYS)
-typedef struct S390SKeysClass {
+struct S390SKeysClass {
DeviceClass parent_class;
- int (*skeys_enabled)(S390SKeysState *ks);
+
+ /**
+ * @skeys_are_enabled:
+ *
+ * Check whether storage keys are enabled. If not enabled, they were not
+ * enabled lazily either by the guest via a storage key instruction or
+ * by the host during migration.
+ *
+ * If disabled, everything not explicitly triggered by the guest,
+ * such as outgoing migration or dirty/change tracking, should not touch
+ * storage keys and should not lazily enable it.
+ *
+ * @ks: the #S390SKeysState
+ *
+ * Returns false if not enabled and true if enabled.
+ */
+ bool (*skeys_are_enabled)(S390SKeysState *ks);
+
+ /**
+ * @enable_skeys:
+ *
+ * Lazily enable storage keys. If this function is not implemented,
+ * setting a storage key will lazily enable storage keys implicitly
+ * instead. TCG guests have to make sure to flush the TLB of all CPUs
+ * if storage keys were not enabled before this call.
+ *
+ * @ks: the #S390SKeysState
+ *
+ * Returns false if not enabled before this call, and true if already
+ * enabled.
+ */
+ bool (*enable_skeys)(S390SKeysState *ks);
+
+ /**
+ * @get_skeys:
+ *
+ * Get storage keys for the given PFN range. This call will fail if
+ * storage keys have not been lazily enabled yet.
+ *
+ * Callers have to validate that a GFN is valid before this call.
+ *
+ * @ks: the #S390SKeysState
+ * @start_gfn: the start GFN to get storage keys for
+ * @count: the number of storage keys to get
+ * @keys: the byte array where storage keys will be stored to
+ *
+ * Returns 0 on success, returns an error if getting a storage key failed.
+ */
int (*get_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count,
uint8_t *keys);
+ /**
+ * @set_skeys:
+ *
+ * Set storage keys for the given PFN range. This call will fail if
+ * storage keys have not been lazily enabled yet and implicit
+ * enablement is not supported.
+ *
+ * Callers have to validate that a GFN is valid before this call.
+ *
+ * @ks: the #S390SKeysState
+ * @start_gfn: the start GFN to set storage keys for
+ * @count: the number of storage keys to set
+ * @keys: the byte array where storage keys will be read from
+ *
+ * Returns 0 on success, returns an error if setting a storage key failed.
+ */
int (*set_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count,
uint8_t *keys);
-} S390SKeysClass;
+};
#define TYPE_KVM_S390_SKEYS "s390-skeys-kvm"
#define TYPE_QEMU_S390_SKEYS "s390-skeys-qemu"
-#define QEMU_S390_SKEYS(obj) \
- OBJECT_CHECK(QEMUS390SKeysState, (obj), TYPE_QEMU_S390_SKEYS)
+typedef struct QEMUS390SKeysState QEMUS390SKeysState;
+DECLARE_INSTANCE_CHECKER(QEMUS390SKeysState, QEMU_S390_SKEYS,
+ TYPE_QEMU_S390_SKEYS)
-typedef struct QEMUS390SKeysState {
+struct QEMUS390SKeysState {
S390SKeysState parent_obj;
uint8_t *keydata;
uint32_t key_count;
-} QEMUS390SKeysState;
+};
void s390_skeys_init(void);
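The @skeys_are_enabled / @get_skeys contract documented above is reached through the QOM class checkers generated by OBJECT_DECLARE_TYPE(S390SKeysState, S390SKeysClass, S390_SKEYS). A sketch of a well-behaved caller, not compilable outside QEMU; the surrounding device lookup and error reporting are left to the caller:

    /* Sketch only: honours the "don't lazily enable" rule from @skeys_are_enabled. */
    static void dump_one_key(S390SKeysState *ss, uint64_t gfn)
    {
        S390SKeysClass *kc = S390_SKEYS_GET_CLASS(ss);
        uint8_t key;

        if (!kc->skeys_are_enabled(ss)) {
            return;   /* nothing to report, and do not enable keys as a side effect */
        }
        /* Callers must validate the GFN before asking for its key. */
        if (kc->get_skeys(ss, gfn, 1, &key) == 0) {
            /* ... use key ... */
        }
    }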
diff --git a/include/hw/s390x/tod.h b/include/hw/s390x/tod.h
index 4251623f7f..0935e85089 100644
--- a/include/hw/s390x/tod.h
+++ b/include/hw/s390x/tod.h
@@ -12,7 +12,8 @@
#define HW_S390_TOD_H
#include "hw/qdev-core.h"
-#include "target/s390x/s390-tod.h"
+#include "tcg/s390-tod.h"
+#include "qom/object.h"
typedef struct S390TOD {
uint8_t high;
@@ -20,15 +21,11 @@ typedef struct S390TOD {
} S390TOD;
#define TYPE_S390_TOD "s390-tod"
-#define S390_TOD(obj) OBJECT_CHECK(S390TODState, (obj), TYPE_S390_TOD)
-#define S390_TOD_CLASS(oc) OBJECT_CLASS_CHECK(S390TODClass, (oc), \
- TYPE_S390_TOD)
-#define S390_TOD_GET_CLASS(obj) OBJECT_GET_CLASS(S390TODClass, (obj), \
- TYPE_S390_TOD)
+OBJECT_DECLARE_TYPE(S390TODState, S390TODClass, S390_TOD)
#define TYPE_KVM_S390_TOD TYPE_S390_TOD "-kvm"
#define TYPE_QEMU_S390_TOD TYPE_S390_TOD "-qemu"
-typedef struct S390TODState {
+struct S390TODState {
/* private */
DeviceState parent_obj;
@@ -39,9 +36,9 @@ typedef struct S390TODState {
S390TOD base;
/* Used by KVM to remember if the TOD is stopped and base is valid. */
bool stopped;
-} S390TODState;
+};
-typedef struct S390TODClass {
+struct S390TODClass {
/* private */
DeviceClass parent_class;
void (*parent_realize)(DeviceState *dev, Error **errp);
@@ -49,7 +46,7 @@ typedef struct S390TODClass {
/* public */
void (*get)(const S390TODState *td, S390TOD *tod, Error **errp);
void (*set)(S390TODState *td, const S390TOD *tod, Error **errp);
-} S390TODClass;
+};
void s390_init_tod(void);
S390TODState *s390_get_todstate(void);
diff --git a/include/hw/s390x/vfio-ccw.h b/include/hw/s390x/vfio-ccw.h
index ee5250d0d7..4209d27657 100644
--- a/include/hw/s390x/vfio-ccw.h
+++ b/include/hw/s390x/vfio-ccw.h
@@ -17,12 +17,9 @@
#include "hw/vfio/vfio-common.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/ccw-device.h"
+#include "qom/object.h"
#define TYPE_VFIO_CCW "vfio-ccw"
-#define VFIO_CCW(obj) \
- OBJECT_CHECK(VFIOCCWDevice, (obj), TYPE_VFIO_CCW)
-
-#define TYPE_VFIO_CCW "vfio-ccw"
-typedef struct VFIOCCWDevice VFIOCCWDevice;
+OBJECT_DECLARE_SIMPLE_TYPE(VFIOCCWDevice, VFIO_CCW)
#endif
diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h
index 6ba47dac41..533d856aa3 100644
--- a/include/hw/scsi/esp.h
+++ b/include/hw/scsi/esp.h
@@ -3,50 +3,45 @@
#include "hw/scsi/scsi.h"
#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
/* esp.c */
#define ESP_MAX_DEVS 7
typedef void (*ESPDMAMemoryReadWriteFunc)(void *opaque, uint8_t *buf, int len);
#define ESP_REGS 16
-#define TI_BUFSZ 16
-#define ESP_CMDBUF_SZ 32
+#define ESP_FIFO_SZ 16
+#define ESP_CMDFIFO_SZ 32
typedef struct ESPState ESPState;
-enum pdma_origin_id {
- PDMA,
- TI,
- CMD,
- ASYNC,
-};
+#define TYPE_ESP "esp"
+OBJECT_DECLARE_SIMPLE_TYPE(ESPState, ESP)
struct ESPState {
+ DeviceState parent_obj;
+
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
qemu_irq irq;
- qemu_irq irq_data;
+ qemu_irq drq_irq;
+ bool drq_state;
uint8_t chip_id;
bool tchi_written;
int32_t ti_size;
- uint32_t ti_rptr, ti_wptr;
uint32_t status;
- uint32_t deferred_status;
- bool deferred_complete;
uint32_t dma;
- uint8_t ti_buf[TI_BUFSZ];
+ Fifo8 fifo;
SCSIBus bus;
SCSIDevice *current_dev;
SCSIRequest *current_req;
- uint8_t cmdbuf[ESP_CMDBUF_SZ];
- uint32_t cmdlen;
+ Fifo8 cmdfifo;
+ uint8_t cmdfifo_cdb_offset;
+ uint8_t lun;
uint32_t do_cmd;
- /* The amount of data left in the current DMA transfer. */
- uint32_t dma_left;
- /* The size of the current DMA transfer. Zero if no transfer is in
- progress. */
- uint32_t dma_counter;
+ bool data_ready;
int dma_enabled;
uint32_t async_len;
@@ -56,18 +51,25 @@ struct ESPState {
ESPDMAMemoryReadWriteFunc dma_memory_write;
void *dma_opaque;
void (*dma_cb)(ESPState *s);
- uint8_t pdma_buf[32];
- int pdma_origin;
- uint32_t pdma_len;
- uint32_t pdma_start;
- uint32_t pdma_cur;
- void (*pdma_cb)(ESPState *s);
+
+ uint8_t mig_version_id;
+
+ /* Legacy fields for vmstate_esp version < 5 */
+ uint32_t mig_dma_left;
+ uint32_t mig_deferred_status;
+ bool mig_deferred_complete;
+ uint32_t mig_ti_rptr, mig_ti_wptr;
+ uint8_t mig_ti_buf[ESP_FIFO_SZ];
+ uint8_t mig_cmdbuf[ESP_CMDFIFO_SZ];
+ uint32_t mig_cmdlen;
+
+ uint8_t mig_ti_cmd;
};
-#define TYPE_ESP "esp"
-#define ESP_STATE(obj) OBJECT_CHECK(SysBusESPState, (obj), TYPE_ESP)
+#define TYPE_SYSBUS_ESP "sysbus-esp"
+OBJECT_DECLARE_SIMPLE_TYPE(SysBusESPState, SYSBUS_ESP)
-typedef struct {
+struct SysBusESPState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -76,7 +78,7 @@ typedef struct {
MemoryRegion pdma;
uint32_t it_shift;
ESPState esp;
-} SysBusESPState;
+};
#define ESP_TCLO 0x0
#define ESP_TCMID 0x1
@@ -141,6 +143,7 @@ typedef struct {
#define INTR_RST 0x80
#define SEQ_0 0x0
+#define SEQ_MO 0x1
#define SEQ_CD 0x4
#define CFG1_RESREPT 0x40
@@ -150,11 +153,12 @@ typedef struct {
void esp_dma_enable(ESPState *s, int irq, int level);
void esp_request_cancelled(SCSIRequest *req);
-void esp_command_complete(SCSIRequest *req, uint32_t status, size_t resid);
+void esp_command_complete(SCSIRequest *req, size_t resid);
void esp_transfer_data(SCSIRequest *req, uint32_t len);
void esp_hard_reset(ESPState *s);
uint64_t esp_reg_read(ESPState *s, uint32_t saddr);
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val);
extern const VMStateDescription vmstate_esp;
+int esp_pre_save(void *opaque);
#endif
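
For orientation (not part of the patch), a minimal sketch of the qemu/fifo8.h calls that the new fifo and cmdfifo fields rely on; the function below is invented purely to show the API that replaces the old ti_buf/ti_rptr/ti_wptr bookkeeping.

/* Illustration only: the Fifo8 operations behind the converted fields. */
static void example_esp_fifo_usage(ESPState *s)
{
    fifo8_create(&s->fifo, ESP_FIFO_SZ);        /* replaces ti_buf[]     */
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);  /* replaces cmdbuf[]     */

    if (!fifo8_is_full(&s->fifo)) {
        fifo8_push(&s->fifo, 0x42);             /* was ti_buf[ti_wptr++] */
    }
    if (!fifo8_is_empty(&s->fifo)) {
        uint8_t byte = fifo8_pop(&s->fifo);     /* was ti_buf[ti_rptr++] */
        (void)byte;
    }
}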
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index 2fc23e44ba..c3d5e17e38 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -6,8 +6,9 @@
#include "hw/qdev-core.h"
#include "scsi/utils.h"
#include "qemu/notify.h"
+#include "qom/object.h"
-#define MAX_SCSI_DEVS 255
+#define MAX_SCSI_DEVS 255
typedef struct SCSIBus SCSIBus;
typedef struct SCSIBusInfo SCSIBusInfo;
@@ -17,6 +18,7 @@ typedef struct SCSIReqOps SCSIReqOps;
#define SCSI_SENSE_BUF_SIZE_OLD 96
#define SCSI_SENSE_BUF_SIZE 252
+#define DEFAULT_IO_TIMEOUT 30
struct SCSIRequest {
SCSIBus *bus;
@@ -25,9 +27,10 @@ struct SCSIRequest {
uint32_t refcount;
uint32_t tag;
uint32_t lun;
- uint32_t status;
+ int16_t status;
+ int16_t host_status;
void *hba_private;
- size_t resid;
+ uint64_t residual;
SCSICommand cmd;
NotifierList cancel_notifiers;
@@ -49,36 +52,36 @@ struct SCSIRequest {
};
#define TYPE_SCSI_DEVICE "scsi-device"
-#define SCSI_DEVICE(obj) \
- OBJECT_CHECK(SCSIDevice, (obj), TYPE_SCSI_DEVICE)
-#define SCSI_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SCSIDeviceClass, (klass), TYPE_SCSI_DEVICE)
-#define SCSI_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SCSIDeviceClass, (obj), TYPE_SCSI_DEVICE)
-
-typedef struct SCSIDeviceClass {
+OBJECT_DECLARE_TYPE(SCSIDevice, SCSIDeviceClass, SCSI_DEVICE)
+
+struct SCSIDeviceClass {
DeviceClass parent_class;
void (*realize)(SCSIDevice *dev, Error **errp);
void (*unrealize)(SCSIDevice *dev);
int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
- void *hba_private);
+ size_t buf_len, void *hba_private);
SCSIRequest *(*alloc_req)(SCSIDevice *s, uint32_t tag, uint32_t lun,
uint8_t *buf, void *hba_private);
void (*unit_attention_reported)(SCSIDevice *s);
-} SCSIDeviceClass;
+};
struct SCSIDevice
{
DeviceState qdev;
VMChangeStateEntry *vmsentry;
- QEMUBH *bh;
uint32_t id;
BlockConf conf;
SCSISense unit_attention;
bool sense_is_ua;
uint8_t sense[SCSI_SENSE_BUF_SIZE];
uint32_t sense_len;
+
+ /*
+ * The requests list is only accessed from the AioContext that executes
+ * requests or from the main loop when IOThread processing is stopped.
+ */
QTAILQ_HEAD(, SCSIRequest) requests;
+
uint32_t channel;
uint32_t lun;
int blocksize;
@@ -88,6 +91,7 @@ struct SCSIDevice
uint64_t port_wwn;
int scsi_version;
int default_scsi_version;
+ uint32_t io_timeout;
bool needs_vpd_bl_emulation;
bool hba_supports_iothread;
};
@@ -109,6 +113,7 @@ int cdrom_read_toc_raw(int nb_sectors, uint8_t *buf, int msf, int session_num);
/* scsi-bus.c */
struct SCSIReqOps {
size_t size;
+ void (*init_req)(SCSIRequest *req);
void (*free_req)(SCSIRequest *req);
int32_t (*send_command)(SCSIRequest *req, uint8_t *buf);
void (*read_data)(SCSIRequest *req);
@@ -123,9 +128,10 @@ struct SCSIBusInfo {
int tcq;
int max_channel, max_target, max_lun;
int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
- void *hba_private);
+ size_t buf_len, void *hba_private);
void (*transfer_data)(SCSIRequest *req, uint32_t arg);
- void (*complete)(SCSIRequest *req, uint32_t arg, size_t resid);
+ void (*fail)(SCSIRequest *req);
+ void (*complete)(SCSIRequest *req, size_t residual);
void (*cancel)(SCSIRequest *req);
void (*change)(SCSIBus *bus, SCSIDevice *dev, SCSISense sense);
QEMUSGList *(*get_sg_list)(SCSIRequest *req);
@@ -133,10 +139,20 @@ struct SCSIBusInfo {
void (*save_request)(QEMUFile *f, SCSIRequest *req);
void *(*load_request)(QEMUFile *f, SCSIRequest *req);
void (*free_request)(SCSIBus *bus, void *priv);
+
+ /*
+ * Temporarily stop submitting new requests between drained_begin() and
+ * drained_end(). Called from the main loop thread with the BQL held.
+ *
+ * Implement these callbacks if request processing is triggered by a file
+ * descriptor like an EventNotifier. Otherwise set them to NULL.
+ */
+ void (*drained_begin)(SCSIBus *bus);
+ void (*drained_end)(SCSIBus *bus);
};
#define TYPE_SCSI_BUS "SCSI"
-#define SCSI_BUS(obj) OBJECT_CHECK(SCSIBus, (obj), TYPE_SCSI_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(SCSIBus, SCSI_BUS)
struct SCSIBus {
BusState qbus;
@@ -144,10 +160,38 @@ struct SCSIBus {
SCSISense unit_attention;
const SCSIBusInfo *info;
+
+ int drain_count; /* protected by BQL */
};
-void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
- const SCSIBusInfo *info, const char *bus_name);
+/**
+ * scsi_bus_init_named: Initialize a SCSI bus with the specified name
+ * @bus: SCSIBus object to initialize
+ * @bus_size: size of @bus object
+ * @host: Device which owns the bus (generally the SCSI controller)
+ * @info: structure defining callbacks etc for the controller
+ * @bus_name: Name to use for this bus
+ *
+ * This in-place initializes @bus as a new SCSI bus with a name
+ * provided by the caller. It is the caller's responsibility to make
+ * sure that name does not clash with the name of any other bus in the
+ * system. Unless you need the new bus to have a specific name, you
+ * should use scsi_bus_init() instead.
+ */
+void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
+ const SCSIBusInfo *info, const char *bus_name);
+
+/**
+ * scsi_bus_init: Initialize a SCSI bus
+ *
+ * This in-place-initializes @bus as a new SCSI bus and gives it
+ * an automatically generated unique name.
+ */
+static inline void scsi_bus_init(SCSIBus *bus, size_t bus_size,
+ DeviceState *host, const SCSIBusInfo *info)
+{
+ scsi_bus_init_named(bus, bus_size, host, info, NULL);
+}
static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d)
{
@@ -155,36 +199,37 @@ static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d)
}
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
- int unit, bool removable, int bootindex,
- bool share_rw,
- BlockdevOnError rerror,
- BlockdevOnError werror,
+ int unit, bool removable, BlockConf *conf,
const char *serial, Error **errp);
+void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense);
void scsi_bus_legacy_handle_cmdline(SCSIBus *bus);
-void scsi_legacy_handle_cmdline(void);
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
uint32_t tag, uint32_t lun, void *hba_private);
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
- uint8_t *buf, void *hba_private);
+ uint8_t *buf, size_t buf_len, void *hba_private);
int32_t scsi_req_enqueue(SCSIRequest *req);
SCSIRequest *scsi_req_ref(SCSIRequest *req);
void scsi_req_unref(SCSIRequest *req);
int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
- void *hba_private);
-int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf);
+ size_t buf_len, void *hba_private);
+int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
+ size_t buf_len);
void scsi_req_build_sense(SCSIRequest *req, SCSISense sense);
void scsi_req_print(SCSIRequest *req);
void scsi_req_continue(SCSIRequest *req);
void scsi_req_data(SCSIRequest *req, int len);
void scsi_req_complete(SCSIRequest *req, int status);
+void scsi_req_complete_failed(SCSIRequest *req, int host_status);
uint8_t *scsi_req_get_buf(SCSIRequest *req);
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len);
void scsi_req_cancel_complete(SCSIRequest *req);
void scsi_req_cancel(SCSIRequest *req);
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier);
void scsi_req_retry(SCSIRequest *req);
+void scsi_device_drained_begin(SCSIDevice *sdev);
+void scsi_device_drained_end(SCSIDevice *sdev);
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense);
@@ -192,10 +237,17 @@ void scsi_device_unit_attention_reported(SCSIDevice *dev);
void scsi_generic_read_device_inquiry(SCSIDevice *dev);
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed);
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
- uint8_t *buf, uint8_t buf_size);
+ uint8_t *buf, uint8_t buf_size, uint32_t timeout);
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int target, int lun);
+SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int target, int lun);
/* scsi-generic.c. */
extern const SCSIReqOps scsi_generic_req_ops;
+/* scsi-disk.c */
+#define SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR 0
+#define SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD 1
+#define SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE 2
+#define SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED 3
+
#endif
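
As a sketch of the renamed bus-initialisation API (not part of the patch), a hypothetical HBA realize function is shown below; ExampleHBAState, EXAMPLE_HBA and the half-filled SCSIBusInfo are invented for illustration.

/* Hypothetical HBA -- illustration only. */
static const struct SCSIBusInfo example_scsi_info = {
    .tcq        = 1,
    .max_target = 7,
    .max_lun    = 0,
    /* .transfer_data, .complete, .cancel, ... supplied by a real HBA */
};

static void example_hba_realize(DeviceState *dev, Error **errp)
{
    ExampleHBAState *s = EXAMPLE_HBA(dev);

    /* Auto-named bus; scsi_bus_init_named() lets the caller pick a name. */
    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &example_scsi_info);
}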
diff --git a/include/hw/sd/allwinner-sdhost.h b/include/hw/sd/allwinner-sdhost.h
index d94606a853..1b951177dd 100644
--- a/include/hw/sd/allwinner-sdhost.h
+++ b/include/hw/sd/allwinner-sdhost.h
@@ -38,6 +38,12 @@
/** Allwinner sun5i family and newer (A13, H2+, H3, etc) */
#define TYPE_AW_SDHOST_SUN5I TYPE_AW_SDHOST "-sun5i"
+/** Allwinner sun50i-a64 */
+#define TYPE_AW_SDHOST_SUN50I_A64 TYPE_AW_SDHOST "-sun50i-a64"
+
+/** Allwinner sun50i-a64 emmc */
+#define TYPE_AW_SDHOST_SUN50I_A64_EMMC TYPE_AW_SDHOST "-sun50i-a64-emmc"
+
/** @} */
/**
@@ -45,19 +51,14 @@
* @{
*/
-#define AW_SDHOST(obj) \
- OBJECT_CHECK(AwSdHostState, (obj), TYPE_AW_SDHOST)
-#define AW_SDHOST_CLASS(klass) \
- OBJECT_CLASS_CHECK(AwSdHostClass, (klass), TYPE_AW_SDHOST)
-#define AW_SDHOST_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AwSdHostClass, (obj), TYPE_AW_SDHOST)
+OBJECT_DECLARE_TYPE(AwSdHostState, AwSdHostClass, AW_SDHOST)
/** @} */
/**
* Allwinner SD Host Controller object instance state.
*/
-typedef struct AwSdHostState {
+struct AwSdHostState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
@@ -71,6 +72,12 @@ typedef struct AwSdHostState {
/** Interrupt output signal to notify CPU */
qemu_irq irq;
+ /** Memory region where DMA transfers are done */
+ MemoryRegion *dma_mr;
+
+ /** Address space used internally for DMA transfers */
+ AddressSpace dma_as;
+
/** Number of bytes left in current DMA transfer */
uint32_t transfer_cnt;
@@ -109,11 +116,12 @@ typedef struct AwSdHostState {
uint32_t startbit_detect; /**< eMMC DDR Start Bit Detection Control */
uint32_t response_crc; /**< Response CRC */
uint32_t data_crc[8]; /**< Data CRC */
+ uint32_t sample_delay; /**< Sample delay control */
uint32_t status_crc; /**< Status CRC */
/** @} */
-} AwSdHostState;
+};
/**
* Allwinner SD Host Controller class-level struct.
@@ -122,14 +130,17 @@ typedef struct AwSdHostState {
* such that the generic code can use this struct to support
* all devices.
*/
-typedef struct AwSdHostClass {
+struct AwSdHostClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
/** Maximum buffer size in bytes per DMA descriptor */
size_t max_desc_size;
+ bool is_sun4i;
-} AwSdHostClass;
+ /** does the IP block support autocalibration? */
+ bool can_calibrate;
+};
#endif /* HW_SD_ALLWINNER_SDHOST_H */
diff --git a/include/hw/sd/aspeed_sdhci.h b/include/hw/sd/aspeed_sdhci.h
index dffbb46946..057bc5f3d1 100644
--- a/include/hw/sd/aspeed_sdhci.h
+++ b/include/hw/sd/aspeed_sdhci.h
@@ -3,24 +3,24 @@
* Eddie James <eajames@linux.ibm.com>
*
* Copyright (C) 2019 IBM Corp
- * SPDX-License-Identifer: GPL-2.0-or-later
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef ASPEED_SDHCI_H
#define ASPEED_SDHCI_H
#include "hw/sd/sdhci.h"
+#include "qom/object.h"
#define TYPE_ASPEED_SDHCI "aspeed.sdhci"
-#define ASPEED_SDHCI(obj) OBJECT_CHECK(AspeedSDHCIState, (obj), \
- TYPE_ASPEED_SDHCI)
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedSDHCIState, ASPEED_SDHCI)
#define ASPEED_SDHCI_CAPABILITIES 0x01E80080
#define ASPEED_SDHCI_NUM_SLOTS 2
#define ASPEED_SDHCI_NUM_REGS (ASPEED_SDHCI_REG_SIZE / sizeof(uint32_t))
#define ASPEED_SDHCI_REG_SIZE 0x100
-typedef struct AspeedSDHCIState {
+struct AspeedSDHCIState {
SysBusDevice parent;
SDHCIState slots[ASPEED_SDHCI_NUM_SLOTS];
@@ -30,6 +30,6 @@ typedef struct AspeedSDHCIState {
qemu_irq irq;
uint32_t regs[ASPEED_SDHCI_NUM_REGS];
-} AspeedSDHCIState;
+};
#endif /* ASPEED_SDHCI_H */
diff --git a/include/hw/sd/bcm2835_sdhost.h b/include/hw/sd/bcm2835_sdhost.h
index 7520dd6507..f6bca5c397 100644
--- a/include/hw/sd/bcm2835_sdhost.h
+++ b/include/hw/sd/bcm2835_sdhost.h
@@ -16,14 +16,14 @@
#include "hw/sysbus.h"
#include "hw/sd/sd.h"
+#include "qom/object.h"
#define TYPE_BCM2835_SDHOST "bcm2835-sdhost"
-#define BCM2835_SDHOST(obj) \
- OBJECT_CHECK(BCM2835SDHostState, (obj), TYPE_BCM2835_SDHOST)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SDHostState, BCM2835_SDHOST)
#define BCM2835_SDHOST_FIFO_LEN 16
-typedef struct {
+struct BCM2835SDHostState {
SysBusDevice busdev;
SDBus sdbus;
MemoryRegion iomem;
@@ -43,6 +43,6 @@ typedef struct {
uint32_t datacnt;
qemu_irq irq;
-} BCM2835SDHostState;
+};
#endif
diff --git a/include/hw/sd/cadence_sdhci.h b/include/hw/sd/cadence_sdhci.h
new file mode 100644
index 0000000000..cd8288b7d8
--- /dev/null
+++ b/include/hw/sd/cadence_sdhci.h
@@ -0,0 +1,47 @@
+/*
+ * Cadence SDHCI emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CADENCE_SDHCI_H
+#define CADENCE_SDHCI_H
+
+#include "hw/sd/sdhci.h"
+
+#define CADENCE_SDHCI_REG_SIZE 0x100
+#define CADENCE_SDHCI_NUM_REGS (CADENCE_SDHCI_REG_SIZE / sizeof(uint32_t))
+
+typedef struct CadenceSDHCIState {
+ SysBusDevice parent;
+
+ MemoryRegion container;
+ MemoryRegion iomem;
+ BusState *bus;
+
+ uint32_t regs[CADENCE_SDHCI_NUM_REGS];
+
+ SDHCIState sdhci;
+} CadenceSDHCIState;
+
+#define TYPE_CADENCE_SDHCI "cadence.sdhci"
+#define CADENCE_SDHCI(obj) OBJECT_CHECK(CadenceSDHCIState, (obj), \
+ TYPE_CADENCE_SDHCI)
+
+#endif /* CADENCE_SDHCI_H */
diff --git a/include/hw/sd/npcm7xx_sdhci.h b/include/hw/sd/npcm7xx_sdhci.h
new file mode 100644
index 0000000000..ad8002f766
--- /dev/null
+++ b/include/hw/sd/npcm7xx_sdhci.h
@@ -0,0 +1,65 @@
+/*
+ * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller
+ *
+ * Copyright (c) 2021 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM7XX_SDHCI_H
+#define NPCM7XX_SDHCI_H
+
+#include "hw/sd/sdhci.h"
+#include "qom/object.h"
+
+#define TYPE_NPCM7XX_SDHCI "npcm7xx.sdhci"
+#define NPCM7XX_PRSTVALS_SIZE 6
+#define NPCM7XX_PRSTVALS 0x60
+#define NPCM7XX_PRSTVALS_0 0x0
+#define NPCM7XX_PRSTVALS_1 0x2
+#define NPCM7XX_PRSTVALS_2 0x4
+#define NPCM7XX_PRSTVALS_3 0x6
+#define NPCM7XX_PRSTVALS_4 0x8
+#define NPCM7XX_PRSTVALS_5 0xA
+#define NPCM7XX_BOOTTOCTRL 0x10
+#define NPCM7XX_SDHCI_REGSIZE 0x20
+
+#define NPCM7XX_PRSNTS_RESET 0x04A00000
+#define NPCM7XX_BLKGAP_RESET 0x80
+#define NPCM7XX_CAPAB_RESET 0x0100200161EE0399
+#define NPCM7XX_MAXCURR_RESET 0x0000000000000005
+#define NPCM7XX_HCVER_RESET 0x1002
+
+#define NPCM7XX_PRSTVALS_0_RESET 0x0040
+#define NPCM7XX_PRSTVALS_1_RESET 0x0001
+#define NPCM7XX_PRSTVALS_3_RESET 0x0001
+
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxSDHCIState, NPCM7XX_SDHCI)
+
+typedef struct NPCM7xxRegs {
+ /* Preset Values Register Field, read-only */
+ uint16_t prstvals[NPCM7XX_PRSTVALS_SIZE];
+ /* Boot Timeout Control Register, read-write */
+ uint32_t boottoctrl;
+} NPCM7xxRegisters;
+
+struct NPCM7xxSDHCIState {
+ SysBusDevice parent;
+
+ MemoryRegion container;
+ MemoryRegion iomem;
+ BusState *bus;
+ NPCM7xxRegisters regs;
+
+ SDHCIState sdhci;
+};
+
+#endif /* NPCM7XX_SDHCI_H */
diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h
index a84b8e274a..2c8748fb9b 100644
--- a/include/hw/sd/sd.h
+++ b/include/hw/sd/sd.h
@@ -31,28 +31,29 @@
#define HW_SD_H
#include "hw/qdev-core.h"
-
-#define OUT_OF_RANGE (1 << 31)
-#define ADDRESS_ERROR (1 << 30)
-#define BLOCK_LEN_ERROR (1 << 29)
-#define ERASE_SEQ_ERROR (1 << 28)
-#define ERASE_PARAM (1 << 27)
-#define WP_VIOLATION (1 << 26)
-#define CARD_IS_LOCKED (1 << 25)
-#define LOCK_UNLOCK_FAILED (1 << 24)
-#define COM_CRC_ERROR (1 << 23)
-#define ILLEGAL_COMMAND (1 << 22)
-#define CARD_ECC_FAILED (1 << 21)
-#define CC_ERROR (1 << 20)
-#define SD_ERROR (1 << 19)
-#define CID_CSD_OVERWRITE (1 << 16)
-#define WP_ERASE_SKIP (1 << 15)
-#define CARD_ECC_DISABLED (1 << 14)
-#define ERASE_RESET (1 << 13)
-#define CURRENT_STATE (7 << 9)
-#define READY_FOR_DATA (1 << 8)
-#define APP_CMD (1 << 5)
-#define AKE_SEQ_ERROR (1 << 3)
+#include "qom/object.h"
+
+#define OUT_OF_RANGE (1 << 31)
+#define ADDRESS_ERROR (1 << 30)
+#define BLOCK_LEN_ERROR (1 << 29)
+#define ERASE_SEQ_ERROR (1 << 28)
+#define ERASE_PARAM (1 << 27)
+#define WP_VIOLATION (1 << 26)
+#define CARD_IS_LOCKED (1 << 25)
+#define LOCK_UNLOCK_FAILED (1 << 24)
+#define COM_CRC_ERROR (1 << 23)
+#define ILLEGAL_COMMAND (1 << 22)
+#define CARD_ECC_FAILED (1 << 21)
+#define CC_ERROR (1 << 20)
+#define SD_ERROR (1 << 19)
+#define CID_CSD_OVERWRITE (1 << 16)
+#define WP_ERASE_SKIP (1 << 15)
+#define CARD_ECC_DISABLED (1 << 14)
+#define ERASE_RESET (1 << 13)
+#define CURRENT_STATE (7 << 9)
+#define READY_FOR_DATA (1 << 8)
+#define APP_CMD (1 << 5)
+#define AKE_SEQ_ERROR (1 << 3)
enum SDPhySpecificationVersion {
SD_PHY_SPECv1_10_VERS = 1,
@@ -76,10 +77,10 @@ typedef enum {
typedef enum {
sd_none = -1,
- sd_bc = 0, /* broadcast -- no response */
- sd_bcr, /* broadcast with response */
- sd_ac, /* addressed -- no data transfer */
- sd_adtc, /* addressed with data transfer */
+ sd_bc = 0, /* broadcast -- no response */
+ sd_bcr, /* broadcast with response */
+ sd_ac, /* addressed -- no data transfer */
+ sd_adtc, /* addressed with data transfer */
} sd_cmd_type_t;
typedef struct {
@@ -88,24 +89,37 @@ typedef struct {
uint8_t crc;
} SDRequest;
-typedef struct SDState SDState;
-typedef struct SDBus SDBus;
#define TYPE_SD_CARD "sd-card"
-#define SD_CARD(obj) OBJECT_CHECK(SDState, (obj), TYPE_SD_CARD)
-#define SD_CARD_CLASS(klass) \
- OBJECT_CLASS_CHECK(SDCardClass, (klass), TYPE_SD_CARD)
-#define SD_CARD_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SDCardClass, (obj), TYPE_SD_CARD)
+OBJECT_DECLARE_TYPE(SDState, SDCardClass, SD_CARD)
-typedef struct {
+#define TYPE_SD_CARD_SPI "sd-card-spi"
+DECLARE_INSTANCE_CHECKER(SDState, SD_CARD_SPI, TYPE_SD_CARD_SPI)
+
+struct SDCardClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
int (*do_command)(SDState *sd, SDRequest *req, uint8_t *response);
- void (*write_data)(SDState *sd, uint8_t value);
- uint8_t (*read_data)(SDState *sd);
+ /**
+ * Write a byte to an SD card.
+ * @sd: card
+ * @value: byte to write
+ *
+ * Write a byte on the data lines of an SD card.
+ */
+ void (*write_byte)(SDState *sd, uint8_t value);
+ /**
+ * Read a byte from an SD card.
+ * @sd: card
+ *
+ * Read a byte from the data lines of an SD card.
+ *
+ * Return: byte value read
+ */
+ uint8_t (*read_byte)(SDState *sd);
+ bool (*receive_ready)(SDState *sd);
bool (*data_ready)(SDState *sd);
void (*set_voltage)(SDState *sd, uint16_t millivolts);
uint8_t (*get_dat_lines)(SDState *sd);
@@ -113,18 +127,19 @@ typedef struct {
void (*enable)(SDState *sd, bool enable);
bool (*get_inserted)(SDState *sd);
bool (*get_readonly)(SDState *sd);
-} SDCardClass;
+
+ const struct SDProto *proto;
+};
#define TYPE_SD_BUS "sd-bus"
-#define SD_BUS(obj) OBJECT_CHECK(SDBus, (obj), TYPE_SD_BUS)
-#define SD_BUS_CLASS(klass) OBJECT_CLASS_CHECK(SDBusClass, (klass), TYPE_SD_BUS)
-#define SD_BUS_GET_CLASS(obj) OBJECT_GET_CLASS(SDBusClass, (obj), TYPE_SD_BUS)
+OBJECT_DECLARE_TYPE(SDBus, SDBusClass,
+ SD_BUS)
struct SDBus {
BusState qbus;
};
-typedef struct {
+struct SDBusClass {
/*< private >*/
BusClass parent_class;
/*< public >*/
@@ -134,24 +149,7 @@ typedef struct {
*/
void (*set_inserted)(DeviceState *dev, bool inserted);
void (*set_readonly)(DeviceState *dev, bool readonly);
-} SDBusClass;
-
-/* Legacy functions to be used only by non-qdevified callers */
-SDState *sd_init(BlockBackend *bs, bool is_spi);
-int sd_do_command(SDState *sd, SDRequest *req,
- uint8_t *response);
-void sd_write_data(SDState *sd, uint8_t value);
-uint8_t sd_read_data(SDState *sd);
-void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert);
-bool sd_data_ready(SDState *sd);
-/* sd_enable should not be used -- it is only used on the nseries boards,
- * where it is part of a broken implementation of the MMC card slot switch
- * (there should be two card slots which are multiplexed to a single MMC
- * controller, but instead we model it with one card and controller and
- * disable the card when the second slot is selected, so it looks like the
- * second slot is always empty).
- */
-void sd_enable(SDState *sd, bool enable);
+};
/* Functions to be used by qdevified callers (working via
* an SDBus rather than directly with SDState)
@@ -160,8 +158,42 @@ void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts);
uint8_t sdbus_get_dat_lines(SDBus *sdbus);
bool sdbus_get_cmd_line(SDBus *sdbus);
int sdbus_do_command(SDBus *sd, SDRequest *req, uint8_t *response);
-void sdbus_write_data(SDBus *sd, uint8_t value);
-uint8_t sdbus_read_data(SDBus *sd);
+/**
+ * Write a byte to an SD bus.
+ * @sd: bus
+ * @value: byte to write
+ *
+ * Write a byte on the data lines of an SD bus.
+ */
+void sdbus_write_byte(SDBus *sd, uint8_t value);
+/**
+ * Read a byte from an SD bus.
+ * @sd: bus
+ *
+ * Read a byte from the data lines of an SD bus.
+ *
+ * Return: byte value read
+ */
+uint8_t sdbus_read_byte(SDBus *sd);
+/**
+ * Write data to an SD bus.
+ * @sdbus: bus
+ * @buf: data to write
+ * @length: number of bytes to write
+ *
+ * Write multiple bytes of data on the data lines of an SD bus.
+ */
+void sdbus_write_data(SDBus *sdbus, const void *buf, size_t length);
+/**
+ * Read data from an SD bus.
+ * @sdbus: bus
+ * @buf: buffer to read data into
+ * @length: number of bytes to read
+ *
+ * Read multiple bytes of data from the data lines of an SD bus.
+ */
+void sdbus_read_data(SDBus *sdbus, void *buf, size_t length);
+bool sdbus_receive_ready(SDBus *sd);
bool sdbus_data_ready(SDBus *sd);
bool sdbus_get_inserted(SDBus *sd);
bool sdbus_get_readonly(SDBus *sd);
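
As a brief usage note (not part of the patch): controllers that previously looped over sdbus_write_byte()/sdbus_read_byte() for block transfers can now move whole buffers at once; the function below is an invented illustration.

/* Illustration only: block transmit with the new multi-byte helper. */
static void example_send_block(SDBus *sdbus, const uint8_t *buf, size_t len)
{
    /* Formerly: for (i = 0; i < len; i++) sdbus_write_byte(sdbus, buf[i]); */
    sdbus_write_data(sdbus, buf, len);
}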
diff --git a/include/hw/sd/sdcard_legacy.h b/include/hw/sd/sdcard_legacy.h
new file mode 100644
index 0000000000..0dc3889555
--- /dev/null
+++ b/include/hw/sd/sdcard_legacy.h
@@ -0,0 +1,50 @@
+/*
+ * SD Memory Card emulation (deprecated legacy API)
+ *
+ * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef HW_SDCARD_LEGACY_H
+#define HW_SDCARD_LEGACY_H
+
+#include "hw/sd/sd.h"
+
+/* Legacy functions to be used only by non-qdevified callers */
+SDState *sd_init(BlockBackend *blk, bool is_spi);
+int sd_do_command(SDState *card, SDRequest *request, uint8_t *response);
+void sd_write_byte(SDState *card, uint8_t value);
+uint8_t sd_read_byte(SDState *card);
+void sd_set_cb(SDState *card, qemu_irq readonly, qemu_irq insert);
+
+/* sd_enable should not be used -- it is only used on the nseries boards,
+ * where it is part of a broken implementation of the MMC card slot switch
+ * (there should be two card slots which are multiplexed to a single MMC
+ * controller, but instead we model it with one card and controller and
+ * disable the card when the second slot is selected, so it looks like the
+ * second slot is always empty).
+ */
+void sd_enable(SDState *card, bool enable);
+
+#endif /* HW_SDCARD_LEGACY_H */
diff --git a/include/hw/sd/sdhci.h b/include/hw/sd/sdhci.h
index 5d9275f3d6..6cd2822f1d 100644
--- a/include/hw/sd/sdhci.h
+++ b/include/hw/sd/sdhci.h
@@ -25,12 +25,13 @@
#ifndef SDHCI_H
#define SDHCI_H
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/sysbus.h"
#include "hw/sd/sd.h"
+#include "qom/object.h"
/* SD/MMC host controller state */
-typedef struct SDHCIState {
+struct SDHCIState {
/*< private >*/
union {
PCIDevice pcidev;
@@ -95,10 +96,12 @@ typedef struct SDHCIState {
/* Configurable properties */
bool pending_insert_quirk; /* Quirk for Raspberry Pi card insert int */
uint32_t quirks;
+ uint8_t endianness;
uint8_t sd_spec_version;
uint8_t uhs_mode;
uint8_t vendor; /* For vendor specific functionality */
-} SDHCIState;
+};
+typedef struct SDHCIState SDHCIState;
#define SDHCI_VENDOR_NONE 0
#define SDHCI_VENDOR_IMX 1
@@ -113,11 +116,12 @@ typedef struct SDHCIState {
#define SDHCI_QUIRK_NO_BUSY_IRQ BIT(14)
#define TYPE_PCI_SDHCI "sdhci-pci"
-#define PCI_SDHCI(obj) OBJECT_CHECK(SDHCIState, (obj), TYPE_PCI_SDHCI)
+DECLARE_INSTANCE_CHECKER(SDHCIState, PCI_SDHCI,
+ TYPE_PCI_SDHCI)
#define TYPE_SYSBUS_SDHCI "generic-sdhci"
-#define SYSBUS_SDHCI(obj) \
- OBJECT_CHECK(SDHCIState, (obj), TYPE_SYSBUS_SDHCI)
+DECLARE_INSTANCE_CHECKER(SDHCIState, SYSBUS_SDHCI,
+ TYPE_SYSBUS_SDHCI)
#define TYPE_IMX_USDHC "imx-usdhc"
diff --git a/include/hw/semihosting/console.h b/include/hw/semihosting/console.h
deleted file mode 100644
index 0238f540f4..0000000000
--- a/include/hw/semihosting/console.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Semihosting Console
- *
- * Copyright (c) 2019 Linaro Ltd
- *
- * SPDX-License-Identifier: GPL-2.0-or-later
- */
-
-#ifndef SEMIHOST_CONSOLE_H
-#define SEMIHOST_CONSOLE_H
-
-#include "cpu.h"
-
-/**
- * qemu_semihosting_console_outs:
- * @env: CPUArchState
- * @s: host address of null terminated guest string
- *
- * Send a null terminated guest string to the debug console. This may
- * be the remote gdb session if a softmmu guest is currently being
- * debugged.
- *
- * Returns: number of bytes written.
- */
-int qemu_semihosting_console_outs(CPUArchState *env, target_ulong s);
-
-/**
- * qemu_semihosting_console_outc:
- * @env: CPUArchState
- * @s: host address of null terminated guest string
- *
- * Send single character from guest memory to the debug console. This
- * may be the remote gdb session if a softmmu guest is currently being
- * debugged.
- *
- * Returns: nothing
- */
-void qemu_semihosting_console_outc(CPUArchState *env, target_ulong c);
-
-/**
- * qemu_semihosting_console_inc:
- * @env: CPUArchState
- *
- * Receive single character from debug console. This may be the remote
- * gdb session if a softmmu guest is currently being debugged. As this
- * call may block if no data is available we suspend the CPU and will
- * re-execute the instruction when data is there. Therefore two
- * conditions must be met:
- * - CPUState is synchronized before calling this function
- * - pc is only updated once the character is successfully returned
- *
- * Returns: character read OR cpu_loop_exit!
- */
-target_ulong qemu_semihosting_console_inc(CPUArchState *env);
-
-/**
- * qemu_semihosting_log_out:
- * @s: pointer to string
- * @len: length of string
- *
- * Send a string to the debug output. Unlike console_out these strings
- * can't be sent to a remote gdb instance as they don't exist in guest
- * memory.
- *
- * Returns: number of bytes written
- */
-int qemu_semihosting_log_out(const char *s, int len);
-
-#endif /* SEMIHOST_CONSOLE_H */
diff --git a/include/hw/sensor/emc141x_regs.h b/include/hw/sensor/emc141x_regs.h
new file mode 100644
index 0000000000..e509a43d55
--- /dev/null
+++ b/include/hw/sensor/emc141x_regs.h
@@ -0,0 +1,37 @@
+/*
+ * SMSC EMC141X temperature sensor.
+ *
+ * Browse the data sheet:
+ *
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/20005274A.pdf
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef EMC141X_REGS_H
+#define EMC141X_REGS_H
+
+#define EMC1413_DEVICE_ID 0x21
+#define EMC1414_DEVICE_ID 0x25
+#define MANUFACTURER_ID 0x5d
+#define REVISION 0x04
+
+/* the EMC141X registers */
+#define EMC141X_TEMP_HIGH0 0x00
+#define EMC141X_TEMP_HIGH1 0x01
+#define EMC141X_TEMP_HIGH2 0x23
+#define EMC141X_TEMP_HIGH3 0x2a
+#define EMC141X_TEMP_MAX_HIGH0 0x05
+#define EMC141X_TEMP_MIN_HIGH0 0x06
+#define EMC141X_TEMP_MAX_HIGH1 0x07
+#define EMC141X_TEMP_MIN_HIGH1 0x08
+#define EMC141X_TEMP_MAX_HIGH2 0x15
+#define EMC141X_TEMP_MIN_HIGH2 0x16
+#define EMC141X_TEMP_MAX_HIGH3 0x2c
+#define EMC141X_TEMP_MIN_HIGH3 0x2d
+#define EMC141X_DEVICE_ID 0xfd
+#define EMC141X_MANUFACTURER_ID 0xfe
+#define EMC141X_REVISION 0xff
+
+#endif
diff --git a/include/hw/sensor/isl_pmbus_vr.h b/include/hw/sensor/isl_pmbus_vr.h
new file mode 100644
index 0000000000..aa2c2767df
--- /dev/null
+++ b/include/hw/sensor/isl_pmbus_vr.h
@@ -0,0 +1,57 @@
+/*
+ * PMBus device for Renesas Digital Multiphase Voltage Regulators
+ *
+ * Copyright 2022 Google LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MISC_ISL_PMBUS_VR_H
+#define HW_MISC_ISL_PMBUS_VR_H
+
+#include "hw/i2c/pmbus_device.h"
+#include "qom/object.h"
+
+#define TYPE_ISL69259 "isl69259"
+#define TYPE_ISL69260 "isl69260"
+#define TYPE_RAA228000 "raa228000"
+#define TYPE_RAA229004 "raa229004"
+#define ISL_MAX_IC_DEVICE_ID_LEN 16
+
+struct ISLState {
+ PMBusDevice parent;
+
+ uint8_t ic_device_id[ISL_MAX_IC_DEVICE_ID_LEN];
+ uint8_t ic_device_id_len;
+};
+
+OBJECT_DECLARE_SIMPLE_TYPE(ISLState, ISL69260)
+
+#define ISL_CAPABILITY_DEFAULT 0x40
+#define ISL_OPERATION_DEFAULT 0x80
+#define ISL_ON_OFF_CONFIG_DEFAULT 0x16
+#define ISL_VOUT_MODE_DEFAULT 0x40
+#define ISL_VOUT_COMMAND_DEFAULT 0x0384
+#define ISL_VOUT_MAX_DEFAULT 0x08FC
+#define ISL_VOUT_MARGIN_HIGH_DEFAULT 0x0640
+#define ISL_VOUT_MARGIN_LOW_DEFAULT 0xFA
+#define ISL_VOUT_TRANSITION_RATE_DEFAULT 0x64
+#define ISL_VOUT_OV_FAULT_LIMIT_DEFAULT 0x076C
+#define ISL_OT_FAULT_LIMIT_DEFAULT 0x7D
+#define ISL_OT_WARN_LIMIT_DEFAULT 0x07D0
+#define ISL_VIN_OV_WARN_LIMIT_DEFAULT 0x36B0
+#define ISL_VIN_UV_WARN_LIMIT_DEFAULT 0x1F40
+#define ISL_IIN_OC_FAULT_LIMIT_DEFAULT 0x32
+#define ISL_TON_DELAY_DEFAULT 0x14
+#define ISL_TON_RISE_DEFAULT 0x01F4
+#define ISL_TOFF_FALL_DEFAULT 0x01F4
+#define ISL_REVISION_DEFAULT 0x33
+#define ISL_READ_VOUT_DEFAULT 1000
+#define ISL_READ_IOUT_DEFAULT 40
+#define ISL_READ_POUT_DEFAULT 4
+#define ISL_READ_TEMP_DEFAULT 25
+#define ISL_READ_VIN_DEFAULT 1100
+#define ISL_READ_IIN_DEFAULT 40
+#define ISL_READ_PIN_DEFAULT 4
+
+#endif
diff --git a/include/hw/sensor/tmp105.h b/include/hw/sensor/tmp105.h
new file mode 100644
index 0000000000..244e2989fe
--- /dev/null
+++ b/include/hw/sensor/tmp105.h
@@ -0,0 +1,55 @@
+/*
+ * Texas Instruments TMP105 Temperature Sensor
+ *
+ * Browse the data sheet:
+ *
+ * http://www.ti.com/lit/gpn/tmp105
+ *
+ * Copyright (C) 2012 Alex Horn <alex.horn@cs.ox.ac.uk>
+ * Copyright (C) 2008-2012 Andrzej Zaborowski <balrogg@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_TMP105_H
+#define QEMU_TMP105_H
+
+#include "hw/i2c/i2c.h"
+#include "hw/sensor/tmp105_regs.h"
+#include "qom/object.h"
+
+#define TYPE_TMP105 "tmp105"
+OBJECT_DECLARE_SIMPLE_TYPE(TMP105State, TMP105)
+
+/**
+ * TMP105State:
+ * @config: Bits 5 and 6 (value 32 and 64) determine the precision of the
+ * temperature. See Table 8 in the data sheet.
+ *
+ * @see_also: http://www.ti.com/lit/gpn/tmp105
+ */
+struct TMP105State {
+ /*< private >*/
+ I2CSlave i2c;
+ /*< public >*/
+
+ uint8_t len;
+ uint8_t buf[2];
+ qemu_irq pin;
+
+ uint8_t pointer;
+ uint8_t config;
+ int16_t temperature;
+ int16_t limit[2];
+ int faults;
+ uint8_t alarm;
+ /*
+ * The TMP105 initially looks for a temperature rising above T_high;
+ * once this is detected, the condition it looks for next is the
+ * temperature falling below T_low. This flag is false when initially
+ * looking for T_high, true when looking for T_low.
+ */
+ bool detect_falling;
+};
+
+#endif
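
A small sketch of the precision bits mentioned in the TMP105State comment (not part of the patch), assuming the R1/R0 encoding from Table 8 of the data sheet; the helper name is invented.

/* Hypothetical helper: bits 6:5 (R1:R0) select 9- to 12-bit conversions. */
static unsigned example_tmp105_resolution_bits(const TMP105State *s)
{
    return 9 + ((s->config >> 5) & 3);
}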
diff --git a/include/hw/misc/tmp105_regs.h b/include/hw/sensor/tmp105_regs.h
index ef015ee5cf..ef015ee5cf 100644
--- a/include/hw/misc/tmp105_regs.h
+++ b/include/hw/sensor/tmp105_regs.h
diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h
index 93f464bf4c..ec716cdd45 100644
--- a/include/hw/sh4/sh.h
+++ b/include/hw/sh4/sh.h
@@ -1,6 +1,31 @@
-#ifndef QEMU_SH_H
-#define QEMU_SH_H
-/* Definitions for SH board emulation. */
+/*
+ * Definitions for SH board emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef QEMU_HW_SH_H
+#define QEMU_HW_SH_H
#include "hw/sh4/sh_intc.h"
#include "target/sh4/cpu-qom.h"
@@ -19,25 +44,18 @@ typedef struct {
uint16_t portbmask_trigger;
/* Return 0 if no action was taken */
int (*port_change_cb) (uint16_t porta, uint16_t portb,
- uint16_t * periph_pdtra,
- uint16_t * periph_portdira,
- uint16_t * periph_pdtrb,
- uint16_t * periph_portdirb);
+ uint16_t *periph_pdtra,
+ uint16_t *periph_portdira,
+ uint16_t *periph_pdtrb,
+ uint16_t *periph_portdirb);
} sh7750_io_device;
int sh7750_register_io_device(struct SH7750State *s,
- sh7750_io_device * device);
+ sh7750_io_device *device);
/* sh_serial.c */
+#define TYPE_SH_SERIAL "sh-serial"
#define SH_SERIAL_FEAT_SCIF (1 << 0)
-void sh_serial_init(MemoryRegion *sysmem,
- hwaddr base, int feat,
- uint32_t freq, Chardev *chr,
- qemu_irq eri_source,
- qemu_irq rxi_source,
- qemu_irq txi_source,
- qemu_irq tei_source,
- qemu_irq bri_source);
/* sh7750.c */
qemu_irq sh7750_irl(struct SH7750State *s);
diff --git a/include/hw/sh4/sh_intc.h b/include/hw/sh4/sh_intc.h
index 65f3425057..f62d5c5e13 100644
--- a/include/hw/sh4/sh_intc.h
+++ b/include/hw/sh4/sh_intc.h
@@ -58,7 +58,7 @@ struct intc_desc {
};
int sh_intc_get_pending_vector(struct intc_desc *desc, int imask);
-struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id);
+
void sh_intc_toggle_source(struct intc_source *source,
int enable_adj, int assert_adj);
diff --git a/include/hw/i386/ich9.h b/include/hw/southbridge/ich9.h
index a98d10b252..fd01649d04 100644
--- a/include/hw/i386/ich9.h
+++ b/include/hw/southbridge/ich9.h
@@ -1,32 +1,24 @@
-#ifndef HW_ICH9_H
-#define HW_ICH9_H
+#ifndef HW_SOUTHBRIDGE_ICH9_H
+#define HW_SOUTHBRIDGE_ICH9_H
-#include "hw/isa/isa.h"
-#include "hw/sysbus.h"
-#include "hw/i386/pc.h"
#include "hw/isa/apm.h"
-#include "hw/pci/pci.h"
-#include "hw/pci/pcie_host.h"
-#include "hw/pci/pci_bridge.h"
-#include "hw/acpi/acpi.h"
#include "hw/acpi/ich9.h"
-#include "hw/pci/pci_bus.h"
-
-void ich9_lpc_set_irq(void *opaque, int irq_num, int level);
-int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx);
-PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin);
-void ich9_lpc_pm_init(PCIDevice *pci_lpc, bool smm_enabled);
-I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base);
+#include "hw/intc/ioapic.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "exec/memory.h"
+#include "qemu/notify.h"
+#include "qom/object.h"
void ich9_generate_smi(void);
#define ICH9_CC_SIZE (16 * 1024) /* 16KB. Chipset configuration registers */
#define TYPE_ICH9_LPC_DEVICE "ICH9-LPC"
-#define ICH9_LPC_DEVICE(obj) \
- OBJECT_CHECK(ICH9LPCState, (obj), TYPE_ICH9_LPC_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(ICH9LPCState, ICH9_LPC_DEVICE)
-typedef struct ICH9LPCState {
+struct ICH9LPCState {
/* ICH9 LPC PCI to ISA bridge */
PCIDevice d;
@@ -39,6 +31,7 @@ typedef struct ICH9LPCState {
*/
uint8_t irr[PCI_SLOT_MAX][PCI_NUM_PINS];
+ MC146818RtcState rtc;
APMState apm;
ICH9LPCPMRegs pm;
uint32_t sci_level; /* track sci level */
@@ -71,15 +64,13 @@ typedef struct ICH9LPCState {
* triggers feature lockdown */
uint64_t smi_negotiated_features; /* guest-invisible, host endian */
- /* isa bus */
- ISABus *isa_bus;
MemoryRegion rcrb_mem; /* root complex register block */
Notifier machine_ready;
- qemu_irq gsi[GSI_NUM_PINS];
-} ICH9LPCState;
+ qemu_irq gsi[IOAPIC_NUM_PINS];
+};
-#define Q35_MASK(bit, ms_bit, ls_bit) \
+#define ICH9_MASK(bit, ms_bit, ls_bit) \
((uint##bit##_t)(((1ULL << ((ms_bit) + 1)) - 1) & ~((1ULL << ls_bit) - 1)))
/* ICH9: Chipset Configuration Registers */
@@ -141,12 +132,13 @@ typedef struct ICH9LPCState {
#define ICH9_LPC_NB_PIRQS 8 /* PCI A-H */
#define ICH9_LPC_PMBASE 0x40
-#define ICH9_LPC_PMBASE_BASE_ADDRESS_MASK Q35_MASK(32, 15, 7)
+#define ICH9_LPC_PMBASE_BASE_ADDRESS_MASK ICH9_MASK(32, 15, 7)
#define ICH9_LPC_PMBASE_RTE 0x1
#define ICH9_LPC_PMBASE_DEFAULT 0x1
+
#define ICH9_LPC_ACPI_CTRL 0x44
#define ICH9_LPC_ACPI_CTRL_ACPI_EN 0x80
-#define ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK Q35_MASK(8, 2, 0)
+#define ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK ICH9_MASK(8, 2, 0)
#define ICH9_LPC_ACPI_CTRL_9 0x0
#define ICH9_LPC_ACPI_CTRL_10 0x1
#define ICH9_LPC_ACPI_CTRL_11 0x2
@@ -165,7 +157,7 @@ typedef struct ICH9LPCState {
#define ICH9_LPC_PIRQH_ROUT 0x6b
#define ICH9_LPC_PIRQ_ROUT_IRQEN 0x80
-#define ICH9_LPC_PIRQ_ROUT_MASK Q35_MASK(8, 3, 0)
+#define ICH9_LPC_PIRQ_ROUT_MASK ICH9_MASK(8, 3, 0)
#define ICH9_LPC_PIRQ_ROUT_DEFAULT 0x80
#define ICH9_LPC_GEN_PMCON_1 0xa0
@@ -175,7 +167,7 @@ typedef struct ICH9LPCState {
#define ICH9_LPC_GEN_PMCON_LOCK 0xa6
#define ICH9_LPC_RCBA 0xf0
-#define ICH9_LPC_RCBA_BA_MASK Q35_MASK(32, 31, 14)
+#define ICH9_LPC_RCBA_BA_MASK ICH9_MASK(32, 31, 14)
#define ICH9_LPC_RCBA_EN 0x1
#define ICH9_LPC_RCBA_DEFAULT 0x0
@@ -215,7 +207,7 @@ typedef struct ICH9LPCState {
/* D31:F3 SMBus controller */
-#define TYPE_ICH9_SMB_DEVICE "ICH9 SMB"
+#define TYPE_ICH9_SMB_DEVICE "ICH9-SMB"
#define ICH9_A2_SMB_REVISION 0x02
#define ICH9_SMB_PI 0x00
@@ -245,7 +237,11 @@ typedef struct ICH9LPCState {
#define ICH9_SMB_HST_D1 0x06
#define ICH9_SMB_HOST_BLOCK_DB 0x07
+#define ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP "x-smi-negotiated-features"
+
/* bit positions used in fw_cfg SMI feature negotiation */
#define ICH9_LPC_SMI_F_BROADCAST_BIT 0
+#define ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT 1
+#define ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT 2
-#endif /* HW_ICH9_H */
+#endif /* HW_SOUTHBRIDGE_ICH9_H */
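
For clarity (not part of the patch), a worked expansion of the renamed mask macro, whose behaviour is unchanged from Q35_MASK:

/*
 * ICH9_MASK(bit, ms_bit, ls_bit) sets bits ms_bit..ls_bit (inclusive) of a
 * uint<bit>_t, for example:
 *
 *   ICH9_MASK(32, 15, 7) == 0x0000ff80   (PMBASE base-address field)
 *   ICH9_MASK(8,  3,  0) == 0x0f         (PIRQ routing field)
 */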
diff --git a/include/hw/southbridge/piix.h b/include/hw/southbridge/piix.h
index 02bd741209..86709ba2e4 100644
--- a/include/hw/southbridge/piix.h
+++ b/include/hw/southbridge/piix.h
@@ -12,13 +12,11 @@
#ifndef HW_SOUTHBRIDGE_PIIX_H
#define HW_SOUTHBRIDGE_PIIX_H
-#include "hw/pci/pci.h"
-
-#define TYPE_PIIX4_PM "PIIX4_PM"
-
-I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
- qemu_irq sci_irq, qemu_irq smi_irq,
- int smm_enabled, DeviceState **piix4_pm);
+#include "hw/pci/pci_device.h"
+#include "hw/acpi/piix4.h"
+#include "hw/ide/pci.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "hw/usb/hcd-uhci.h"
/* PIRQRC[A:D]: PIRQx Route Control Registers */
#define PIIX_PIRQCA 0x60
@@ -32,10 +30,9 @@ I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
*/
#define PIIX_RCR_IOPORT 0xcf9
-#define PIIX_NUM_PIC_IRQS 16 /* i8259 * 2 */
#define PIIX_NUM_PIRQS 4ULL /* PIRQ[A-D] */
-typedef struct PIIXState {
+struct PIIXState {
PCIDevice dev;
/*
@@ -44,30 +41,44 @@ typedef struct PIIXState {
* So one PIC level is tracked by PIIX_NUM_PIRQS bits.
*
* PIRQ is mapped to PIC pins, we track it by
- * PIIX_NUM_PIRQS * PIIX_NUM_PIC_IRQS = 64 bits with
+ * PIIX_NUM_PIRQS * ISA_NUM_IRQS = 64 bits with
* pic_irq * PIIX_NUM_PIRQS + pirq
*/
-#if PIIX_NUM_PIC_IRQS * PIIX_NUM_PIRQS > 64
+#if ISA_NUM_IRQS * PIIX_NUM_PIRQS > 64
#error "unable to encode pic state in 64bit in pic_levels."
#endif
uint64_t pic_levels;
- qemu_irq *pic;
+ qemu_irq cpu_intr;
+ qemu_irq isa_irqs_in[ISA_NUM_IRQS];
/* This member isn't used. Just for save/load compatibility */
int32_t pci_irq_levels_vmstate[PIIX_NUM_PIRQS];
+ MC146818RtcState rtc;
+ PCIIDEState ide;
+ UHCIState uhci;
+ PIIX4PMState pm;
+
+ uint32_t smb_io_base;
+
/* Reset Control Register contents */
uint8_t rcr;
/* IO memory region for Reset Control Register (PIIX_RCR_IOPORT) */
MemoryRegion rcr_mem;
-} PIIX3State;
-extern PCIDevice *piix4_dev;
+ bool has_acpi;
+ bool has_pic;
+ bool has_pit;
+ bool has_usb;
+ bool smm_enabled;
+};
-PIIX3State *piix3_create(PCIBus *pci_bus, ISABus **isa_bus);
+#define TYPE_PIIX_PCI_DEVICE "pci-piix"
+OBJECT_DECLARE_SIMPLE_TYPE(PIIXState, PIIX_PCI_DEVICE)
-DeviceState *piix4_create(PCIBus *pci_bus, ISABus **isa_bus, I2CBus **smbus);
+#define TYPE_PIIX3_DEVICE "PIIX3"
+#define TYPE_PIIX4_PCI_DEVICE "piix4-isa"
#endif
diff --git a/include/hw/sparc/sparc32_dma.h b/include/hw/sparc/sparc32_dma.h
index ab42c5421b..cde8ec02cb 100644
--- a/include/hw/sparc/sparc32_dma.h
+++ b/include/hw/sparc/sparc32_dma.h
@@ -4,14 +4,13 @@
#include "hw/sysbus.h"
#include "hw/scsi/esp.h"
#include "hw/net/lance.h"
+#include "qom/object.h"
#define DMA_REGS 4
#define TYPE_SPARC32_DMA_DEVICE "sparc32-dma-device"
-#define SPARC32_DMA_DEVICE(obj) OBJECT_CHECK(DMADeviceState, (obj), \
- TYPE_SPARC32_DMA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(DMADeviceState, SPARC32_DMA_DEVICE)
-typedef struct DMADeviceState DMADeviceState;
struct DMADeviceState {
SysBusDevice parent_obj;
@@ -24,37 +23,34 @@ struct DMADeviceState {
};
#define TYPE_SPARC32_ESPDMA_DEVICE "sparc32-espdma"
-#define SPARC32_ESPDMA_DEVICE(obj) OBJECT_CHECK(ESPDMADeviceState, (obj), \
- TYPE_SPARC32_ESPDMA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(ESPDMADeviceState, SPARC32_ESPDMA_DEVICE)
-typedef struct ESPDMADeviceState {
+struct ESPDMADeviceState {
DMADeviceState parent_obj;
- SysBusESPState *esp;
-} ESPDMADeviceState;
+ SysBusESPState esp;
+};
#define TYPE_SPARC32_LEDMA_DEVICE "sparc32-ledma"
-#define SPARC32_LEDMA_DEVICE(obj) OBJECT_CHECK(LEDMADeviceState, (obj), \
- TYPE_SPARC32_LEDMA_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(LEDMADeviceState, SPARC32_LEDMA_DEVICE)
-typedef struct LEDMADeviceState {
+struct LEDMADeviceState {
DMADeviceState parent_obj;
- SysBusPCNetState *lance;
-} LEDMADeviceState;
+ SysBusPCNetState lance;
+};
#define TYPE_SPARC32_DMA "sparc32-dma"
-#define SPARC32_DMA(obj) OBJECT_CHECK(SPARC32DMAState, (obj), \
- TYPE_SPARC32_DMA)
+OBJECT_DECLARE_SIMPLE_TYPE(SPARC32DMAState, SPARC32_DMA)
-typedef struct SPARC32DMAState {
+struct SPARC32DMAState {
SysBusDevice parent_obj;
MemoryRegion dmamem;
MemoryRegion ledma_alias;
- ESPDMADeviceState *espdma;
- LEDMADeviceState *ledma;
-} SPARC32DMAState;
+ ESPDMADeviceState espdma;
+ LEDMADeviceState ledma;
+};
/* sparc32_dma.c */
void ledma_memory_read(void *opaque, hwaddr addr,
diff --git a/include/hw/sparc/sun4m_iommu.h b/include/hw/sparc/sun4m_iommu.h
index 482266c6a7..4e2ab34cde 100644
--- a/include/hw/sparc/sun4m_iommu.h
+++ b/include/hw/sparc/sun4m_iommu.h
@@ -26,10 +26,11 @@
#define SUN4M_IOMMU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define IOMMU_NREGS (4 * 4096 / 4)
-typedef struct IOMMUState {
+struct IOMMUState {
SysBusDevice parent_obj;
AddressSpace iommu_as;
@@ -40,10 +41,12 @@ typedef struct IOMMUState {
hwaddr iostart;
qemu_irq irq;
uint32_t version;
-} IOMMUState;
+};
+typedef struct IOMMUState IOMMUState;
#define TYPE_SUN4M_IOMMU "sun4m-iommu"
-#define SUN4M_IOMMU(obj) OBJECT_CHECK(IOMMUState, (obj), TYPE_SUN4M_IOMMU)
+DECLARE_INSTANCE_CHECKER(IOMMUState, SUN4M_IOMMU,
+ TYPE_SUN4M_IOMMU)
#define TYPE_SUN4M_IOMMU_MEMORY_REGION "sun4m-iommu-memory-region"
diff --git a/include/hw/sparc/sun4u_iommu.h b/include/hw/sparc/sun4u_iommu.h
index 5472d489cf..f94566a72c 100644
--- a/include/hw/sparc/sun4u_iommu.h
+++ b/include/hw/sparc/sun4u_iommu.h
@@ -28,10 +28,11 @@
#define SUN4U_IOMMU_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define IOMMU_NREGS 3
-typedef struct IOMMUState {
+struct IOMMUState {
SysBusDevice parent_obj;
AddressSpace iommu_as;
@@ -39,10 +40,12 @@ typedef struct IOMMUState {
MemoryRegion iomem;
uint64_t regs[IOMMU_NREGS];
-} IOMMUState;
+};
+typedef struct IOMMUState IOMMUState;
#define TYPE_SUN4U_IOMMU "sun4u-iommu"
-#define SUN4U_IOMMU(obj) OBJECT_CHECK(IOMMUState, (obj), TYPE_SUN4U_IOMMU)
+DECLARE_INSTANCE_CHECKER(IOMMUState, SUN4U_IOMMU,
+ TYPE_SUN4U_IOMMU)
#define TYPE_SUN4U_IOMMU_MEMORY_REGION "sun4u-iommu-memory-region"
diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h
index 6fbbb238f1..8e1dda556b 100644
--- a/include/hw/ssi/aspeed_smc.h
+++ b/include/hw/ssi/aspeed_smc.h
@@ -27,71 +27,38 @@
#include "hw/ssi/ssi.h"
#include "hw/sysbus.h"
-
-typedef struct AspeedSegments {
- hwaddr addr;
- uint32_t size;
-} AspeedSegments;
+#include "qom/object.h"
struct AspeedSMCState;
-typedef struct AspeedSMCController {
- const char *name;
- uint8_t r_conf;
- uint8_t r_ce_ctrl;
- uint8_t r_ctrl0;
- uint8_t r_timings;
- uint8_t nregs_timings;
- uint8_t conf_enable_w0;
- uint8_t max_slaves;
- const AspeedSegments *segments;
- hwaddr flash_window_base;
- uint32_t flash_window_size;
- bool has_dma;
- hwaddr dma_flash_mask;
- hwaddr dma_dram_mask;
- uint32_t nregs;
- uint32_t (*segment_to_reg)(const struct AspeedSMCState *s,
- const AspeedSegments *seg);
- void (*reg_to_segment)(const struct AspeedSMCState *s, uint32_t reg,
- AspeedSegments *seg);
-} AspeedSMCController;
+struct AspeedSMCClass;
-typedef struct AspeedSMCFlash {
- struct AspeedSMCState *controller;
+#define TYPE_ASPEED_SMC_FLASH "aspeed.smc.flash"
+OBJECT_DECLARE_SIMPLE_TYPE(AspeedSMCFlash, ASPEED_SMC_FLASH)
+struct AspeedSMCFlash {
+ SysBusDevice parent_obj;
- uint8_t id;
- uint32_t size;
+ struct AspeedSMCState *controller;
+ struct AspeedSMCClass *asc;
+ uint8_t cs;
MemoryRegion mmio;
- DeviceState *flash;
-} AspeedSMCFlash;
+};
#define TYPE_ASPEED_SMC "aspeed.smc"
-#define ASPEED_SMC(obj) OBJECT_CHECK(AspeedSMCState, (obj), TYPE_ASPEED_SMC)
-#define ASPEED_SMC_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedSMCClass, (klass), TYPE_ASPEED_SMC)
-#define ASPEED_SMC_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedSMCClass, (obj), TYPE_ASPEED_SMC)
-
-typedef struct AspeedSMCClass {
- SysBusDevice parent_obj;
- const AspeedSMCController *ctrl;
-} AspeedSMCClass;
+OBJECT_DECLARE_TYPE(AspeedSMCState, AspeedSMCClass, ASPEED_SMC)
#define ASPEED_SMC_R_MAX (0x100 / 4)
+#define ASPEED_SMC_CS_MAX 5
-typedef struct AspeedSMCState {
+struct AspeedSMCState {
SysBusDevice parent_obj;
- const AspeedSMCController *ctrl;
-
MemoryRegion mmio;
+ MemoryRegion mmio_flash_container;
MemoryRegion mmio_flash;
qemu_irq irq;
- int irqline;
- uint32_t num_cs;
qemu_irq *cs_lines;
bool inject_failure;
@@ -106,17 +73,46 @@ typedef struct AspeedSMCState {
uint8_t r_timings;
uint8_t conf_enable_w0;
- /* for DMA support */
- uint64_t sdram_base;
-
AddressSpace flash_as;
MemoryRegion *dram_mr;
AddressSpace dram_as;
- AspeedSMCFlash *flashes;
+ AspeedSMCFlash flashes[ASPEED_SMC_CS_MAX];
uint8_t snoop_index;
uint8_t snoop_dummies;
-} AspeedSMCState;
+};
+
+typedef struct AspeedSegments {
+ hwaddr addr;
+ uint32_t size;
+} AspeedSegments;
+
+struct AspeedSMCClass {
+ SysBusDeviceClass parent_obj;
+
+ uint8_t r_conf;
+ uint8_t r_ce_ctrl;
+ uint8_t r_ctrl0;
+ uint8_t r_timings;
+ uint8_t nregs_timings;
+ uint8_t conf_enable_w0;
+ uint8_t cs_num_max;
+ const uint32_t *resets;
+ const AspeedSegments *segments;
+ uint32_t segment_addr_mask;
+ hwaddr flash_window_base;
+ uint32_t flash_window_size;
+ uint32_t features;
+ hwaddr dma_flash_mask;
+ hwaddr dma_dram_mask;
+ uint32_t nregs;
+ uint32_t (*segment_to_reg)(const AspeedSMCState *s,
+ const AspeedSegments *seg);
+ void (*reg_to_segment)(const AspeedSMCState *s, uint32_t reg,
+ AspeedSegments *seg);
+ void (*dma_ctrl)(AspeedSMCState *s, uint32_t value);
+ int (*addr_width)(const AspeedSMCState *s);
+};
#endif /* ASPEED_SMC_H */
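
As a sketch of the refactoring above (not part of the patch): per-controller data that used to live in an AspeedSMCController table now sits directly in the class, so an SoC-specific subtype fills it in from its class_init; every value and name below is invented for illustration.

/* Hypothetical SoC-specific class_init -- illustration only. */
static void example_smc_class_init(ObjectClass *klass, void *data)
{
    AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);

    asc->cs_num_max        = 3;                       /* illustrative */
    asc->flash_window_base = 0x20000000;              /* illustrative */
    asc->flash_window_size = 0x10000000;              /* illustrative */
    asc->segment_to_reg    = example_segment_to_reg;  /* invented hook */
    asc->reg_to_segment    = example_reg_to_segment;  /* invented hook */
}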
diff --git a/include/hw/ssi/bcm2835_spi.h b/include/hw/ssi/bcm2835_spi.h
new file mode 100644
index 0000000000..d3f8cec111
--- /dev/null
+++ b/include/hw/ssi/bcm2835_spi.h
@@ -0,0 +1,81 @@
+/*
+ * BCM2835 SPI Master Controller
+ *
+ * Copyright (c) 2024 Rayhan Faizel <rayhan.faizel@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/ssi/ssi.h"
+#include "qom/object.h"
+#include "qemu/fifo8.h"
+
+#define TYPE_BCM2835_SPI "bcm2835-spi"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SPIState, BCM2835_SPI)
+
+/*
+ * Though BCM2835 documentation says FIFOs have a capacity of 16,
+ * FIFOs are actually 16 words in size or effectively 64 bytes when operating
+ * in non-DMA mode.
+ */
+#define FIFO_SIZE 64
+#define FIFO_SIZE_3_4 48
+
+#define RO_MASK 0x1f0000
+
+#define BCM2835_SPI_CS 0x00
+#define BCM2835_SPI_FIFO 0x04
+#define BCM2835_SPI_CLK 0x08
+#define BCM2835_SPI_DLEN 0x0c
+#define BCM2835_SPI_LTOH 0x10
+#define BCM2835_SPI_DC 0x14
+
+#define BCM2835_SPI_CS_RXF BIT(20)
+#define BCM2835_SPI_CS_RXR BIT(19)
+#define BCM2835_SPI_CS_TXD BIT(18)
+#define BCM2835_SPI_CS_RXD BIT(17)
+#define BCM2835_SPI_CS_DONE BIT(16)
+#define BCM2835_SPI_CS_LEN BIT(13)
+#define BCM2835_SPI_CS_REN BIT(12)
+#define BCM2835_SPI_CS_INTR BIT(10)
+#define BCM2835_SPI_CS_INTD BIT(9)
+#define BCM2835_SPI_CS_DMAEN BIT(8)
+#define BCM2835_SPI_CS_TA BIT(7)
+#define BCM2835_SPI_CLEAR_RX BIT(5)
+#define BCM2835_SPI_CLEAR_TX BIT(4)
+
+struct BCM2835SPIState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ SSIBus *bus;
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t cs;
+ uint32_t clk;
+ uint32_t dlen;
+ uint32_t ltoh;
+ uint32_t dc;
+
+ Fifo8 tx_fifo;
+ Fifo8 rx_fifo;
+};
diff --git a/include/hw/ssi/ibex_spi_host.h b/include/hw/ssi/ibex_spi_host.h
new file mode 100644
index 0000000000..5bd5557b9a
--- /dev/null
+++ b/include/hw/ssi/ibex_spi_host.h
@@ -0,0 +1,92 @@
+
+/*
+ * QEMU model of the Ibex SPI Controller
+ * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
+ *
+ * Copyright (C) 2022 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef IBEX_SPI_HOST_H
+#define IBEX_SPI_HOST_H
+
+#include "hw/sysbus.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
+#include "qemu/timer.h"
+
+#define TYPE_IBEX_SPI_HOST "ibex-spi"
+#define IBEX_SPI_HOST(obj) \
+ OBJECT_CHECK(IbexSPIHostState, (obj), TYPE_IBEX_SPI_HOST)
+
+/* SPI Registers */
+#define IBEX_SPI_HOST_INTR_STATE (0x00 / 4) /* rw1c */
+#define IBEX_SPI_HOST_INTR_ENABLE (0x04 / 4) /* rw */
+#define IBEX_SPI_HOST_INTR_TEST (0x08 / 4) /* wo */
+#define IBEX_SPI_HOST_ALERT_TEST (0x0c / 4) /* wo */
+#define IBEX_SPI_HOST_CONTROL (0x10 / 4) /* rw */
+#define IBEX_SPI_HOST_STATUS (0x14 / 4) /* ro */
+#define IBEX_SPI_HOST_CONFIGOPTS (0x18 / 4) /* rw */
+#define IBEX_SPI_HOST_CSID (0x1c / 4) /* rw */
+#define IBEX_SPI_HOST_COMMAND (0x20 / 4) /* wo */
+/* RX/TX Modelled by FIFO */
+#define IBEX_SPI_HOST_RXDATA (0x24 / 4)
+#define IBEX_SPI_HOST_TXDATA (0x28 / 4)
+
+#define IBEX_SPI_HOST_ERROR_ENABLE (0x2c / 4) /* rw */
+#define IBEX_SPI_HOST_ERROR_STATUS (0x30 / 4) /* rw1c */
+#define IBEX_SPI_HOST_EVENT_ENABLE (0x34 / 4) /* rw */
+
+/* FIFO Len in Bytes */
+#define IBEX_SPI_HOST_TXFIFO_LEN 288
+#define IBEX_SPI_HOST_RXFIFO_LEN 256
+
+/* Max Register (Based on addr) */
+#define IBEX_SPI_HOST_MAX_REGS (IBEX_SPI_HOST_EVENT_ENABLE + 1)
+
+/* MISC */
+#define TX_INTERRUPT_TRIGGER_DELAY_NS 100
+#define BIDIRECTIONAL_TRANSFER 3
+
+typedef struct {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+ uint32_t regs[IBEX_SPI_HOST_MAX_REGS];
+ /* Multi-reg that sets config opts per CS */
+ uint32_t *config_opts;
+ Fifo8 rx_fifo;
+ Fifo8 tx_fifo;
+ QEMUTimer *fifo_trigger_handle;
+
+ qemu_irq event;
+ qemu_irq host_err;
+ uint32_t num_cs;
+ qemu_irq *cs_lines;
+ SSIBus *ssi;
+
+ /* Used to track the init status, for replicating TXDATA ghost writes */
+ bool init_status;
+} IbexSPIHostState;
+
+#endif
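
As a note on the register macros above: they are word indices (each byte offset divided by 4) into the regs[] array, so a guest access at byte offset 0x10 lands in regs[IBEX_SPI_HOST_CONTROL]. A minimal sketch of that convention follows; the read helper is hypothetical and not part of this header:

    /* Hypothetical read helper showing the word-index convention. */
    static uint64_t ibex_spi_host_reg_read(IbexSPIHostState *s, hwaddr addr)
    {
        /* addr is a byte offset; the macros above are already divided by 4 */
        return s->regs[addr / sizeof(uint32_t)];
    }
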
diff --git a/include/hw/ssi/imx_spi.h b/include/hw/ssi/imx_spi.h
index 7103953581..eeaf49bbac 100644
--- a/include/hw/ssi/imx_spi.h
+++ b/include/hw/ssi/imx_spi.h
@@ -14,6 +14,7 @@
#include "hw/ssi/ssi.h"
#include "qemu/bitops.h"
#include "qemu/fifo32.h"
+#include "qom/object.h"
#define ECSPI_FIFO_SIZE 64
@@ -76,10 +77,13 @@
#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
+/* number of chip selects supported */
+#define ECSPI_NUM_CS 4
+
#define TYPE_IMX_SPI "imx.spi"
-#define IMX_SPI(obj) OBJECT_CHECK(IMXSPIState, (obj), TYPE_IMX_SPI)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXSPIState, IMX_SPI)
-typedef struct IMXSPIState {
+struct IMXSPIState {
/* <private> */
SysBusDevice parent_obj;
@@ -88,7 +92,7 @@ typedef struct IMXSPIState {
qemu_irq irq;
- qemu_irq cs_lines[4];
+ qemu_irq cs_lines[ECSPI_NUM_CS];
SSIBus *bus;
@@ -98,6 +102,6 @@ typedef struct IMXSPIState {
Fifo32 tx_fifo;
int16_t burst_length;
-} IMXSPIState;
+};
#endif /* IMX_SPI_H */
diff --git a/include/hw/ssi/mss-spi.h b/include/hw/ssi/mss-spi.h
index f0cf3243e0..ce6279c431 100644
--- a/include/hw/ssi/mss-spi.h
+++ b/include/hw/ssi/mss-spi.h
@@ -28,13 +28,14 @@
#include "hw/sysbus.h"
#include "hw/ssi/ssi.h"
#include "qemu/fifo32.h"
+#include "qom/object.h"
#define TYPE_MSS_SPI "mss-spi"
-#define MSS_SPI(obj) OBJECT_CHECK(MSSSpiState, (obj), TYPE_MSS_SPI)
+OBJECT_DECLARE_SIMPLE_TYPE(MSSSpiState, MSS_SPI)
#define R_SPI_MAX 16
-typedef struct MSSSpiState {
+struct MSSSpiState {
SysBusDevice parent_obj;
MemoryRegion mmio;
@@ -53,6 +54,6 @@ typedef struct MSSSpiState {
bool enabled;
uint32_t regs[R_SPI_MAX];
-} MSSSpiState;
+};
#endif /* HW_MSS_SPI_H */
diff --git a/include/hw/ssi/npcm7xx_fiu.h b/include/hw/ssi/npcm7xx_fiu.h
new file mode 100644
index 0000000000..a3a1704289
--- /dev/null
+++ b/include/hw/ssi/npcm7xx_fiu.h
@@ -0,0 +1,73 @@
+/*
+ * Nuvoton NPCM7xx Flash Interface Unit (FIU)
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_FIU_H
+#define NPCM7XX_FIU_H
+
+#include "hw/ssi/ssi.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_FIU_NR_REGS (0x7c / sizeof(uint32_t))
+
+typedef struct NPCM7xxFIUState NPCM7xxFIUState;
+
+/**
+ * struct NPCM7xxFIUFlash - Per-chipselect flash controller state.
+ * @direct_access: Memory region for direct flash access.
+ * @fiu: Pointer to flash controller shared state.
+ */
+typedef struct NPCM7xxFIUFlash {
+ MemoryRegion direct_access;
+ NPCM7xxFIUState *fiu;
+} NPCM7xxFIUFlash;
+
+/**
+ * NPCM7xxFIUState - Device state for one Flash Interface Unit.
+ * @parent: System bus device.
+ * @mmio: Memory region for register access.
+ * @cs_count: Number of flash chips that may be connected to this module.
+ * @active_cs: Currently active chip select, or -1 if no chip is selected.
+ * @cs_lines: GPIO lines that may be wired to flash chips.
+ * @flash: Array of @cs_count per-flash-chip state objects.
+ * @spi: The SPI bus mastered by this controller.
+ * @regs: Register contents.
+ *
+ * Each FIU has a shared bank of registers, and controls up to four chip
+ * selects. Each chip select has a dedicated memory region which may be used to
+ * read and write the flash connected to that chip select as if it were memory.
+ */
+struct NPCM7xxFIUState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+
+ int32_t cs_count;
+ int32_t active_cs;
+ qemu_irq *cs_lines;
+ NPCM7xxFIUFlash *flash;
+
+ SSIBus *spi;
+
+ uint32_t regs[NPCM7XX_FIU_NR_REGS];
+};
+
+#define TYPE_NPCM7XX_FIU "npcm7xx-fiu"
+#define NPCM7XX_FIU(obj) OBJECT_CHECK(NPCM7xxFIUState, (obj), TYPE_NPCM7XX_FIU)
+
+#endif /* NPCM7XX_FIU_H */
diff --git a/include/hw/ssi/npcm_pspi.h b/include/hw/ssi/npcm_pspi.h
new file mode 100644
index 0000000000..37cc784d96
--- /dev/null
+++ b/include/hw/ssi/npcm_pspi.h
@@ -0,0 +1,53 @@
+/*
+ * Nuvoton Peripheral SPI Module
+ *
+ * Copyright 2023 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM_PSPI_H
+#define NPCM_PSPI_H
+
+#include "hw/ssi/ssi.h"
+#include "hw/sysbus.h"
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM_PSPI_NR_REGS 3
+
+/**
+ * NPCMPSPIState - Device state for one Peripheral SPI Module (PSPI).
+ * @parent: System bus device.
+ * @mmio: Memory region for register access.
+ * @spi: The SPI bus mastered by this controller.
+ * @regs: Register contents.
+ * @irq: The interrupt request line for this module.
+ *
+ * Each PSPI module exposes a small bank of registers and masters a single
+ * SPI bus; @irq signals the module's interrupt to the interrupt controller.
+ */
+typedef struct NPCMPSPIState {
+ SysBusDevice parent;
+
+ MemoryRegion mmio;
+
+ SSIBus *spi;
+ uint16_t regs[NPCM_PSPI_NR_REGS];
+ qemu_irq irq;
+} NPCMPSPIState;
+
+#define TYPE_NPCM_PSPI "npcm-pspi"
+OBJECT_DECLARE_SIMPLE_TYPE(NPCMPSPIState, NPCM_PSPI)
+
+#endif /* NPCM_PSPI_H */
diff --git a/include/hw/ssi/pl022.h b/include/hw/ssi/pl022.h
index a080519366..25d58db5f3 100644
--- a/include/hw/ssi/pl022.h
+++ b/include/hw/ssi/pl022.h
@@ -9,9 +9,10 @@
* (at your option) any later version.
*/
-/* This is a model of the Arm PrimeCell PL022 synchronous serial port.
+/*
+ * This is a model of the Arm PrimeCell PL022 synchronous serial port.
* The PL022 TRM is:
- * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0194h/DDI0194H_ssp_pl022_trm.pdf
+ * https://developer.arm.com/documentation/ddi0194/latest
*
* QEMU interface:
* + sysbus IRQ: SSPINTR combined interrupt line
@@ -22,11 +23,12 @@
#define HW_SSI_PL022_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_PL022 "pl022"
-#define PL022(obj) OBJECT_CHECK(PL022State, (obj), TYPE_PL022)
+OBJECT_DECLARE_SIMPLE_TYPE(PL022State, PL022)
-typedef struct PL022State {
+struct PL022State {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -46,6 +48,6 @@ typedef struct PL022State {
uint16_t rx_fifo[8];
qemu_irq irq;
SSIBus *ssi;
-} PL022State;
+};
#endif
diff --git a/include/hw/ssi/sifive_spi.h b/include/hw/ssi/sifive_spi.h
new file mode 100644
index 0000000000..d0c40cdb11
--- /dev/null
+++ b/include/hw/ssi/sifive_spi.h
@@ -0,0 +1,50 @@
+/*
+ * QEMU model of the SiFive SPI Controller
+ *
+ * Copyright (c) 2021 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SIFIVE_SPI_H
+#define HW_SIFIVE_SPI_H
+
+#include "qemu/fifo8.h"
+#include "hw/sysbus.h"
+
+#define SIFIVE_SPI_REG_NUM (0x78 / 4)
+
+#define TYPE_SIFIVE_SPI "sifive.spi"
+#define SIFIVE_SPI(obj) OBJECT_CHECK(SiFiveSPIState, (obj), TYPE_SIFIVE_SPI)
+
+typedef struct SiFiveSPIState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+ qemu_irq irq;
+
+ uint32_t num_cs;
+ qemu_irq *cs_lines;
+
+ SSIBus *spi;
+
+ Fifo8 tx_fifo;
+ Fifo8 rx_fifo;
+
+ uint32_t regs[SIFIVE_SPI_REG_NUM];
+} SiFiveSPIState;
+
+#endif /* HW_SIFIVE_SPI_H */
diff --git a/include/hw/ssi/ssi.h b/include/hw/ssi/ssi.h
index eac168aa1d..3cdcbd5390 100644
--- a/include/hw/ssi/ssi.h
+++ b/include/hw/ssi/ssi.h
@@ -1,29 +1,26 @@
/* QEMU Synchronous Serial Interface support. */
-/* In principle SSI is a point-point interface. As such the qemu
- implementation has a single slave device on a "bus".
- However it is fairly common for boards to have multiple slaves
- connected to a single master, and select devices with an external
- chip select. This is implemented in qemu by having an explicit mux device.
- It is assumed that master and slave are both using the same transfer width.
- */
+/*
+ * In principle SSI is a point-to-point interface. As such the qemu
+ * implementation has a single peripheral on a "bus".
+ * However it is fairly common for boards to have multiple peripherals
+ * connected to a single master, and select devices with an external
+ * chip select. This is implemented in qemu by having an explicit mux device.
+ * It is assumed that master and peripheral are both using the same transfer
+ * width.
+ */
#ifndef QEMU_SSI_H
#define QEMU_SSI_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
-typedef struct SSISlave SSISlave;
-typedef struct SSISlaveClass SSISlaveClass;
typedef enum SSICSMode SSICSMode;
-#define TYPE_SSI_SLAVE "ssi-slave"
-#define SSI_SLAVE(obj) \
- OBJECT_CHECK(SSISlave, (obj), TYPE_SSI_SLAVE)
-#define SSI_SLAVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SSISlaveClass, (klass), TYPE_SSI_SLAVE)
-#define SSI_SLAVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SSISlaveClass, (obj), TYPE_SSI_SLAVE)
+#define TYPE_SSI_PERIPHERAL "ssi-peripheral"
+OBJECT_DECLARE_TYPE(SSIPeripheral, SSIPeripheralClass,
+ SSI_PERIPHERAL)
#define SSI_GPIO_CS "ssi-gpio-cs"
@@ -33,21 +30,21 @@ enum SSICSMode {
SSI_CS_HIGH,
};
-/* Slave devices. */
-struct SSISlaveClass {
+/* Peripherals. */
+struct SSIPeripheralClass {
DeviceClass parent_class;
- void (*realize)(SSISlave *dev, Error **errp);
+ void (*realize)(SSIPeripheral *dev, Error **errp);
/* if you have standard or no CS behaviour, just override transfer.
* This is called when the device cs is active (true by default).
*/
- uint32_t (*transfer)(SSISlave *dev, uint32_t val);
+ uint32_t (*transfer)(SSIPeripheral *dev, uint32_t val);
/* called when the CS line changes. Optional, devices only need to implement
* this if they have side effects associated with the cs line (beyond
* tristating the txrx lines).
*/
- int (*set_cs)(SSISlave *dev, bool select);
+ int (*set_cs)(SSIPeripheral *dev, bool select);
/* define whether or not CS exists and is active low/high */
SSICSMode cs_polarity;
@@ -56,30 +53,36 @@ struct SSISlaveClass {
* cs_polarity are unused if this is overwritten. Transfer_raw will
* always be called for the device for every txrx access to the parent bus
*/
- uint32_t (*transfer_raw)(SSISlave *dev, uint32_t val);
+ uint32_t (*transfer_raw)(SSIPeripheral *dev, uint32_t val);
};
-struct SSISlave {
+struct SSIPeripheral {
DeviceState parent_obj;
+ /* cache the class */
+ SSIPeripheralClass *spc;
+
/* Chip select state */
bool cs;
+
+ /* Chip select index */
+ uint8_t cs_index;
};
-extern const VMStateDescription vmstate_ssi_slave;
+extern const VMStateDescription vmstate_ssi_peripheral;
-#define VMSTATE_SSI_SLAVE(_field, _state) { \
+#define VMSTATE_SSI_PERIPHERAL(_field, _state) { \
.name = (stringify(_field)), \
- .size = sizeof(SSISlave), \
- .vmsd = &vmstate_ssi_slave, \
+ .size = sizeof(SSIPeripheral), \
+ .vmsd = &vmstate_ssi_peripheral, \
.flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, SSISlave), \
+ .offset = vmstate_offset_value(_state, _field, SSIPeripheral), \
}
-DeviceState *ssi_create_slave(SSIBus *bus, const char *name);
+DeviceState *ssi_create_peripheral(SSIBus *bus, const char *name);
/**
- * ssi_realize_and_unref: realize and unref an SSI slave device
- * @dev: SSI slave device to realize
+ * ssi_realize_and_unref: realize and unref an SSI peripheral
+ * @dev: SSI peripheral to realize
* @bus: SSI bus to put it on
* @errp: error pointer
*
@@ -90,10 +93,10 @@ DeviceState *ssi_create_slave(SSIBus *bus, const char *name);
* This function is useful if you have created @dev via qdev_new()
* (which takes a reference to the device it returns to you), so that
* you can set properties on it before realizing it. If you don't need
- * to set properties then ssi_create_slave() is probably better (as it
+ * to set properties then ssi_create_peripheral() is probably better (as it
* does the create, init and realize in one step).
*
- * If you are embedding the SSI slave into another QOM device and
+ * If you are embedding the SSI peripheral into another QOM device and
* initialized it via some variant on object_initialize_child() then
* do not use this function, because that family of functions arrange
* for the only reference to the child device to be held by the parent
@@ -109,4 +112,6 @@ SSIBus *ssi_create_bus(DeviceState *parent, const char *name);
uint32_t ssi_transfer(SSIBus *bus, uint32_t val);
+DeviceState *ssi_get_cs(SSIBus *bus, uint8_t cs_index);
+
#endif
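
For orientation, a minimal sketch of how board code might use the renamed helpers declared above; the "m25p80" device name, the "cs" property and the surrounding variables are assumptions for illustration, not taken from this patch:

    /* Illustrative fragment only; assumes the usual qdev/ssi/error headers. */
    static void board_wire_spi(DeviceState *soc)
    {
        SSIBus *bus = ssi_create_bus(soc, "spi0");

        /* Simple case: create, init and realize in one step. */
        ssi_create_peripheral(bus, "m25p80");

        /*
         * When properties must be set before realize, pair qdev_new() with
         * ssi_realize_and_unref() as described in the comment above.
         */
        DeviceState *dev = qdev_new("m25p80");
        qdev_prop_set_uint8(dev, "cs", 1);   /* "cs" property is an assumption */
        ssi_realize_and_unref(dev, bus, &error_fatal);
    }
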
diff --git a/include/hw/ssi/stm32f2xx_spi.h b/include/hw/ssi/stm32f2xx_spi.h
index e24b007abf..3683b4ad32 100644
--- a/include/hw/ssi/stm32f2xx_spi.h
+++ b/include/hw/ssi/stm32f2xx_spi.h
@@ -27,6 +27,7 @@
#include "hw/sysbus.h"
#include "hw/ssi/ssi.h"
+#include "qom/object.h"
#define STM_SPI_CR1 0x00
#define STM_SPI_CR2 0x04
@@ -44,10 +45,9 @@
#define STM_SPI_SR_RXNE 1
#define TYPE_STM32F2XX_SPI "stm32f2xx-spi"
-#define STM32F2XX_SPI(obj) \
- OBJECT_CHECK(STM32F2XXSPIState, (obj), TYPE_STM32F2XX_SPI)
+OBJECT_DECLARE_SIMPLE_TYPE(STM32F2XXSPIState, STM32F2XX_SPI)
-typedef struct {
+struct STM32F2XXSPIState {
/* <private> */
SysBusDevice parent_obj;
@@ -66,6 +66,6 @@ typedef struct {
qemu_irq irq;
SSIBus *ssi;
-} STM32F2XXSPIState;
+};
#endif /* HW_STM32F2XX_SPI_H */
diff --git a/include/hw/ssi/xilinx_spips.h b/include/hw/ssi/xilinx_spips.h
index 6a39b55a7b..7a754bf67a 100644
--- a/include/hw/ssi/xilinx_spips.h
+++ b/include/hw/ssi/xilinx_spips.h
@@ -29,11 +29,14 @@
#include "qemu/fifo32.h"
#include "hw/stream.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
typedef struct XilinxSPIPS XilinxSPIPS;
+/* For SPIPS, QSPIPS. */
#define XLNX_SPIPS_R_MAX (0x100 / 4)
-#define XLNX_ZYNQMP_SPIPS_R_MAX (0x830 / 4)
+/* For ZYNQMP_QSPIPS. */
+#define XLNX_ZYNQMP_SPIPS_R_MAX (0x200 / 4)
/* Bite off 4k chunks at a time */
#define LQSPI_CACHE_SIZE 1024
@@ -85,24 +88,25 @@ struct XilinxSPIPS {
bool man_start_com;
};
-typedef struct {
+struct XilinxQSPIPS {
XilinxSPIPS parent_obj;
uint8_t lqspi_buf[LQSPI_CACHE_SIZE];
hwaddr lqspi_cached_addr;
Error *migration_blocker;
bool mmio_execution_enabled;
-} XilinxQSPIPS;
+};
+typedef struct XilinxQSPIPS XilinxQSPIPS;
-typedef struct {
+struct XlnxZynqMPQSPIPS {
XilinxQSPIPS parent_obj;
- StreamSlave *dma;
+ StreamSink *dma;
int gqspi_irqline;
uint32_t regs[XLNX_ZYNQMP_SPIPS_R_MAX];
- /* GQSPI has seperate tx/rx fifos */
+ /* GQSPI has separate tx/rx fifos */
Fifo8 rx_fifo_g;
Fifo8 tx_fifo_g;
Fifo32 fifo_g;
@@ -117,32 +121,26 @@ typedef struct {
bool man_start_com_g;
uint32_t dma_burst_size;
uint8_t dma_buf[QSPI_DMA_MAX_BURST_SIZE];
-} XlnxZynqMPQSPIPS;
+};
-typedef struct XilinxSPIPSClass {
+struct XilinxSPIPSClass {
SysBusDeviceClass parent_class;
const MemoryRegionOps *reg_ops;
+ uint64_t reg_size;
uint32_t rx_fifo_size;
uint32_t tx_fifo_size;
-} XilinxSPIPSClass;
+};
#define TYPE_XILINX_SPIPS "xlnx.ps7-spi"
#define TYPE_XILINX_QSPIPS "xlnx.ps7-qspi"
#define TYPE_XLNX_ZYNQMP_QSPIPS "xlnx.usmp-gqspi"
-#define XILINX_SPIPS(obj) \
- OBJECT_CHECK(XilinxSPIPS, (obj), TYPE_XILINX_SPIPS)
-#define XILINX_SPIPS_CLASS(klass) \
- OBJECT_CLASS_CHECK(XilinxSPIPSClass, (klass), TYPE_XILINX_SPIPS)
-#define XILINX_SPIPS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XilinxSPIPSClass, (obj), TYPE_XILINX_SPIPS)
+OBJECT_DECLARE_TYPE(XilinxSPIPS, XilinxSPIPSClass, XILINX_SPIPS)
-#define XILINX_QSPIPS(obj) \
- OBJECT_CHECK(XilinxQSPIPS, (obj), TYPE_XILINX_QSPIPS)
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxQSPIPS, XILINX_QSPIPS)
-#define XLNX_ZYNQMP_QSPIPS(obj) \
- OBJECT_CHECK(XlnxZynqMPQSPIPS, (obj), TYPE_XLNX_ZYNQMP_QSPIPS)
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPQSPIPS, XLNX_ZYNQMP_QSPIPS)
#endif /* XILINX_SPIPS_H */
diff --git a/include/hw/ssi/xlnx-versal-ospi.h b/include/hw/ssi/xlnx-versal-ospi.h
new file mode 100644
index 0000000000..4ac975aa2f
--- /dev/null
+++ b/include/hw/ssi/xlnx-versal-ospi.h
@@ -0,0 +1,111 @@
+/*
+ * Header file for the Xilinx Versal's OSPI controller
+ *
+ * Copyright (C) 2021 Xilinx Inc
+ * Written by Francisco Iglesias <francisco.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is a model of Xilinx Versal's Octal SPI flash memory controller
+ * documented in Versal's Technical Reference manual [1] and the Versal ACAP
+ * Register reference [2].
+ *
+ * References:
+ *
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/OSPI-Module
+ *
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion for the device's registers
+ * + sysbus MMIO region 1: MemoryRegion for flash memory linear address space
+ * (data transfer).
+ * + sysbus IRQ 0: Device interrupt.
+ * + Named GPIO input "ospi-mux-sel": 0: enables indirect access mode
+ * and 1: enables direct access mode.
+ * + Property "dac-with-indac": Allow both direct accesses and indirect
+ * accesses simultaneously.
+ * + Property "indac-write-disabled": Disable indirect access writes.
+ */
+
+#ifndef XLNX_VERSAL_OSPI_H
+#define XLNX_VERSAL_OSPI_H
+
+#include "hw/register.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/fifo8.h"
+#include "hw/dma/xlnx_csu_dma.h"
+
+#define TYPE_XILINX_VERSAL_OSPI "xlnx.versal-ospi"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalOspi, XILINX_VERSAL_OSPI)
+
+#define XILINX_VERSAL_OSPI_R_MAX (0xfc / 4 + 1)
+
+/*
+ * Indirect operations
+ */
+typedef struct IndOp {
+ uint32_t flash_addr;
+ uint32_t num_bytes;
+ uint32_t done_bytes;
+ bool completed;
+} IndOp;
+
+struct XlnxVersalOspi {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ MemoryRegion iomem_dac;
+
+ uint8_t num_cs;
+ qemu_irq *cs_lines;
+
+ SSIBus *spi;
+
+ Fifo8 rx_fifo;
+ Fifo8 tx_fifo;
+
+ Fifo8 rx_sram;
+ Fifo8 tx_sram;
+
+ qemu_irq irq;
+
+ XlnxCSUDMA *dma_src;
+ bool ind_write_disabled;
+ bool dac_with_indac;
+ bool dac_enable;
+ bool src_dma_inprog;
+
+ IndOp rd_ind_op[2];
+ IndOp wr_ind_op[2];
+
+ uint32_t regs[XILINX_VERSAL_OSPI_R_MAX];
+ RegisterInfo regs_info[XILINX_VERSAL_OSPI_R_MAX];
+
+ /* Maximum inferred membank size is 512 bytes */
+ uint8_t stig_membank[512];
+};
+
+#endif /* XLNX_VERSAL_OSPI_H */
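
The "QEMU interface" comment above is enough to sketch how an SoC model could wire this device up; the base addresses, IRQ and container object below are assumptions, not code from this patch:

    /* Sketch only: exercising the documented sysbus regions, IRQ, GPIO input
     * and properties of the OSPI model. */
    DeviceState *dev = DEVICE(&s->ospi);

    qdev_prop_set_bit(dev, "dac-with-indac", true);
    sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ospi_regs_base);  /* registers */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, ospi_dac_base);   /* linear window */
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, ospi_irq);
    /* Driving "ospi-mux-sel" to 1 selects direct access mode. */
    qemu_set_irq(qdev_get_gpio_in_named(dev, "ospi-mux-sel", 0), 1);
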
diff --git a/include/hw/stream.h b/include/hw/stream.h
index ed09e83683..f166facb09 100644
--- a/include/hw/stream.h
+++ b/include/hw/stream.h
@@ -3,52 +3,50 @@
#include "qom/object.h"
-/* stream slave. Used until qdev provides a generic way. */
-#define TYPE_STREAM_SLAVE "stream-slave"
+#define TYPE_STREAM_SINK "stream-sink"
-#define STREAM_SLAVE_CLASS(klass) \
- OBJECT_CLASS_CHECK(StreamSlaveClass, (klass), TYPE_STREAM_SLAVE)
-#define STREAM_SLAVE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(StreamSlaveClass, (obj), TYPE_STREAM_SLAVE)
-#define STREAM_SLAVE(obj) \
- INTERFACE_CHECK(StreamSlave, (obj), TYPE_STREAM_SLAVE)
+typedef struct StreamSinkClass StreamSinkClass;
+DECLARE_CLASS_CHECKERS(StreamSinkClass, STREAM_SINK,
+ TYPE_STREAM_SINK)
+#define STREAM_SINK(obj) \
+ INTERFACE_CHECK(StreamSink, (obj), TYPE_STREAM_SINK)
-typedef struct StreamSlave StreamSlave;
+typedef struct StreamSink StreamSink;
typedef void (*StreamCanPushNotifyFn)(void *opaque);
-typedef struct StreamSlaveClass {
+struct StreamSinkClass {
InterfaceClass parent;
/**
- * can push - determine if a stream slave is capable of accepting at least
+ * can push - determine if a stream sink is capable of accepting at least
* one byte of data. Returns false if cannot accept. If not implemented, the
- * slave is assumed to always be capable of receiving.
- * @notify: Optional callback that the slave will call when the slave is
+ * sink is assumed to always be capable of receiving.
+ * @notify: Optional callback that the sink will call when the sink is
* capable of receiving again. Only called if false is returned.
* @notify_opaque: opaque data to pass to notify call.
*/
- bool (*can_push)(StreamSlave *obj, StreamCanPushNotifyFn notify,
+ bool (*can_push)(StreamSink *obj, StreamCanPushNotifyFn notify,
void *notify_opaque);
/**
- * push - push data to a Stream slave. The number of bytes pushed is
- * returned. If the slave short returns, the master must wait before trying
- * again, the slave may continue to just return 0 waiting for the vm time to
+ * push - push data to a Stream sink. The number of bytes pushed is
+ * returned. If the sink returns short, the master must wait before trying
+ * again; the sink may continue to just return 0, waiting for the vm time to
* advance. The can_push() function can be used to trap the point in time
- * where the slave is ready to receive again, otherwise polling on a QEMU
+ * where the sink is ready to receive again, otherwise polling on a QEMU
* timer will work.
- * @obj: Stream slave to push to
+ * @obj: Stream sink to push to
* @buf: Data to write
* @len: Maximum number of bytes to write
* @eop: End of packet flag
*/
- size_t (*push)(StreamSlave *obj, unsigned char *buf, size_t len, bool eop);
-} StreamSlaveClass;
+ size_t (*push)(StreamSink *obj, unsigned char *buf, size_t len, bool eop);
+};
size_t
-stream_push(StreamSlave *sink, uint8_t *buf, size_t len, bool eop);
+stream_push(StreamSink *sink, uint8_t *buf, size_t len, bool eop);
bool
-stream_can_push(StreamSlave *sink, StreamCanPushNotifyFn notify,
+stream_can_push(StreamSink *sink, StreamCanPushNotifyFn notify,
void *notify_opaque);
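
A short sketch of how a data producer would drive the sink interface declared above; the callback and the surrounding function are hypothetical:

    /* Hypothetical master-side usage of stream_can_push()/stream_push(). */
    static void my_sink_ready(void *opaque)
    {
        /* The sink can accept data again; retry the stalled transfer here. */
    }

    static size_t my_master_send(StreamSink *sink, uint8_t *buf, size_t len,
                                 void *opaque)
    {
        if (!stream_can_push(sink, my_sink_ready, opaque)) {
            return 0;               /* wait for my_sink_ready() */
        }
        /* A short return means the sink stalled part way through. */
        return stream_push(sink, buf, len, true);
    }
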
diff --git a/include/hw/sysbus.h b/include/hw/sysbus.h
index da9f85c58c..3cb29a480e 100644
--- a/include/hw/sysbus.h
+++ b/include/hw/sysbus.h
@@ -5,22 +5,19 @@
#include "hw/qdev-core.h"
#include "exec/memory.h"
+#include "qom/object.h"
#define QDEV_MAX_MMIO 32
#define QDEV_MAX_PIO 32
#define TYPE_SYSTEM_BUS "System"
-#define SYSTEM_BUS(obj) OBJECT_CHECK(BusState, (obj), TYPE_SYSTEM_BUS)
+DECLARE_INSTANCE_CHECKER(BusState, SYSTEM_BUS,
+ TYPE_SYSTEM_BUS)
-typedef struct SysBusDevice SysBusDevice;
#define TYPE_SYS_BUS_DEVICE "sys-bus-device"
-#define SYS_BUS_DEVICE(obj) \
- OBJECT_CHECK(SysBusDevice, (obj), TYPE_SYS_BUS_DEVICE)
-#define SYS_BUS_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(SysBusDeviceClass, (klass), TYPE_SYS_BUS_DEVICE)
-#define SYS_BUS_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(SysBusDeviceClass, (obj), TYPE_SYS_BUS_DEVICE)
+OBJECT_DECLARE_TYPE(SysBusDevice, SysBusDeviceClass,
+ SYS_BUS_DEVICE)
/**
* SysBusDeviceClass:
@@ -31,7 +28,7 @@ typedef struct SysBusDevice SysBusDevice;
#define SYSBUS_DEVICE_GPIO_IRQ "sysbus-irq"
-typedef struct SysBusDeviceClass {
+struct SysBusDeviceClass {
/*< private >*/
DeviceClass parent_class;
@@ -52,7 +49,7 @@ typedef struct SysBusDeviceClass {
*/
char *(*explicit_ofw_unit_address)(const SysBusDevice *dev);
void (*connect_irq_notifier)(SysBusDevice *dev, qemu_irq irq);
-} SysBusDeviceClass;
+};
struct SysBusDevice {
/*< private >*/
@@ -86,9 +83,6 @@ void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr);
void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr,
int priority);
void sysbus_mmio_unmap(SysBusDevice *dev, int n);
-void sysbus_add_io(SysBusDevice *dev, hwaddr addr,
- MemoryRegion *mem);
-MemoryRegion *sysbus_address_space(SysBusDevice *dev);
bool sysbus_realize(SysBusDevice *dev, Error **errp);
bool sysbus_realize_and_unref(SysBusDevice *dev, Error **errp);
diff --git a/include/hw/timer/a9gtimer.h b/include/hw/timer/a9gtimer.h
index 81c4388784..6ae9122e4b 100644
--- a/include/hw/timer/a9gtimer.h
+++ b/include/hw/timer/a9gtimer.h
@@ -24,11 +24,12 @@
#define A9GTIMER_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define A9_GTIMER_MAX_CPUS 4
#define TYPE_A9_GTIMER "arm.cortex-a9-global-timer"
-#define A9_GTIMER(obj) OBJECT_CHECK(A9GTimerState, (obj), TYPE_A9_GTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(A9GTimerState, A9_GTIMER)
#define R_COUNTER_LO 0x00
#define R_COUNTER_HI 0x04
@@ -55,7 +56,6 @@
#define R_AUTO_INCREMENT 0x18
typedef struct A9GTimerPerCPU A9GTimerPerCPU;
-typedef struct A9GTimerState A9GTimerState;
struct A9GTimerPerCPU {
A9GTimerState *parent;
diff --git a/include/hw/timer/allwinner-a10-pit.h b/include/hw/timer/allwinner-a10-pit.h
index 871c95b512..8435758ad6 100644
--- a/include/hw/timer/allwinner-a10-pit.h
+++ b/include/hw/timer/allwinner-a10-pit.h
@@ -3,9 +3,10 @@
#include "hw/ptimer.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_AW_A10_PIT "allwinner-A10-timer"
-#define AW_A10_PIT(obj) OBJECT_CHECK(AwA10PITState, (obj), TYPE_AW_A10_PIT)
+OBJECT_DECLARE_SIMPLE_TYPE(AwA10PITState, AW_A10_PIT)
#define AW_A10_PIT_TIMER_NR 6
#define AW_A10_PIT_TIMER_IRQ 0x1
@@ -36,7 +37,6 @@
#define AW_A10_PIT_DEFAULT_CLOCK 0x4
-typedef struct AwA10PITState AwA10PITState;
typedef struct AwA10TimerContext {
AwA10PITState *container;
diff --git a/include/hw/timer/arm_mptimer.h b/include/hw/timer/arm_mptimer.h
index c46d8d2309..65a96e2a0d 100644
--- a/include/hw/timer/arm_mptimer.h
+++ b/include/hw/timer/arm_mptimer.h
@@ -22,6 +22,7 @@
#define HW_TIMER_ARM_MPTIMER_H
#include "hw/sysbus.h"
+#include "qom/object.h"
#define ARM_MPTIMER_MAX_CPUS 4
@@ -35,10 +36,9 @@ typedef struct {
} TimerBlock;
#define TYPE_ARM_MPTIMER "arm_mptimer"
-#define ARM_MPTIMER(obj) \
- OBJECT_CHECK(ARMMPTimerState, (obj), TYPE_ARM_MPTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(ARMMPTimerState, ARM_MPTIMER)
-typedef struct {
+struct ARMMPTimerState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -46,6 +46,6 @@ typedef struct {
uint32_t num_cpu;
TimerBlock timerblock[ARM_MPTIMER_MAX_CPUS];
MemoryRegion iomem;
-} ARMMPTimerState;
+};
#endif
diff --git a/include/hw/timer/armv7m_systick.h b/include/hw/timer/armv7m_systick.h
index 25e5ceacc8..ee09b13881 100644
--- a/include/hw/timer/armv7m_systick.h
+++ b/include/hw/timer/armv7m_systick.h
@@ -13,12 +13,26 @@
#define HW_TIMER_ARMV7M_SYSTICK_H
#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "hw/ptimer.h"
+#include "hw/clock.h"
#define TYPE_SYSTICK "armv7m_systick"
-#define SYSTICK(obj) OBJECT_CHECK(SysTickState, (obj), TYPE_SYSTICK)
+OBJECT_DECLARE_SIMPLE_TYPE(SysTickState, SYSTICK)
-typedef struct SysTickState {
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0 is the register interface (covering
+ * the registers which are mapped at address 0xE000E010)
+ * + sysbus IRQ 0 is the interrupt line to the NVIC
+ * + Clock input "refclk" is the external reference clock
+ * (used when SYST_CSR.CLKSOURCE == 0)
+ * + Clock input "cpuclk" is the main CPU clock
+ * (used when SYST_CSR.CLKSOURCE == 1)
+ */
+
+struct SysTickState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -26,31 +40,11 @@ typedef struct SysTickState {
uint32_t control;
uint32_t reload;
int64_t tick;
- QEMUTimer *timer;
+ ptimer_state *ptimer;
MemoryRegion iomem;
qemu_irq irq;
-} SysTickState;
-
-/*
- * Multiplication factor to convert from system clock ticks to qemu timer
- * ticks. This should be set (by board code, usually) to a value
- * equal to NANOSECONDS_PER_SECOND / frq, where frq is the clock frequency
- * in Hz of the CPU.
- *
- * This value is used by the systick device when it is running in
- * its "use the CPU clock" mode (ie when SYST_CSR.CLKSOURCE == 1) to
- * set how fast the timer should tick.
- *
- * TODO: we should refactor this so that rather than using a global
- * we use a device property or something similar. This is complicated
- * because (a) the property would need to be plumbed through from the
- * board code down through various layers to the systick device
- * and (b) the property needs to be modifiable after realize, because
- * the stellaris board uses this to implement the behaviour where the
- * guest can reprogram the PLL registers to downclock the CPU, and the
- * systick device needs to react accordingly. Possibly this should
- * be deferred until we have a good API for modelling clock trees.
- */
-extern int system_clock_scale;
+ Clock *refclk;
+ Clock *cpuclk;
+};
#endif
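
A minimal sketch of how board or SoC code might feed the two clock inputs documented above; the source Clock objects and the creation context are assumptions:

    /* Illustrative wiring of the "cpuclk" and "refclk" inputs. */
    DeviceState *systick = qdev_new(TYPE_SYSTICK);

    qdev_connect_clock_in(systick, "cpuclk", cpuclk);
    qdev_connect_clock_in(systick, "refclk", refclk);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(systick), &error_fatal);
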
diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h
index 948329893c..07dc6b6f2c 100644
--- a/include/hw/timer/aspeed_timer.h
+++ b/include/hw/timer/aspeed_timer.h
@@ -24,13 +24,14 @@
#include "qemu/timer.h"
#include "hw/misc/aspeed_scu.h"
+#include "qom/object.h"
-#define ASPEED_TIMER(obj) \
- OBJECT_CHECK(AspeedTimerCtrlState, (obj), TYPE_ASPEED_TIMER);
#define TYPE_ASPEED_TIMER "aspeed.timer"
+OBJECT_DECLARE_TYPE(AspeedTimerCtrlState, AspeedTimerClass, ASPEED_TIMER)
#define TYPE_ASPEED_2400_TIMER TYPE_ASPEED_TIMER "-ast2400"
#define TYPE_ASPEED_2500_TIMER TYPE_ASPEED_TIMER "-ast2500"
#define TYPE_ASPEED_2600_TIMER TYPE_ASPEED_TIMER "-ast2600"
+#define TYPE_ASPEED_1030_TIMER TYPE_ASPEED_TIMER "-ast1030"
#define ASPEED_TIMER_NR_TIMERS 8
@@ -50,7 +51,7 @@ typedef struct AspeedTimer {
uint64_t start;
} AspeedTimer;
-typedef struct AspeedTimerCtrlState {
+struct AspeedTimerCtrlState {
/*< private >*/
SysBusDevice parent;
@@ -64,18 +65,14 @@ typedef struct AspeedTimerCtrlState {
AspeedTimer timers[ASPEED_TIMER_NR_TIMERS];
AspeedSCUState *scu;
-} AspeedTimerCtrlState;
+};
-#define ASPEED_TIMER_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedTimerClass, (klass), TYPE_ASPEED_TIMER)
-#define ASPEED_TIMER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedTimerClass, (obj), TYPE_ASPEED_TIMER)
-typedef struct AspeedTimerClass {
+struct AspeedTimerClass {
SysBusDeviceClass parent_class;
uint64_t (*read)(AspeedTimerCtrlState *s, hwaddr offset);
void (*write)(AspeedTimerCtrlState *s, hwaddr offset, uint64_t value);
-} AspeedTimerClass;
+};
#endif /* ASPEED_TIMER_H */
diff --git a/include/hw/timer/avr_timer16.h b/include/hw/timer/avr_timer16.h
index 982019d242..a1a032a24d 100644
--- a/include/hw/timer/avr_timer16.h
+++ b/include/hw/timer/avr_timer16.h
@@ -30,7 +30,7 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
-#include "hw/hw.h"
+#include "qom/object.h"
enum NextInterrupt {
OVERFLOW,
@@ -41,10 +41,9 @@ enum NextInterrupt {
};
#define TYPE_AVR_TIMER16 "avr-timer16"
-#define AVR_TIMER16(obj) \
- OBJECT_CHECK(AVRTimer16State, (obj), TYPE_AVR_TIMER16)
+OBJECT_DECLARE_SIMPLE_TYPE(AVRTimer16State, AVR_TIMER16)
-typedef struct AVRTimer16State {
+struct AVRTimer16State {
/* <private> */
SysBusDevice parent_obj;
@@ -89,6 +88,6 @@ typedef struct AVRTimer16State {
uint64_t period_ns;
uint64_t reset_time_ns;
enum NextInterrupt next_interrupt;
-} AVRTimer16State;
+};
#endif /* HW_TIMER_AVR_TIMER16_H */
diff --git a/include/hw/timer/bcm2835_systmr.h b/include/hw/timer/bcm2835_systmr.h
index c0bc5c8127..a8f605beeb 100644
--- a/include/hw/timer/bcm2835_systmr.h
+++ b/include/hw/timer/bcm2835_systmr.h
@@ -6,28 +6,37 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef BCM2835_SYSTIMER_H
-#define BCM2835_SYSTIMER_H
+#ifndef BCM2835_SYSTMR_H
+#define BCM2835_SYSTMR_H
#include "hw/sysbus.h"
#include "hw/irq.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
#define TYPE_BCM2835_SYSTIMER "bcm2835-sys-timer"
-#define BCM2835_SYSTIMER(obj) \
- OBJECT_CHECK(BCM2835SystemTimerState, (obj), TYPE_BCM2835_SYSTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835SystemTimerState, BCM2835_SYSTIMER)
+
+#define BCM2835_SYSTIMER_COUNT 4
typedef struct {
+ unsigned id;
+ QEMUTimer timer;
+ qemu_irq irq;
+ BCM2835SystemTimerState *state;
+} BCM2835SystemTimerCompare;
+
+struct BCM2835SystemTimerState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
- qemu_irq irq;
-
struct {
- uint32_t status;
- uint32_t compare[4];
+ uint32_t ctrl_status;
+ uint32_t compare[BCM2835_SYSTIMER_COUNT];
} reg;
-} BCM2835SystemTimerState;
+ BCM2835SystemTimerCompare tmr[BCM2835_SYSTIMER_COUNT];
+};
#endif
diff --git a/include/hw/timer/cadence_ttc.h b/include/hw/timer/cadence_ttc.h
new file mode 100644
index 0000000000..e1251383f2
--- /dev/null
+++ b/include/hw/timer/cadence_ttc.h
@@ -0,0 +1,54 @@
+/*
+ * Xilinx Zynq cadence TTC model
+ *
+ * Copyright (c) 2011 Xilinx Inc.
+ * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com)
+ * Copyright (c) 2012 PetaLogix Pty Ltd.
+ * Written By Haibing Ma
+ * M. Habib
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HW_TIMER_CADENCE_TTC_H
+#define HW_TIMER_CADENCE_TTC_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+
+typedef struct {
+ QEMUTimer *timer;
+ int freq;
+
+ uint32_t reg_clock;
+ uint32_t reg_count;
+ uint32_t reg_value;
+ uint16_t reg_interval;
+ uint16_t reg_match[3];
+ uint32_t reg_intr;
+ uint32_t reg_intr_en;
+ uint32_t reg_event_ctrl;
+ uint32_t reg_event;
+
+ uint64_t cpu_time;
+ unsigned int cpu_time_valid;
+
+ qemu_irq irq;
+} CadenceTimerState;
+
+#define TYPE_CADENCE_TTC "cadence_ttc"
+OBJECT_DECLARE_SIMPLE_TYPE(CadenceTTCState, CADENCE_TTC)
+
+struct CadenceTTCState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ CadenceTimerState timer[3];
+};
+
+#endif
diff --git a/include/hw/timer/cmsdk-apb-dualtimer.h b/include/hw/timer/cmsdk-apb-dualtimer.h
index 9843a9dbb1..f3ec86c00b 100644
--- a/include/hw/timer/cmsdk-apb-dualtimer.h
+++ b/include/hw/timer/cmsdk-apb-dualtimer.h
@@ -16,7 +16,7 @@
* https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit
*
* QEMU interface:
- * + QOM property "pclk-frq": frequency at which the timer is clocked
+ * + Clock input "TIMCLK": clock (for both timers)
* + sysbus MMIO region 0: the register bank
* + sysbus IRQ 0: combined timer interrupt TIMINTC
 * + sysbus IRQ 1: timer block 1 interrupt TIMINT1
@@ -28,12 +28,12 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_DUALTIMER "cmsdk-apb-dualtimer"
-#define CMSDK_APB_DUALTIMER(obj) OBJECT_CHECK(CMSDKAPBDualTimer, (obj), \
- TYPE_CMSDK_APB_DUALTIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBDualTimer, CMSDK_APB_DUALTIMER)
-typedef struct CMSDKAPBDualTimer CMSDKAPBDualTimer;
/* One of the two identical timer modules in the dual-timer module */
typedef struct CMSDKAPBDualTimerModule {
@@ -62,7 +62,7 @@ struct CMSDKAPBDualTimer {
/*< public >*/
MemoryRegion iomem;
qemu_irq timerintc;
- uint32_t pclk_frq;
+ Clock *timclk;
CMSDKAPBDualTimerModule timermod[CMSDK_APB_DUALTIMER_NUM_MODULES];
uint32_t timeritcr;
diff --git a/include/hw/timer/cmsdk-apb-timer.h b/include/hw/timer/cmsdk-apb-timer.h
index f24bda6a46..2dd615d1be 100644
--- a/include/hw/timer/cmsdk-apb-timer.h
+++ b/include/hw/timer/cmsdk-apb-timer.h
@@ -12,49 +12,34 @@
#ifndef CMSDK_APB_TIMER_H
#define CMSDK_APB_TIMER_H
-#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_TIMER "cmsdk-apb-timer"
-#define CMSDK_APB_TIMER(obj) OBJECT_CHECK(CMSDKAPBTIMER, (obj), \
- TYPE_CMSDK_APB_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBTimer, CMSDK_APB_TIMER)
-typedef struct {
+/*
+ * QEMU interface:
+ * + Clock input "pclk": clock for the timer
+ * + sysbus MMIO region 0: the register bank
+ * + sysbus IRQ 0: timer interrupt TIMERINT
+ */
+struct CMSDKAPBTimer {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
qemu_irq timerint;
- uint32_t pclk_frq;
struct ptimer_state *timer;
+ Clock *pclk;
uint32_t ctrl;
uint32_t value;
uint32_t reload;
uint32_t intstatus;
-} CMSDKAPBTIMER;
-
-/**
- * cmsdk_apb_timer_create - convenience function to create TYPE_CMSDK_APB_TIMER
- * @addr: location in system memory to map registers
- * @pclk_frq: frequency in Hz of the PCLK clock (used for calculating baud rate)
- */
-static inline DeviceState *cmsdk_apb_timer_create(hwaddr addr,
- qemu_irq timerint,
- uint32_t pclk_frq)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new(TYPE_CMSDK_APB_TIMER);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_uint32(dev, "pclk-frq", pclk_frq);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, timerint);
- return dev;
-}
+};
#endif
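
With the pclk-frq property and the cmsdk_apb_timer_create() helper removed above, callers are expected to wire a Clock instead. A sketch of the equivalent creation sequence, with the pclk source, address and IRQ as assumptions:

    /* Illustrative replacement for the deleted convenience helper. */
    DeviceState *dev = qdev_new(TYPE_CMSDK_APB_TIMER);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    qdev_connect_clock_in(dev, "pclk", pclk);  /* replaces the pclk-frq property */
    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, addr);
    sysbus_connect_irq(sbd, 0, timerint);
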
diff --git a/include/hw/timer/digic-timer.h b/include/hw/timer/digic-timer.h
index d9e67fe291..da82fb4663 100644
--- a/include/hw/timer/digic-timer.h
+++ b/include/hw/timer/digic-timer.h
@@ -20,9 +20,10 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "qom/object.h"
#define TYPE_DIGIC_TIMER "digic-timer"
-#define DIGIC_TIMER(obj) OBJECT_CHECK(DigicTimerState, (obj), TYPE_DIGIC_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(DigicTimerState, DIGIC_TIMER)
#define DIGIC_TIMER_CONTROL 0x00
#define DIGIC_TIMER_CONTROL_RST 0x80000000
@@ -30,7 +31,7 @@
#define DIGIC_TIMER_RELVALUE 0x08
#define DIGIC_TIMER_VALUE 0x0c
-typedef struct DigicTimerState {
+struct DigicTimerState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -40,6 +41,6 @@ typedef struct DigicTimerState {
uint32_t control;
uint32_t relvalue;
-} DigicTimerState;
+};
#endif /* HW_TIMER_DIGIC_TIMER_H */
diff --git a/include/hw/timer/grlib_gptimer.h b/include/hw/timer/grlib_gptimer.h
new file mode 100644
index 0000000000..e56f1b8bf3
--- /dev/null
+++ b/include/hw/timer/grlib_gptimer.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU GRLIB GPTimer
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2024 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef GRLIB_GPTIMER_H
+#define GRLIB_GPTIMER_H
+
+#define TYPE_GRLIB_GPTIMER "grlib-gptimer"
+
+#endif
diff --git a/include/hw/timer/hpet.h b/include/hw/timer/hpet.h
index f04c4d3238..d17a8d4319 100644
--- a/include/hw/timer/hpet.h
+++ b/include/hw/timer/hpet.h
@@ -78,6 +78,8 @@ extern struct hpet_fw_config hpet_cfg;
#define TYPE_HPET "hpet"
+#define HPET_INTCAP "hpet-intcap"
+
static inline bool hpet_find(void)
{
return object_resolve_path_type("", TYPE_HPET, NULL);
diff --git a/include/hw/timer/i8254.h b/include/hw/timer/i8254.h
index e75b4a5a08..8402caad30 100644
--- a/include/hw/timer/i8254.h
+++ b/include/hw/timer/i8254.h
@@ -28,6 +28,7 @@
#include "hw/qdev-properties.h"
#include "hw/isa/isa.h"
#include "qapi/error.h"
+#include "qom/object.h"
#define PIT_FREQ 1193182
@@ -39,12 +40,7 @@ typedef struct PITChannelInfo {
} PITChannelInfo;
#define TYPE_PIT_COMMON "pit-common"
-#define PIT_COMMON(obj) \
- OBJECT_CHECK(PITCommonState, (obj), TYPE_PIT_COMMON)
-#define PIT_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(PITCommonClass, (klass), TYPE_PIT_COMMON)
-#define PIT_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PITCommonClass, (obj), TYPE_PIT_COMMON)
+OBJECT_DECLARE_TYPE(PITCommonState, PITCommonClass, PIT_COMMON)
#define TYPE_I8254 "isa-pit"
#define TYPE_KVM_I8254 "kvm-pit"
@@ -60,7 +56,8 @@ static inline ISADevice *i8254_pit_init(ISABus *bus, int base, int isa_irq,
qdev_prop_set_uint32(dev, "iobase", base);
isa_realize_and_unref(d, bus, &error_fatal);
qdev_connect_gpio_out(dev, 0,
- isa_irq >= 0 ? isa_get_irq(d, isa_irq) : alt_irq);
+ isa_irq >= 0 ? isa_bus_get_irq(bus, isa_irq)
+ : alt_irq);
return d;
}
diff --git a/include/hw/timer/i8254_internal.h b/include/hw/timer/i8254_internal.h
index 3db462aecd..1761deb4cf 100644
--- a/include/hw/timer/i8254_internal.h
+++ b/include/hw/timer/i8254_internal.h
@@ -50,22 +50,22 @@ typedef struct PITChannelState {
uint32_t irq_disabled;
} PITChannelState;
-typedef struct PITCommonState {
+struct PITCommonState {
ISADevice dev;
MemoryRegion ioports;
uint32_t iobase;
PITChannelState channels[3];
-} PITCommonState;
+};
-typedef struct PITCommonClass {
- ISADeviceClass parent_class;
+struct PITCommonClass {
+ DeviceClass parent_class;
void (*set_channel_gate)(PITCommonState *s, PITChannelState *sc, int val);
void (*get_channel_info)(PITCommonState *s, PITChannelState *sc,
PITChannelInfo *info);
void (*pre_save)(PITCommonState *s);
void (*post_load)(PITCommonState *s);
-} PITCommonClass;
+};
int pit_get_out(PITChannelState *s, int64_t current_time);
int64_t pit_get_next_transition_time(PITChannelState *s, int64_t current_time);
diff --git a/include/hw/timer/ibex_timer.h b/include/hw/timer/ibex_timer.h
new file mode 100644
index 0000000000..41f5c82a92
--- /dev/null
+++ b/include/hw/timer/ibex_timer.h
@@ -0,0 +1,55 @@
+/*
+ * QEMU lowRISC Ibex Timer device
+ *
+ * Copyright (c) 2021 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_IBEX_TIMER_H
+#define HW_IBEX_TIMER_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_IBEX_TIMER "ibex-timer"
+OBJECT_DECLARE_SIMPLE_TYPE(IbexTimerState, IBEX_TIMER)
+
+struct IbexTimerState {
+ /* <private> */
+ SysBusDevice parent_obj;
+ uint64_t mtimecmp;
+ QEMUTimer *mtimer; /* Internal timer for M-mode interrupt */
+
+ /* <public> */
+ MemoryRegion mmio;
+
+ uint32_t timer_ctrl;
+ uint32_t timer_cfg0;
+ uint32_t timer_compare_lower0;
+ uint32_t timer_compare_upper0;
+ uint32_t timer_intr_enable;
+ uint32_t timer_intr_state;
+
+ uint32_t timebase_freq;
+
+ qemu_irq irq;
+
+ qemu_irq m_timer_irq;
+};
+#endif /* HW_IBEX_TIMER_H */
diff --git a/include/hw/timer/imx_epit.h b/include/hw/timer/imx_epit.h
index 0730ac35e6..79aff0cec2 100644
--- a/include/hw/timer/imx_epit.h
+++ b/include/hw/timer/imx_epit.h
@@ -32,6 +32,7 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
/*
* EPIT: Enhanced periodic interrupt timer
@@ -42,7 +43,7 @@
#define CR_OCIEN (1 << 2)
#define CR_RLD (1 << 3)
#define CR_PRESCALE_SHIFT (4)
-#define CR_PRESCALE_MASK (0xfff)
+#define CR_PRESCALE_BITS (12)
#define CR_SWR (1 << 16)
#define CR_IOVW (1 << 17)
#define CR_DBGEN (1 << 18)
@@ -50,14 +51,16 @@
#define CR_DOZEN (1 << 20)
#define CR_STOPEN (1 << 21)
#define CR_CLKSRC_SHIFT (24)
-#define CR_CLKSRC_MASK (0x3 << CR_CLKSRC_SHIFT)
+#define CR_CLKSRC_BITS (2)
+
+#define SR_OCIF (1 << 0)
#define EPIT_TIMER_MAX 0XFFFFFFFFUL
#define TYPE_IMX_EPIT "imx.epit"
-#define IMX_EPIT(obj) OBJECT_CHECK(IMXEPITState, (obj), TYPE_IMX_EPIT)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXEPITState, IMX_EPIT)
-typedef struct IMXEPITState{
+struct IMXEPITState {
/*< private >*/
SysBusDevice parent_obj;
@@ -71,10 +74,8 @@ typedef struct IMXEPITState{
uint32_t sr;
uint32_t lr;
uint32_t cmp;
- uint32_t cnt;
- uint32_t freq;
qemu_irq irq;
-} IMXEPITState;
+};
#endif /* IMX_EPIT_H */
diff --git a/include/hw/timer/imx_gpt.h b/include/hw/timer/imx_gpt.h
index 20ccb327c4..5a1230da35 100644
--- a/include/hw/timer/imx_gpt.h
+++ b/include/hw/timer/imx_gpt.h
@@ -32,6 +32,7 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "hw/misc/imx_ccm.h"
+#include "qom/object.h"
/*
* GPT : General purpose timer
@@ -77,13 +78,16 @@
#define TYPE_IMX25_GPT "imx25.gpt"
#define TYPE_IMX31_GPT "imx31.gpt"
#define TYPE_IMX6_GPT "imx6.gpt"
+#define TYPE_IMX6UL_GPT "imx6ul.gpt"
#define TYPE_IMX7_GPT "imx7.gpt"
#define TYPE_IMX_GPT TYPE_IMX25_GPT
-#define IMX_GPT(obj) OBJECT_CHECK(IMXGPTState, (obj), TYPE_IMX_GPT)
+typedef struct IMXGPTState IMXGPTState;
+DECLARE_INSTANCE_CHECKER(IMXGPTState, IMX_GPT,
+ TYPE_IMX_GPT)
-typedef struct IMXGPTState{
+struct IMXGPTState {
/*< private >*/
SysBusDevice parent_obj;
@@ -111,6 +115,6 @@ typedef struct IMXGPTState{
qemu_irq irq;
const IMXClk *clocks;
-} IMXGPTState;
+};
#endif /* IMX_GPT_H */
diff --git a/include/hw/timer/mss-timer.h b/include/hw/timer/mss-timer.h
index e5a784b27e..da38512904 100644
--- a/include/hw/timer/mss-timer.h
+++ b/include/hw/timer/mss-timer.h
@@ -27,10 +27,10 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "qom/object.h"
#define TYPE_MSS_TIMER "mss-timer"
-#define MSS_TIMER(obj) OBJECT_CHECK(MSSTimerState, \
- (obj), TYPE_MSS_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(MSSTimerState, MSS_TIMER)
/*
* There are two 32-bit down counting timers.
@@ -52,12 +52,12 @@ struct Msf2Timer {
qemu_irq irq;
};
-typedef struct MSSTimerState {
+struct MSSTimerState {
SysBusDevice parent_obj;
MemoryRegion mmio;
uint32_t freq_hz;
struct Msf2Timer timers[NUM_TIMERS];
-} MSSTimerState;
+};
#endif /* HW_MSS_TIMER_H */
diff --git a/include/hw/timer/npcm7xx_timer.h b/include/hw/timer/npcm7xx_timer.h
new file mode 100644
index 0000000000..d45c051b56
--- /dev/null
+++ b/include/hw/timer/npcm7xx_timer.h
@@ -0,0 +1,113 @@
+/*
+ * Nuvoton NPCM7xx Timer Controller
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef NPCM7XX_TIMER_H
+#define NPCM7XX_TIMER_H
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+
+/* Each Timer Module (TIM) instance holds five 25 MHz timers. */
+#define NPCM7XX_TIMERS_PER_CTRL (5)
+
+/*
+ * Number of registers in our device state structure. Don't change this without
+ * incrementing the version_id in the vmstate.
+ */
+#define NPCM7XX_TIMER_NR_REGS (0x54 / sizeof(uint32_t))
+
+/* The basic watchdog timer period is 2^14 clock cycles. */
+#define NPCM7XX_WATCHDOG_BASETIME_SHIFT 14
+
+#define NPCM7XX_WATCHDOG_RESET_GPIO_OUT "npcm7xx-clk-watchdog-reset-gpio-out"
+
+typedef struct NPCM7xxTimerCtrlState NPCM7xxTimerCtrlState;
+
+/**
+ * struct NPCM7xxBaseTimer - Basic functionality that both regular timer and
+ * watchdog timer use.
+ * @qtimer: QEMU timer that notifies us on expiration.
+ * @expires_ns: Absolute virtual expiration time.
+ * @remaining_ns: Remaining time until expiration if timer is paused.
+ */
+typedef struct NPCM7xxBaseTimer {
+ QEMUTimer qtimer;
+ int64_t expires_ns;
+ int64_t remaining_ns;
+} NPCM7xxBaseTimer;
+
+/**
+ * struct NPCM7xxTimer - Individual timer state.
+ * @ctrl: The timer module that owns this timer.
+ * @irq: GIC interrupt line to fire on expiration (if enabled).
+ * @base_timer: The basic timer functionality for this timer.
+ * @tcsr: The Timer Control and Status Register.
+ * @ticr: The Timer Initial Count Register.
+ */
+typedef struct NPCM7xxTimer {
+ NPCM7xxTimerCtrlState *ctrl;
+
+ qemu_irq irq;
+ NPCM7xxBaseTimer base_timer;
+
+ uint32_t tcsr;
+ uint32_t ticr;
+} NPCM7xxTimer;
+
+/**
+ * struct NPCM7xxWatchdogTimer - The watchdog timer state.
+ * @ctrl: The timer module that owns this timer.
+ * @irq: GIC interrupt line to fire on expiration (if enabled).
+ * @reset_signal: The GPIO used to send a reset signal.
+ * @base_timer: The basic timer functionality for this timer.
+ * @wtcr: The Watchdog Timer Control Register.
+ */
+typedef struct NPCM7xxWatchdogTimer {
+ NPCM7xxTimerCtrlState *ctrl;
+
+ qemu_irq irq;
+ qemu_irq reset_signal;
+ NPCM7xxBaseTimer base_timer;
+
+ uint32_t wtcr;
+} NPCM7xxWatchdogTimer;
+
+/**
+ * struct NPCM7xxTimerCtrlState - Timer Module device state.
+ * @parent: System bus device.
+ * @iomem: Memory region through which registers are accessed.
+ * @index: The index of this timer module.
+ * @tisr: The Timer Interrupt Status Register.
+ * @timer: The five individual timers managed by this module.
+ * @watchdog_timer: The watchdog timer managed by this module.
+ */
+struct NPCM7xxTimerCtrlState {
+ SysBusDevice parent;
+
+ MemoryRegion iomem;
+
+ uint32_t tisr;
+
+ Clock *clock;
+ NPCM7xxTimer timer[NPCM7XX_TIMERS_PER_CTRL];
+ NPCM7xxWatchdogTimer watchdog_timer;
+};
+
+#define TYPE_NPCM7XX_TIMER "npcm7xx-timer"
+#define NPCM7XX_TIMER(obj) \
+ OBJECT_CHECK(NPCM7xxTimerCtrlState, (obj), TYPE_NPCM7XX_TIMER)
+
+#endif /* NPCM7XX_TIMER_H */
diff --git a/include/hw/timer/nrf51_timer.h b/include/hw/timer/nrf51_timer.h
index eb6815f21d..76827c11dc 100644
--- a/include/hw/timer/nrf51_timer.h
+++ b/include/hw/timer/nrf51_timer.h
@@ -15,8 +15,9 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define TYPE_NRF51_TIMER "nrf51_soc.timer"
-#define NRF51_TIMER(obj) OBJECT_CHECK(NRF51TimerState, (obj), TYPE_NRF51_TIMER)
+OBJECT_DECLARE_SIMPLE_TYPE(NRF51TimerState, NRF51_TIMER)
#define NRF51_TIMER_REG_COUNT 4
@@ -53,7 +54,7 @@
#define NRF51_TIMER_REG_CC0 0x540
#define NRF51_TIMER_REG_CC3 0x54C
-typedef struct NRF51TimerState {
+struct NRF51TimerState {
SysBusDevice parent_obj;
MemoryRegion iomem;
@@ -75,7 +76,7 @@ typedef struct NRF51TimerState {
uint32_t bitmode;
uint32_t prescaler;
-} NRF51TimerState;
+};
#endif
diff --git a/include/hw/timer/renesas_cmt.h b/include/hw/timer/renesas_cmt.h
index e28a15cb38..1c0b65c1d5 100644
--- a/include/hw/timer/renesas_cmt.h
+++ b/include/hw/timer/renesas_cmt.h
@@ -11,16 +11,19 @@
#include "qemu/timer.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_RENESAS_CMT "renesas-cmt"
-#define RCMT(obj) OBJECT_CHECK(RCMTState, (obj), TYPE_RENESAS_CMT)
+typedef struct RCMTState RCMTState;
+DECLARE_INSTANCE_CHECKER(RCMTState, RCMT,
+ TYPE_RENESAS_CMT)
enum {
CMT_CH = 2,
CMT_NR_IRQ = 1 * CMT_CH
};
-typedef struct RCMTState {
+struct RCMTState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -35,6 +38,6 @@ typedef struct RCMTState {
int64_t tick[CMT_CH];
qemu_irq cmi[CMT_CH];
QEMUTimer timer[CMT_CH];
-} RCMTState;
+};
#endif
diff --git a/include/hw/timer/renesas_tmr.h b/include/hw/timer/renesas_tmr.h
index cf3baa7a28..caf7eec0dc 100644
--- a/include/hw/timer/renesas_tmr.h
+++ b/include/hw/timer/renesas_tmr.h
@@ -11,9 +11,12 @@
#include "qemu/timer.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_RENESAS_TMR "renesas-tmr"
-#define RTMR(obj) OBJECT_CHECK(RTMRState, (obj), TYPE_RENESAS_TMR)
+typedef struct RTMRState RTMRState;
+DECLARE_INSTANCE_CHECKER(RTMRState, RTMR,
+ TYPE_RENESAS_TMR)
enum timer_event {
cmia = 0,
@@ -28,7 +31,7 @@ enum {
TMR_NR_IRQ = 3 * TMR_CH
};
-typedef struct RTMRState {
+struct RTMRState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
@@ -50,6 +53,6 @@ typedef struct RTMRState {
qemu_irq cmib[TMR_CH];
qemu_irq ovi[TMR_CH];
QEMUTimer timer[TMR_CH];
-} RTMRState;
+};
#endif
diff --git a/include/hw/timer/sifive_pwm.h b/include/hw/timer/sifive_pwm.h
new file mode 100644
index 0000000000..6a8cf7b29e
--- /dev/null
+++ b/include/hw/timer/sifive_pwm.h
@@ -0,0 +1,62 @@
+/*
+ * SiFive PWM
+ *
+ * Copyright (c) 2020 Western Digital
+ *
+ * Author: Alistair Francis <alistair.francis@wdc.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef HW_SIFIVE_PWM_H
+#define HW_SIFIVE_PWM_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+
+#define TYPE_SIFIVE_PWM "sifive-pwm"
+
+#define SIFIVE_PWM(obj) \
+ OBJECT_CHECK(SiFivePwmState, (obj), TYPE_SIFIVE_PWM)
+
+#define SIFIVE_PWM_CHANS 4
+#define SIFIVE_PWM_IRQS SIFIVE_PWM_CHANS
+
+typedef struct SiFivePwmState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion mmio;
+ QEMUTimer timer[SIFIVE_PWM_CHANS];
+ /*
+ * If the enable bit(s) are set, this is the tick count at which
+ * pwmcount was 0; if they are clear, this is the number of ticks
+ * held in pwmcount.
+ */
+ uint64_t tick_offset;
+ uint64_t freq_hz;
+
+ uint32_t pwmcfg;
+ uint32_t pwmcmp[SIFIVE_PWM_CHANS];
+
+ qemu_irq irqs[SIFIVE_PWM_IRQS];
+} SiFivePwmState;
+
+#endif /* HW_SIFIVE_PWM_H */
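
[Editor's note: a hedged sketch of how tick_offset could be interpreted under the two cases described in the struct comment; the helpers are illustrative, not the actual hw/timer/sifive_pwm.c code, and assume freq_hz fits in 32 bits.]

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "hw/timer/sifive_pwm.h"

/* Current tick count derived from the virtual clock and freq_hz. */
static uint64_t sifive_pwm_ticks_now(SiFivePwmState *s)
{
    /* Assumes freq_hz fits in 32 bits for this sketch. */
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    s->freq_hz, NANOSECONDS_PER_SECOND);
}

/* pwmcount counts up from tick_offset while enabled, and is frozen otherwise. */
static uint64_t sifive_pwm_pwmcount(SiFivePwmState *s, bool enabled)
{
    return enabled ? sifive_pwm_ticks_now(s) - s->tick_offset
                   : s->tick_offset;
}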
diff --git a/include/hw/timer/sse-counter.h b/include/hw/timer/sse-counter.h
new file mode 100644
index 0000000000..b433e58d37
--- /dev/null
+++ b/include/hw/timer/sse-counter.h
@@ -0,0 +1,105 @@
+/*
+ * Arm SSE Subsystem System Counter
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System counter" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * QEMU interface:
+ * + Clock input "CLK": clock
+ * + sysbus MMIO region 0: the control register frame
+ * + sysbus MMIO region 1: the status register frame
+ *
+ * Consumers of the system counter's timestamp, such as the SSE
+ * System Timer device, can also use the APIs sse_counter_for_timestamp(),
+ * sse_counter_tick_to_time() and sse_counter_register_consumer() to
+ * interact with an instance of the System Counter. Generally the
+ * consumer device should have a QOM link property which the board
+ * code can set to the appropriate instance of the system counter.
+ */
+
+#ifndef SSE_COUNTER_H
+#define SSE_COUNTER_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "qemu/notify.h"
+
+#define TYPE_SSE_COUNTER "sse-counter"
+OBJECT_DECLARE_SIMPLE_TYPE(SSECounter, SSE_COUNTER)
+
+struct SSECounter {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion control_mr;
+ MemoryRegion status_mr;
+ Clock *clk;
+ NotifierList notifier_list;
+
+ uint32_t cntcr;
+ uint32_t cntscr0;
+
+ /*
+ * These are used for handling clock frequency changes: they are a
+ * tuple of (QEMU_CLOCK_VIRTUAL timestamp, CNTCV at that time),
+ * taken when the clock frequency changes. sse_cntcv() needs them
+ * to calculate the current CNTCV.
+ */
+ uint64_t ns_then;
+ uint64_t ticks_then;
+};
+
+/*
+ * These functions are the interface by which a consumer of
+ * the system timestamp (such as the SSE system timer device)
+ * can communicate with the SSECounter.
+ */
+
+/**
+ * sse_counter_for_timestamp:
+ * @counter: SSECounter
+ * @ns: timestamp of QEMU_CLOCK_VIRTUAL in nanoseconds
+ *
+ * Returns the value of the timestamp counter at the specified
+ * point in time (assuming that no changes to scale factor, enable, etc
+ * happen in the meantime).
+ */
+uint64_t sse_counter_for_timestamp(SSECounter *counter, uint64_t ns);
+
+/**
+ * sse_counter_tick_to_time:
+ * @counter: SSECounter
+ * @tick: tick value
+ *
+ * Returns the time (a QEMU_CLOCK_VIRTUAL timestamp in nanoseconds)
+ * when the timestamp counter will reach the specified tick count.
+ * If the counter is not currently running, returns UINT64_MAX.
+ */
+uint64_t sse_counter_tick_to_time(SSECounter *counter, uint64_t tick);
+
+/**
+ * sse_counter_register_consumer:
+ * @counter: SSECounter
+ * @notifier: Notifier which is notified on counter changes
+ *
+ * Registers @notifier with the SSECounter. When the counter's
+ * configuration changes in a way that might invalidate information
+ * previously returned via sse_counter_for_timestamp() or
+ * sse_counter_tick_to_time(), the notifier will be called.
+ * Devices which consume the timestamp counter can use this as
+ * a cue to recalculate timer events.
+ */
+void sse_counter_register_consumer(SSECounter *counter, Notifier *notifier);
+
+#endif
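
[Editor's note: a hedged consumer-side sketch of the three APIs declared above, such as an SSE System Timer-style device might use; the function names and the empty notifier body are illustrative only.]

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "hw/timer/sse-counter.h"

/* Called whenever the counter's configuration changes. */
static void my_consumer_counter_changed(Notifier *n, void *data)
{
    /* Recompute any pending deadline using the APIs below. */
}

/* Register for change notifications, typically at realize time. */
static void my_consumer_attach(SSECounter *counter, Notifier *notifier)
{
    notifier->notify = my_consumer_counter_changed;
    sse_counter_register_consumer(counter, notifier);
}

/* Arm a QEMUTimer to fire when the counter reaches target_tick. */
static void my_consumer_arm(SSECounter *counter, QEMUTimer *qtimer,
                            uint64_t target_tick)
{
    uint64_t when = sse_counter_tick_to_time(counter, target_tick);

    if (when != UINT64_MAX) {
        timer_mod_ns(qtimer, when);
    }
}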
diff --git a/include/hw/timer/sse-timer.h b/include/hw/timer/sse-timer.h
new file mode 100644
index 0000000000..265ad32400
--- /dev/null
+++ b/include/hw/timer/sse-timer.h
@@ -0,0 +1,54 @@
+/*
+ * Arm SSE Subsystem System Timer
+ *
+ * Copyright (c) 2020 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/*
+ * This is a model of the "System timer" which is documented in
+ * the Arm SSE-123 Example Subsystem Technical Reference Manual:
+ * https://developer.arm.com/documentation/101370/latest/
+ *
+ * QEMU interface:
+ * + QOM property "counter": link property to be set to the
+ * TYPE_SSE_COUNTER timestamp counter device this timer runs off
+ * + sysbus MMIO region 0: the register bank
+ * + sysbus IRQ 0: timer interrupt
+ */
+
+#ifndef SSE_TIMER_H
+#define SSE_TIMER_H
+
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "qom/object.h"
+#include "hw/timer/sse-counter.h"
+
+#define TYPE_SSE_TIMER "sse-timer"
+OBJECT_DECLARE_SIMPLE_TYPE(SSETimer, SSE_TIMER)
+
+struct SSETimer {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq irq;
+ SSECounter *counter;
+ QEMUTimer timer;
+ Notifier counter_notifier;
+
+ uint32_t cntfrq;
+ uint32_t cntp_ctl;
+ uint64_t cntp_cval;
+ uint64_t cntp_aival;
+ uint32_t cntp_aival_ctl;
+ uint32_t cntp_aival_reload;
+};
+
+#endif
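
[Editor's note: a hedged board-code sketch of wiring the "counter" link property described above; the function and variable names are illustrative, and the counter's "CLK" clock input and MMIO wiring are omitted here although a real board must connect them before realize.]

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/timer/sse-counter.h"
#include "hw/timer/sse-timer.h"

static void my_board_add_sse_timer(hwaddr base, qemu_irq timer_irq)
{
    SSECounter *counter = SSE_COUNTER(qdev_new(TYPE_SSE_COUNTER));
    SSETimer *timer = SSE_TIMER(qdev_new(TYPE_SSE_TIMER));

    /* Clock and MMIO wiring of the counter itself is omitted in this sketch. */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(counter), &error_fatal);

    /* Point the timer at its counter before realizing it. */
    object_property_set_link(OBJECT(timer), "counter",
                             OBJECT(counter), &error_abort);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(timer), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(timer), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(timer), 0, timer_irq);
}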
diff --git a/include/hw/timer/stellaris-gptm.h b/include/hw/timer/stellaris-gptm.h
new file mode 100644
index 0000000000..fde1fc6f0c
--- /dev/null
+++ b/include/hw/timer/stellaris-gptm.h
@@ -0,0 +1,51 @@
+/*
+ * Luminary Micro Stellaris General Purpose Timer Module
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifndef HW_TIMER_STELLARIS_GPTM_H
+#define HW_TIMER_STELLARIS_GPTM_H
+
+#include "qom/object.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/clock.h"
+
+#define TYPE_STELLARIS_GPTM "stellaris-gptm"
+OBJECT_DECLARE_SIMPLE_TYPE(gptm_state, STELLARIS_GPTM)
+
+/*
+ * QEMU interface:
+ * + sysbus MMIO region 0: register bank
+ * + sysbus IRQ 0: timer interrupt
+ * + unnamed GPIO output 0: trigger output for the ADC
+ * + Clock input "clk": the 32-bit countdown timer runs at this speed
+ */
+struct gptm_state {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ uint32_t config;
+ uint32_t mode[2];
+ uint32_t control;
+ uint32_t state;
+ uint32_t mask;
+ uint32_t load[2];
+ uint32_t match[2];
+ uint32_t prescale[2];
+ uint32_t match_prescale[2];
+ uint32_t rtc;
+ int64_t tick[2];
+ struct gptm_state *opaque[2];
+ QEMUTimer *timer[2];
+ /* The timers have an alternate output used to trigger the ADC. */
+ qemu_irq trigger;
+ qemu_irq irq;
+ Clock *clk;
+};
+
+#endif
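
[Editor's note: the "clk" clock input listed in the interface comment above is connected by board code; a hedged sketch follows, with a placeholder function name and clock source.]

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/qdev-clock.h"
#include "hw/timer/stellaris-gptm.h"

static DeviceState *my_board_add_gptm(Clock *sysclk)
{
    DeviceState *gptm = qdev_new(TYPE_STELLARIS_GPTM);

    /* The clock input must be connected before the device is realized. */
    qdev_connect_clock_in(gptm, "clk", sysclk);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(gptm), &error_fatal);
    return gptm;
}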
diff --git a/include/hw/timer/stm32f2xx_timer.h b/include/hw/timer/stm32f2xx_timer.h
index a96bc08b1b..90f40f1746 100644
--- a/include/hw/timer/stm32f2xx_timer.h
+++ b/include/hw/timer/stm32f2xx_timer.h
@@ -27,6 +27,7 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
+#include "qom/object.h"
#define TIM_CR1 0x00
#define TIM_CR2 0x04
@@ -61,10 +62,11 @@
#define TIM_DIER_UIE 1
#define TYPE_STM32F2XX_TIMER "stm32f2xx-timer"
-#define STM32F2XXTIMER(obj) OBJECT_CHECK(STM32F2XXTimerState, \
- (obj), TYPE_STM32F2XX_TIMER)
+typedef struct STM32F2XXTimerState STM32F2XXTimerState;
+DECLARE_INSTANCE_CHECKER(STM32F2XXTimerState, STM32F2XXTIMER,
+ TYPE_STM32F2XX_TIMER)
-typedef struct STM32F2XXTimerState {
+struct STM32F2XXTimerState {
/* <private> */
SysBusDevice parent_obj;
@@ -95,6 +97,6 @@ typedef struct STM32F2XXTimerState {
uint32_t tim_dcr;
uint32_t tim_dmar;
uint32_t tim_or;
-} STM32F2XXTimerState;
+};
#endif /* HW_STM32F2XX_TIMER_H */
diff --git a/include/hw/tricore/tc27x_soc.h b/include/hw/tricore/tc27x_soc.h
new file mode 100644
index 0000000000..dd3a7485c8
--- /dev/null
+++ b/include/hw/tricore/tc27x_soc.h
@@ -0,0 +1,129 @@
+/*
+ * Infineon tc27x SoC System emulation.
+ *
+ * Copyright (c) 2020 Andreas Konopik <andreas.konopik@efs-auto.de>
+ * Copyright (c) 2020 David Brenken <david.brenken@efs-auto.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TC27X_SOC_H
+#define TC27X_SOC_H
+
+#include "hw/sysbus.h"
+#include "target/tricore/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_TC27X_SOC ("tc27x-soc")
+OBJECT_DECLARE_TYPE(TC27XSoCState, TC27XSoCClass, TC27X_SOC)
+
+typedef struct TC27XSoCCPUMemState {
+
+ MemoryRegion dspr;
+ MemoryRegion pspr;
+
+ MemoryRegion dcache;
+ MemoryRegion dtag;
+ MemoryRegion pcache;
+ MemoryRegion ptag;
+
+} TC27XSoCCPUMemState;
+
+typedef struct TC27XSoCFlashMemState {
+
+ MemoryRegion pflash0_c;
+ MemoryRegion pflash1_c;
+ MemoryRegion pflash0_u;
+ MemoryRegion pflash1_u;
+ MemoryRegion dflash0;
+ MemoryRegion dflash1;
+ MemoryRegion olda_c;
+ MemoryRegion olda_u;
+ MemoryRegion brom_c;
+ MemoryRegion brom_u;
+ MemoryRegion lmuram_c;
+ MemoryRegion lmuram_u;
+ MemoryRegion emem_c;
+ MemoryRegion emem_u;
+
+} TC27XSoCFlashMemState;
+
+typedef struct TC27XSoCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ TriCoreCPU cpu;
+
+ MemoryRegion dsprX;
+ MemoryRegion psprX;
+
+ TC27XSoCCPUMemState cpu0mem;
+ TC27XSoCCPUMemState cpu1mem;
+ TC27XSoCCPUMemState cpu2mem;
+
+ TC27XSoCFlashMemState flashmem;
+
+} TC27XSoCState;
+
+typedef struct MemmapEntry {
+ hwaddr base;
+ hwaddr size;
+} MemmapEntry;
+
+typedef struct TC27XSoCClass {
+ DeviceClass parent_class;
+
+ const char *name;
+ const char *cpu_type;
+ const MemmapEntry *memmap;
+ uint32_t num_cpus;
+} TC27XSoCClass;
+
+enum {
+ TC27XD_DSPR2,
+ TC27XD_DCACHE2,
+ TC27XD_DTAG2,
+ TC27XD_PSPR2,
+ TC27XD_PCACHE2,
+ TC27XD_PTAG2,
+ TC27XD_DSPR1,
+ TC27XD_DCACHE1,
+ TC27XD_DTAG1,
+ TC27XD_PSPR1,
+ TC27XD_PCACHE1,
+ TC27XD_PTAG1,
+ TC27XD_DSPR0,
+ TC27XD_PSPR0,
+ TC27XD_PCACHE0,
+ TC27XD_PTAG0,
+ TC27XD_PFLASH0_C,
+ TC27XD_PFLASH1_C,
+ TC27XD_OLDA_C,
+ TC27XD_BROM_C,
+ TC27XD_LMURAM_C,
+ TC27XD_EMEM_C,
+ TC27XD_PFLASH0_U,
+ TC27XD_PFLASH1_U,
+ TC27XD_DFLASH0,
+ TC27XD_DFLASH1,
+ TC27XD_OLDA_U,
+ TC27XD_BROM_U,
+ TC27XD_LMURAM_U,
+ TC27XD_EMEM_U,
+ TC27XD_PSPRX,
+ TC27XD_DSPRX,
+};
+
+#endif
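
[Editor's note: a hedged sketch of how the MemmapEntry table and the region indices enumerated above might be consumed when mapping an SoC memory; the helper name is illustrative, not the actual hw/tricore code.]

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "hw/tricore/tc27x_soc.h"

/* Create a RAM region sized and placed according to one memmap entry. */
static void tc27x_init_ram_at(Object *owner, MemoryRegion *sysmem,
                              MemoryRegion *mr, const char *name,
                              const MemmapEntry *memmap, int index)
{
    memory_region_init_ram(mr, owner, name, memmap[index].size, &error_fatal);
    memory_region_add_subregion(sysmem, memmap[index].base, mr);
}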
diff --git a/include/hw/tricore/triboard.h b/include/hw/tricore/triboard.h
new file mode 100644
index 0000000000..4fdd2d7d97
--- /dev/null
+++ b/include/hw/tricore/triboard.h
@@ -0,0 +1,48 @@
+/*
+ * Infineon TriBoard System emulation.
+ *
+ * Copyright (c) 2020 Andreas Konopik <andreas.konopik@efs-auto.de>
+ * Copyright (c) 2020 David Brenken <david.brenken@efs-auto.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "qom/object.h"
+
+#include "hw/tricore/tc27x_soc.h"
+
+#define TYPE_TRIBOARD_MACHINE MACHINE_TYPE_NAME("triboard")
+typedef struct TriBoardMachineState TriBoardMachineState;
+typedef struct TriBoardMachineClass TriBoardMachineClass;
+DECLARE_OBJ_CHECKERS(TriBoardMachineState, TriBoardMachineClass,
+ TRIBOARD_MACHINE, TYPE_TRIBOARD_MACHINE)
+
+
+struct TriBoardMachineState {
+ MachineState parent;
+
+ TC27XSoCState tc27x_soc;
+};
+
+struct TriBoardMachineClass {
+ MachineClass parent_obj;
+
+ const char *name;
+ const char *desc;
+ const char *soc_name;
+};
diff --git a/include/exec/tb-context.h b/include/hw/tricore/tricore_testdevice.h
index feb585e0a7..2c57b62f22 100644
--- a/include/exec/tb-context.h
+++ b/include/hw/tricore/tricore_testdevice.h
@@ -1,7 +1,5 @@
/*
- * Internal structs that QEMU exports to TCG
- *
- * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2018-2021 Bastian Koppelmann Paderborn University
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -17,26 +15,19 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef QEMU_TB_CONTEXT_H
-#define QEMU_TB_CONTEXT_H
-
-#include "qemu/thread.h"
-#include "qemu/qht.h"
-
-#define CODE_GEN_HTABLE_BITS 15
-#define CODE_GEN_HTABLE_SIZE (1 << CODE_GEN_HTABLE_BITS)
-
-typedef struct TranslationBlock TranslationBlock;
-typedef struct TBContext TBContext;
+#ifndef HW_TRICORE_TESTDEVICE_H
+#define HW_TRICORE_TESTDEVICE_H
-struct TBContext {
+#include "hw/sysbus.h"
- struct qht htable;
+#define TYPE_TRICORE_TESTDEVICE "tricore_testdevice"
+#define TRICORE_TESTDEVICE(obj) \
+ OBJECT_CHECK(TriCoreTestDeviceState, (obj), TYPE_TRICORE_TESTDEVICE)
- /* statistics */
- unsigned tb_flush_count;
-};
+typedef struct {
+ SysBusDevice parent_obj;
-extern TBContext tb_ctx;
+ MemoryRegion iomem;
+} TriCoreTestDeviceState;
#endif
diff --git a/include/hw/unicore32/puv3.h b/include/hw/unicore32/puv3.h
deleted file mode 100644
index f587a1f622..0000000000
--- a/include/hw/unicore32/puv3.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Misc PKUnity SoC declarations
- *
- * Copyright (C) 2010-2012 Guan Xuetao
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation, or any later version.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef QEMU_HW_PUV3_H
-#define QEMU_HW_PUV3_H
-
-#define PUV3_REGS_OFFSET (0x1000) /* 4K is reasonable */
-
-/* Hardware interrupts */
-#define PUV3_IRQS_NR (32)
-
-#define PUV3_IRQS_GPIOLOW0 (0)
-#define PUV3_IRQS_GPIOLOW1 (1)
-#define PUV3_IRQS_GPIOLOW2 (2)
-#define PUV3_IRQS_GPIOLOW3 (3)
-#define PUV3_IRQS_GPIOLOW4 (4)
-#define PUV3_IRQS_GPIOLOW5 (5)
-#define PUV3_IRQS_GPIOLOW6 (6)
-#define PUV3_IRQS_GPIOLOW7 (7)
-#define PUV3_IRQS_GPIOHIGH (8)
-#define PUV3_IRQS_PS2_KBD (22)
-#define PUV3_IRQS_PS2_AUX (23)
-#define PUV3_IRQS_OST0 (26)
-
-/* All puv3_*.c use DPRINTF for debug. */
-#ifdef DEBUG_PUV3
-#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__)
-#else
-#define DPRINTF(fmt, ...) do {} while (0)
-#endif
-
-#endif /* QEMU_HW_PUV3_H */
diff --git a/include/hw/usb.h b/include/hw/usb.h
index e29a37635b..d46d96779a 100644
--- a/include/hw/usb.h
+++ b/include/hw/usb.h
@@ -29,6 +29,8 @@
#include "hw/qdev-core.h"
#include "qemu/iov.h"
#include "qemu/queue.h"
+#include "qom/object.h"
+#include "qapi/error.h"
/* Constants related to the USB / PCI interaction */
#define USB_SBRN 0x60 /* Serial Bus Release Number Register */
@@ -65,42 +67,42 @@
//#define USB_STATE_POWERED 2
#define USB_STATE_DEFAULT 3
//#define USB_STATE_ADDRESS 4
-//#define USB_STATE_CONFIGURED 5
+//#define USB_STATE_CONFIGURED 5
#define USB_STATE_SUSPENDED 6
-#define USB_CLASS_AUDIO 1
-#define USB_CLASS_COMM 2
-#define USB_CLASS_HID 3
-#define USB_CLASS_PHYSICAL 5
-#define USB_CLASS_STILL_IMAGE 6
-#define USB_CLASS_PRINTER 7
-#define USB_CLASS_MASS_STORAGE 8
-#define USB_CLASS_HUB 9
-#define USB_CLASS_CDC_DATA 0x0a
-#define USB_CLASS_CSCID 0x0b
-#define USB_CLASS_CONTENT_SEC 0x0d
-#define USB_CLASS_APP_SPEC 0xfe
-#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_AUDIO 1
+#define USB_CLASS_COMM 2
+#define USB_CLASS_HID 3
+#define USB_CLASS_PHYSICAL 5
+#define USB_CLASS_STILL_IMAGE 6
+#define USB_CLASS_PRINTER 7
+#define USB_CLASS_MASS_STORAGE 8
+#define USB_CLASS_HUB 9
+#define USB_CLASS_CDC_DATA 0x0a
+#define USB_CLASS_CSCID 0x0b
+#define USB_CLASS_CONTENT_SEC 0x0d
+#define USB_CLASS_APP_SPEC 0xfe
+#define USB_CLASS_VENDOR_SPEC 0xff
#define USB_SUBCLASS_UNDEFINED 0
#define USB_SUBCLASS_AUDIO_CONTROL 1
#define USB_SUBCLASS_AUDIO_STREAMING 2
#define USB_SUBCLASS_AUDIO_MIDISTREAMING 3
-#define USB_DIR_OUT 0
-#define USB_DIR_IN 0x80
+#define USB_DIR_OUT 0
+#define USB_DIR_IN 0x80
-#define USB_TYPE_MASK (0x03 << 5)
-#define USB_TYPE_STANDARD (0x00 << 5)
-#define USB_TYPE_CLASS (0x01 << 5)
-#define USB_TYPE_VENDOR (0x02 << 5)
-#define USB_TYPE_RESERVED (0x03 << 5)
+#define USB_TYPE_MASK (0x03 << 5)
+#define USB_TYPE_STANDARD (0x00 << 5)
+#define USB_TYPE_CLASS (0x01 << 5)
+#define USB_TYPE_VENDOR (0x02 << 5)
+#define USB_TYPE_RESERVED (0x03 << 5)
-#define USB_RECIP_MASK 0x1f
-#define USB_RECIP_DEVICE 0x00
-#define USB_RECIP_INTERFACE 0x01
-#define USB_RECIP_ENDPOINT 0x02
-#define USB_RECIP_OTHER 0x03
+#define USB_RECIP_MASK 0x1f
+#define USB_RECIP_DEVICE 0x00
+#define USB_RECIP_INTERFACE 0x01
+#define USB_RECIP_ENDPOINT 0x02
+#define USB_RECIP_OTHER 0x03
#define DeviceRequest ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
#define DeviceOutRequest ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
@@ -125,28 +127,28 @@
#define EndpointOutRequest \
((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
-#define USB_REQ_GET_STATUS 0x00
-#define USB_REQ_CLEAR_FEATURE 0x01
-#define USB_REQ_SET_FEATURE 0x03
-#define USB_REQ_SET_ADDRESS 0x05
-#define USB_REQ_GET_DESCRIPTOR 0x06
-#define USB_REQ_SET_DESCRIPTOR 0x07
-#define USB_REQ_GET_CONFIGURATION 0x08
-#define USB_REQ_SET_CONFIGURATION 0x09
-#define USB_REQ_GET_INTERFACE 0x0A
-#define USB_REQ_SET_INTERFACE 0x0B
-#define USB_REQ_SYNCH_FRAME 0x0C
+#define USB_REQ_GET_STATUS 0x00
+#define USB_REQ_CLEAR_FEATURE 0x01
+#define USB_REQ_SET_FEATURE 0x03
+#define USB_REQ_SET_ADDRESS 0x05
+#define USB_REQ_GET_DESCRIPTOR 0x06
+#define USB_REQ_SET_DESCRIPTOR 0x07
+#define USB_REQ_GET_CONFIGURATION 0x08
+#define USB_REQ_SET_CONFIGURATION 0x09
+#define USB_REQ_GET_INTERFACE 0x0A
+#define USB_REQ_SET_INTERFACE 0x0B
+#define USB_REQ_SYNCH_FRAME 0x0C
#define USB_REQ_SET_SEL 0x30
#define USB_REQ_SET_ISOCH_DELAY 0x31
-#define USB_DEVICE_SELF_POWERED 0
-#define USB_DEVICE_REMOTE_WAKEUP 1
+#define USB_DEVICE_SELF_POWERED 0
+#define USB_DEVICE_REMOTE_WAKEUP 1
-#define USB_DT_DEVICE 0x01
-#define USB_DT_CONFIG 0x02
-#define USB_DT_STRING 0x03
-#define USB_DT_INTERFACE 0x04
-#define USB_DT_ENDPOINT 0x05
+#define USB_DT_DEVICE 0x01
+#define USB_DT_CONFIG 0x02
+#define USB_DT_STRING 0x03
+#define USB_DT_INTERFACE 0x04
+#define USB_DT_ENDPOINT 0x05
#define USB_DT_DEVICE_QUALIFIER 0x06
#define USB_DT_OTHER_SPEED_CONFIG 0x07
#define USB_DT_DEBUG 0x0A
@@ -166,15 +168,14 @@
#define USB_CFG_ATT_WAKEUP (1 << 5)
#define USB_CFG_ATT_BATTERY (1 << 4)
-#define USB_ENDPOINT_XFER_CONTROL 0
-#define USB_ENDPOINT_XFER_ISOC 1
-#define USB_ENDPOINT_XFER_BULK 2
-#define USB_ENDPOINT_XFER_INT 3
+#define USB_ENDPOINT_XFER_CONTROL 0
+#define USB_ENDPOINT_XFER_ISOC 1
+#define USB_ENDPOINT_XFER_BULK 2
+#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_XFER_INVALID 255
#define USB_INTERFACE_INVALID 255
-typedef struct USBBus USBBus;
typedef struct USBBusOps USBBusOps;
typedef struct USBPort USBPort;
typedef struct USBDevice USBDevice;
@@ -216,10 +217,10 @@ struct USBEndpoint {
};
enum USBDeviceFlags {
- USB_DEV_FLAG_FULL_PATH,
USB_DEV_FLAG_IS_HOST,
USB_DEV_FLAG_MSOS_DESC_ENABLE,
USB_DEV_FLAG_MSOS_DESC_IN_USE,
+ USB_DEV_FLAG_IS_SCSI_STORAGE,
};
/* definition of a USB device */
@@ -231,6 +232,9 @@ struct USBDevice {
void *opaque;
uint32_t flags;
+ char *pcap_filename;
+ FILE *pcap;
+
/* Actual connected speed */
int speed;
/* Supported speeds, not in info because it may be variable (hostdevs) */
@@ -264,17 +268,12 @@ struct USBDevice {
};
#define TYPE_USB_DEVICE "usb-device"
-#define USB_DEVICE(obj) \
- OBJECT_CHECK(USBDevice, (obj), TYPE_USB_DEVICE)
-#define USB_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(USBDeviceClass, (klass), TYPE_USB_DEVICE)
-#define USB_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(USBDeviceClass, (obj), TYPE_USB_DEVICE)
+OBJECT_DECLARE_TYPE(USBDevice, USBDeviceClass, USB_DEVICE)
typedef void (*USBDeviceRealize)(USBDevice *dev, Error **errp);
typedef void (*USBDeviceUnrealize)(USBDevice *dev);
-typedef struct USBDeviceClass {
+struct USBDeviceClass {
DeviceClass parent_class;
USBDeviceRealize realize;
@@ -346,7 +345,7 @@ typedef struct USBDeviceClass {
const char *product_desc;
const USBDesc *usb_desc;
bool attached_settable;
-} USBDeviceClass;
+};
typedef struct USBPortOps {
void (*attach)(USBPort *port);
@@ -468,7 +467,6 @@ void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p);
/* usb-linux.c */
void hmp_info_usbhost(Monitor *mon, const QDict *qdict);
-bool usb_host_dev_is_scsi_storage(USBDevice *usbdev);
/* usb ports of the VM */
@@ -477,7 +475,7 @@ bool usb_host_dev_is_scsi_storage(USBDevice *usbdev);
/* usb-bus.c */
#define TYPE_USB_BUS "usb-bus"
-#define USB_BUS(obj) OBJECT_CHECK(USBBus, (obj), TYPE_USB_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(USBBus, USB_BUS)
struct USBBus {
BusState qbus;
@@ -500,12 +498,8 @@ struct USBBusOps {
void usb_bus_new(USBBus *bus, size_t bus_size,
USBBusOps *ops, DeviceState *host);
void usb_bus_release(USBBus *bus);
-USBBus *usb_bus_find(int busnr);
void usb_legacy_register(const char *typename, const char *usbdevice_name,
- USBDevice *(*usbdevice_init)(const char *params));
-USBDevice *usb_new(const char *name);
-bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp);
-USBDevice *usb_create_simple(USBBus *bus, const char *name);
+ USBDevice *(*usbdevice_init)(void));
USBDevice *usbdevice_create(const char *cmdline);
void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index,
USBPortOps *ops, int speedmask);
@@ -564,15 +558,48 @@ const char *usb_device_get_product_desc(USBDevice *dev);
const USBDesc *usb_device_get_usb_desc(USBDevice *dev);
+static inline bool usb_device_is_scsi_storage(USBDevice *dev)
+{
+ return dev->flags & (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
+}
+
/* quirks.c */
/* In bulk endpoints are streaming data sources (iow behave like isoc eps) */
-#define USB_QUIRK_BUFFER_BULK_IN 0x01
+#define USB_QUIRK_BUFFER_BULK_IN 0x01
/* Bulk pkts in FTDI format, need special handling when combining packets */
-#define USB_QUIRK_IS_FTDI 0x02
+#define USB_QUIRK_IS_FTDI 0x02
int usb_get_quirks(uint16_t vendor_id, uint16_t product_id,
uint8_t interface_class, uint8_t interface_subclass,
uint8_t interface_protocol);
+/* pcap.c */
+void usb_pcap_init(FILE *fp);
+void usb_pcap_ctrl(USBPacket *p, bool setup);
+void usb_pcap_data(USBPacket *p, bool setup);
+
+static inline USBDevice *usb_new(const char *name)
+{
+ return USB_DEVICE(qdev_new(name));
+}
+
+static inline USBDevice *usb_try_new(const char *name)
+{
+ return USB_DEVICE(qdev_try_new(name));
+}
+
+static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp)
+{
+ return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
+}
+
+static inline USBDevice *usb_create_simple(USBBus *bus, const char *name)
+{
+ USBDevice *dev = usb_new(name);
+
+ usb_realize_and_unref(dev, bus, &error_abort);
+ return dev;
+}
+
#endif
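
[Editor's note: a hedged sketch of typical call sites for the inline helpers introduced above; the board function names are illustrative and "usb-kbd" is just an example device type.]

#include "qemu/osdep.h"
#include "hw/usb.h"

/* One-step create-and-realize; aborts on failure via &error_abort. */
static void my_board_add_keyboard(USBBus *bus)
{
    usb_create_simple(bus, "usb-kbd");
}

/* Two-step variant that reports failure to the caller instead. */
static bool my_board_try_add(USBBus *bus, const char *name, Error **errp)
{
    USBDevice *dev = usb_new(name);

    return usb_realize_and_unref(dev, bus, errp);
}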
diff --git a/include/hw/usb/chipidea.h b/include/hw/usb/chipidea.h
index 1ec2e9dbda..fe4113ee01 100644
--- a/include/hw/usb/chipidea.h
+++ b/include/hw/usb/chipidea.h
@@ -2,15 +2,16 @@
#define CHIPIDEA_H
#include "hw/usb/hcd-ehci.h"
+#include "qom/object.h"
-typedef struct ChipideaState {
+struct ChipideaState {
/*< private >*/
EHCISysBusState parent_obj;
MemoryRegion iomem[3];
-} ChipideaState;
+};
#define TYPE_CHIPIDEA "usb-chipidea"
-#define CHIPIDEA(obj) OBJECT_CHECK(ChipideaState, (obj), TYPE_CHIPIDEA)
+OBJECT_DECLARE_SIMPLE_TYPE(ChipideaState, CHIPIDEA)
#endif /* CHIPIDEA_H */
diff --git a/include/hw/usb/dwc2-regs.h b/include/hw/usb/dwc2-regs.h
index 40af23a0ba..0bf3f2aa17 100644
--- a/include/hw/usb/dwc2-regs.h
+++ b/include/hw/usb/dwc2-regs.h
@@ -39,791 +39,791 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __DWC2_HW_H__
-#define __DWC2_HW_H__
-
-#define HSOTG_REG(x) (x)
-
-#define GOTGCTL HSOTG_REG(0x000)
-#define GOTGCTL_CHIRPEN BIT(27)
-#define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22)
-#define GOTGCTL_MULT_VALID_BC_SHIFT 22
-#define GOTGCTL_OTGVER BIT(20)
-#define GOTGCTL_BSESVLD BIT(19)
-#define GOTGCTL_ASESVLD BIT(18)
-#define GOTGCTL_DBNC_SHORT BIT(17)
-#define GOTGCTL_CONID_B BIT(16)
-#define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15)
-#define GOTGCTL_DEVHNPEN BIT(11)
-#define GOTGCTL_HSTSETHNPEN BIT(10)
-#define GOTGCTL_HNPREQ BIT(9)
-#define GOTGCTL_HSTNEGSCS BIT(8)
-#define GOTGCTL_SESREQ BIT(1)
-#define GOTGCTL_SESREQSCS BIT(0)
-
-#define GOTGINT HSOTG_REG(0x004)
-#define GOTGINT_DBNCE_DONE BIT(19)
-#define GOTGINT_A_DEV_TOUT_CHG BIT(18)
-#define GOTGINT_HST_NEG_DET BIT(17)
-#define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9)
-#define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8)
-#define GOTGINT_SES_END_DET BIT(2)
-
-#define GAHBCFG HSOTG_REG(0x008)
-#define GAHBCFG_AHB_SINGLE BIT(23)
-#define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22)
-#define GAHBCFG_REM_MEM_SUPP BIT(21)
-#define GAHBCFG_P_TXF_EMP_LVL BIT(8)
-#define GAHBCFG_NP_TXF_EMP_LVL BIT(7)
-#define GAHBCFG_DMA_EN BIT(5)
-#define GAHBCFG_HBSTLEN_MASK (0xf << 1)
-#define GAHBCFG_HBSTLEN_SHIFT 1
-#define GAHBCFG_HBSTLEN_SINGLE 0
-#define GAHBCFG_HBSTLEN_INCR 1
-#define GAHBCFG_HBSTLEN_INCR4 3
-#define GAHBCFG_HBSTLEN_INCR8 5
-#define GAHBCFG_HBSTLEN_INCR16 7
-#define GAHBCFG_GLBL_INTR_EN BIT(0)
-#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \
- GAHBCFG_NP_TXF_EMP_LVL | \
- GAHBCFG_DMA_EN | \
- GAHBCFG_GLBL_INTR_EN)
-
-#define GUSBCFG HSOTG_REG(0x00C)
-#define GUSBCFG_FORCEDEVMODE BIT(30)
-#define GUSBCFG_FORCEHOSTMODE BIT(29)
-#define GUSBCFG_TXENDDELAY BIT(28)
-#define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27)
-#define GUSBCFG_ICUSBCAP BIT(26)
-#define GUSBCFG_ULPI_INT_PROT_DIS BIT(25)
-#define GUSBCFG_INDICATORPASSTHROUGH BIT(24)
-#define GUSBCFG_INDICATORCOMPLEMENT BIT(23)
-#define GUSBCFG_TERMSELDLPULSE BIT(22)
-#define GUSBCFG_ULPI_INT_VBUS_IND BIT(21)
-#define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20)
-#define GUSBCFG_ULPI_CLK_SUSP_M BIT(19)
-#define GUSBCFG_ULPI_AUTO_RES BIT(18)
-#define GUSBCFG_ULPI_FS_LS BIT(17)
-#define GUSBCFG_OTG_UTMI_FS_SEL BIT(16)
-#define GUSBCFG_PHY_LP_CLK_SEL BIT(15)
-#define GUSBCFG_USBTRDTIM_MASK (0xf << 10)
-#define GUSBCFG_USBTRDTIM_SHIFT 10
-#define GUSBCFG_HNPCAP BIT(9)
-#define GUSBCFG_SRPCAP BIT(8)
-#define GUSBCFG_DDRSEL BIT(7)
-#define GUSBCFG_PHYSEL BIT(6)
-#define GUSBCFG_FSINTF BIT(5)
-#define GUSBCFG_ULPI_UTMI_SEL BIT(4)
-#define GUSBCFG_PHYIF16 BIT(3)
-#define GUSBCFG_PHYIF8 (0 << 3)
-#define GUSBCFG_TOUTCAL_MASK (0x7 << 0)
-#define GUSBCFG_TOUTCAL_SHIFT 0
-#define GUSBCFG_TOUTCAL_LIMIT 0x7
-#define GUSBCFG_TOUTCAL(_x) ((_x) << 0)
-
-#define GRSTCTL HSOTG_REG(0x010)
-#define GRSTCTL_AHBIDLE BIT(31)
-#define GRSTCTL_DMAREQ BIT(30)
-#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
-#define GRSTCTL_TXFNUM_SHIFT 6
-#define GRSTCTL_TXFNUM_LIMIT 0x1f
-#define GRSTCTL_TXFNUM(_x) ((_x) << 6)
-#define GRSTCTL_TXFFLSH BIT(5)
-#define GRSTCTL_RXFFLSH BIT(4)
-#define GRSTCTL_IN_TKNQ_FLSH BIT(3)
-#define GRSTCTL_FRMCNTRRST BIT(2)
-#define GRSTCTL_HSFTRST BIT(1)
-#define GRSTCTL_CSFTRST BIT(0)
-
-#define GINTSTS HSOTG_REG(0x014)
-#define GINTMSK HSOTG_REG(0x018)
-#define GINTSTS_WKUPINT BIT(31)
-#define GINTSTS_SESSREQINT BIT(30)
-#define GINTSTS_DISCONNINT BIT(29)
-#define GINTSTS_CONIDSTSCHNG BIT(28)
-#define GINTSTS_LPMTRANRCVD BIT(27)
-#define GINTSTS_PTXFEMP BIT(26)
-#define GINTSTS_HCHINT BIT(25)
-#define GINTSTS_PRTINT BIT(24)
-#define GINTSTS_RESETDET BIT(23)
-#define GINTSTS_FET_SUSP BIT(22)
-#define GINTSTS_INCOMPL_IP BIT(21)
-#define GINTSTS_INCOMPL_SOOUT BIT(21)
-#define GINTSTS_INCOMPL_SOIN BIT(20)
-#define GINTSTS_OEPINT BIT(19)
-#define GINTSTS_IEPINT BIT(18)
-#define GINTSTS_EPMIS BIT(17)
-#define GINTSTS_RESTOREDONE BIT(16)
-#define GINTSTS_EOPF BIT(15)
-#define GINTSTS_ISOUTDROP BIT(14)
-#define GINTSTS_ENUMDONE BIT(13)
-#define GINTSTS_USBRST BIT(12)
-#define GINTSTS_USBSUSP BIT(11)
-#define GINTSTS_ERLYSUSP BIT(10)
-#define GINTSTS_I2CINT BIT(9)
-#define GINTSTS_ULPI_CK_INT BIT(8)
-#define GINTSTS_GOUTNAKEFF BIT(7)
-#define GINTSTS_GINNAKEFF BIT(6)
-#define GINTSTS_NPTXFEMP BIT(5)
-#define GINTSTS_RXFLVL BIT(4)
-#define GINTSTS_SOF BIT(3)
-#define GINTSTS_OTGINT BIT(2)
-#define GINTSTS_MODEMIS BIT(1)
-#define GINTSTS_CURMODE_HOST BIT(0)
-
-#define GRXSTSR HSOTG_REG(0x01C)
-#define GRXSTSP HSOTG_REG(0x020)
-#define GRXSTS_FN_MASK (0x7f << 25)
-#define GRXSTS_FN_SHIFT 25
-#define GRXSTS_PKTSTS_MASK (0xf << 17)
-#define GRXSTS_PKTSTS_SHIFT 17
-#define GRXSTS_PKTSTS_GLOBALOUTNAK 1
-#define GRXSTS_PKTSTS_OUTRX 2
-#define GRXSTS_PKTSTS_HCHIN 2
-#define GRXSTS_PKTSTS_OUTDONE 3
-#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3
-#define GRXSTS_PKTSTS_SETUPDONE 4
-#define GRXSTS_PKTSTS_DATATOGGLEERR 5
-#define GRXSTS_PKTSTS_SETUPRX 6
-#define GRXSTS_PKTSTS_HCHHALTED 7
-#define GRXSTS_HCHNUM_MASK (0xf << 0)
-#define GRXSTS_HCHNUM_SHIFT 0
-#define GRXSTS_DPID_MASK (0x3 << 15)
-#define GRXSTS_DPID_SHIFT 15
-#define GRXSTS_BYTECNT_MASK (0x7ff << 4)
-#define GRXSTS_BYTECNT_SHIFT 4
-#define GRXSTS_EPNUM_MASK (0xf << 0)
-#define GRXSTS_EPNUM_SHIFT 0
-
-#define GRXFSIZ HSOTG_REG(0x024)
-#define GRXFSIZ_DEPTH_MASK (0xffff << 0)
-#define GRXFSIZ_DEPTH_SHIFT 0
-
-#define GNPTXFSIZ HSOTG_REG(0x028)
+#ifndef DWC2_REGS_H
+#define DWC2_REGS_H
+
+#define HSOTG_REG(x) (x)
+
+#define GOTGCTL HSOTG_REG(0x000)
+#define GOTGCTL_CHIRPEN BIT(27)
+#define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22)
+#define GOTGCTL_MULT_VALID_BC_SHIFT 22
+#define GOTGCTL_OTGVER BIT(20)
+#define GOTGCTL_BSESVLD BIT(19)
+#define GOTGCTL_ASESVLD BIT(18)
+#define GOTGCTL_DBNC_SHORT BIT(17)
+#define GOTGCTL_CONID_B BIT(16)
+#define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15)
+#define GOTGCTL_DEVHNPEN BIT(11)
+#define GOTGCTL_HSTSETHNPEN BIT(10)
+#define GOTGCTL_HNPREQ BIT(9)
+#define GOTGCTL_HSTNEGSCS BIT(8)
+#define GOTGCTL_SESREQ BIT(1)
+#define GOTGCTL_SESREQSCS BIT(0)
+
+#define GOTGINT HSOTG_REG(0x004)
+#define GOTGINT_DBNCE_DONE BIT(19)
+#define GOTGINT_A_DEV_TOUT_CHG BIT(18)
+#define GOTGINT_HST_NEG_DET BIT(17)
+#define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9)
+#define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8)
+#define GOTGINT_SES_END_DET BIT(2)
+
+#define GAHBCFG HSOTG_REG(0x008)
+#define GAHBCFG_AHB_SINGLE BIT(23)
+#define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22)
+#define GAHBCFG_REM_MEM_SUPP BIT(21)
+#define GAHBCFG_P_TXF_EMP_LVL BIT(8)
+#define GAHBCFG_NP_TXF_EMP_LVL BIT(7)
+#define GAHBCFG_DMA_EN BIT(5)
+#define GAHBCFG_HBSTLEN_MASK (0xf << 1)
+#define GAHBCFG_HBSTLEN_SHIFT 1
+#define GAHBCFG_HBSTLEN_SINGLE 0
+#define GAHBCFG_HBSTLEN_INCR 1
+#define GAHBCFG_HBSTLEN_INCR4 3
+#define GAHBCFG_HBSTLEN_INCR8 5
+#define GAHBCFG_HBSTLEN_INCR16 7
+#define GAHBCFG_GLBL_INTR_EN BIT(0)
+#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \
+ GAHBCFG_NP_TXF_EMP_LVL | \
+ GAHBCFG_DMA_EN | \
+ GAHBCFG_GLBL_INTR_EN)
+
+#define GUSBCFG HSOTG_REG(0x00C)
+#define GUSBCFG_FORCEDEVMODE BIT(30)
+#define GUSBCFG_FORCEHOSTMODE BIT(29)
+#define GUSBCFG_TXENDDELAY BIT(28)
+#define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27)
+#define GUSBCFG_ICUSBCAP BIT(26)
+#define GUSBCFG_ULPI_INT_PROT_DIS BIT(25)
+#define GUSBCFG_INDICATORPASSTHROUGH BIT(24)
+#define GUSBCFG_INDICATORCOMPLEMENT BIT(23)
+#define GUSBCFG_TERMSELDLPULSE BIT(22)
+#define GUSBCFG_ULPI_INT_VBUS_IND BIT(21)
+#define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20)
+#define GUSBCFG_ULPI_CLK_SUSP_M BIT(19)
+#define GUSBCFG_ULPI_AUTO_RES BIT(18)
+#define GUSBCFG_ULPI_FS_LS BIT(17)
+#define GUSBCFG_OTG_UTMI_FS_SEL BIT(16)
+#define GUSBCFG_PHY_LP_CLK_SEL BIT(15)
+#define GUSBCFG_USBTRDTIM_MASK (0xf << 10)
+#define GUSBCFG_USBTRDTIM_SHIFT 10
+#define GUSBCFG_HNPCAP BIT(9)
+#define GUSBCFG_SRPCAP BIT(8)
+#define GUSBCFG_DDRSEL BIT(7)
+#define GUSBCFG_PHYSEL BIT(6)
+#define GUSBCFG_FSINTF BIT(5)
+#define GUSBCFG_ULPI_UTMI_SEL BIT(4)
+#define GUSBCFG_PHYIF16 BIT(3)
+#define GUSBCFG_PHYIF8 (0 << 3)
+#define GUSBCFG_TOUTCAL_MASK (0x7 << 0)
+#define GUSBCFG_TOUTCAL_SHIFT 0
+#define GUSBCFG_TOUTCAL_LIMIT 0x7
+#define GUSBCFG_TOUTCAL(_x) ((_x) << 0)
+
+#define GRSTCTL HSOTG_REG(0x010)
+#define GRSTCTL_AHBIDLE BIT(31)
+#define GRSTCTL_DMAREQ BIT(30)
+#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
+#define GRSTCTL_TXFNUM_SHIFT 6
+#define GRSTCTL_TXFNUM_LIMIT 0x1f
+#define GRSTCTL_TXFNUM(_x) ((_x) << 6)
+#define GRSTCTL_TXFFLSH BIT(5)
+#define GRSTCTL_RXFFLSH BIT(4)
+#define GRSTCTL_IN_TKNQ_FLSH BIT(3)
+#define GRSTCTL_FRMCNTRRST BIT(2)
+#define GRSTCTL_HSFTRST BIT(1)
+#define GRSTCTL_CSFTRST BIT(0)
+
+#define GINTSTS HSOTG_REG(0x014)
+#define GINTMSK HSOTG_REG(0x018)
+#define GINTSTS_WKUPINT BIT(31)
+#define GINTSTS_SESSREQINT BIT(30)
+#define GINTSTS_DISCONNINT BIT(29)
+#define GINTSTS_CONIDSTSCHNG BIT(28)
+#define GINTSTS_LPMTRANRCVD BIT(27)
+#define GINTSTS_PTXFEMP BIT(26)
+#define GINTSTS_HCHINT BIT(25)
+#define GINTSTS_PRTINT BIT(24)
+#define GINTSTS_RESETDET BIT(23)
+#define GINTSTS_FET_SUSP BIT(22)
+#define GINTSTS_INCOMPL_IP BIT(21)
+#define GINTSTS_INCOMPL_SOOUT BIT(21)
+#define GINTSTS_INCOMPL_SOIN BIT(20)
+#define GINTSTS_OEPINT BIT(19)
+#define GINTSTS_IEPINT BIT(18)
+#define GINTSTS_EPMIS BIT(17)
+#define GINTSTS_RESTOREDONE BIT(16)
+#define GINTSTS_EOPF BIT(15)
+#define GINTSTS_ISOUTDROP BIT(14)
+#define GINTSTS_ENUMDONE BIT(13)
+#define GINTSTS_USBRST BIT(12)
+#define GINTSTS_USBSUSP BIT(11)
+#define GINTSTS_ERLYSUSP BIT(10)
+#define GINTSTS_I2CINT BIT(9)
+#define GINTSTS_ULPI_CK_INT BIT(8)
+#define GINTSTS_GOUTNAKEFF BIT(7)
+#define GINTSTS_GINNAKEFF BIT(6)
+#define GINTSTS_NPTXFEMP BIT(5)
+#define GINTSTS_RXFLVL BIT(4)
+#define GINTSTS_SOF BIT(3)
+#define GINTSTS_OTGINT BIT(2)
+#define GINTSTS_MODEMIS BIT(1)
+#define GINTSTS_CURMODE_HOST BIT(0)
+
+#define GRXSTSR HSOTG_REG(0x01C)
+#define GRXSTSP HSOTG_REG(0x020)
+#define GRXSTS_FN_MASK (0x7f << 25)
+#define GRXSTS_FN_SHIFT 25
+#define GRXSTS_PKTSTS_MASK (0xf << 17)
+#define GRXSTS_PKTSTS_SHIFT 17
+#define GRXSTS_PKTSTS_GLOBALOUTNAK 1
+#define GRXSTS_PKTSTS_OUTRX 2
+#define GRXSTS_PKTSTS_HCHIN 2
+#define GRXSTS_PKTSTS_OUTDONE 3
+#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3
+#define GRXSTS_PKTSTS_SETUPDONE 4
+#define GRXSTS_PKTSTS_DATATOGGLEERR 5
+#define GRXSTS_PKTSTS_SETUPRX 6
+#define GRXSTS_PKTSTS_HCHHALTED 7
+#define GRXSTS_HCHNUM_MASK (0xf << 0)
+#define GRXSTS_HCHNUM_SHIFT 0
+#define GRXSTS_DPID_MASK (0x3 << 15)
+#define GRXSTS_DPID_SHIFT 15
+#define GRXSTS_BYTECNT_MASK (0x7ff << 4)
+#define GRXSTS_BYTECNT_SHIFT 4
+#define GRXSTS_EPNUM_MASK (0xf << 0)
+#define GRXSTS_EPNUM_SHIFT 0
+
+#define GRXFSIZ HSOTG_REG(0x024)
+#define GRXFSIZ_DEPTH_MASK (0xffff << 0)
+#define GRXFSIZ_DEPTH_SHIFT 0
+
+#define GNPTXFSIZ HSOTG_REG(0x028)
/* Use FIFOSIZE_* constants to access this register */
-#define GNPTXSTS HSOTG_REG(0x02C)
-#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24)
-#define GNPTXSTS_NP_TXQ_TOP_SHIFT 24
-#define GNPTXSTS_NP_TXQ_SPC_AVAIL_MASK (0xff << 16)
-#define GNPTXSTS_NP_TXQ_SPC_AVAIL_SHIFT 16
-#define GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(_v) (((_v) >> 16) & 0xff)
-#define GNPTXSTS_NP_TXF_SPC_AVAIL_MASK (0xffff << 0)
-#define GNPTXSTS_NP_TXF_SPC_AVAIL_SHIFT 0
-#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff)
-
-#define GI2CCTL HSOTG_REG(0x0030)
-#define GI2CCTL_BSYDNE BIT(31)
-#define GI2CCTL_RW BIT(30)
-#define GI2CCTL_I2CDATSE0 BIT(28)
-#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26)
-#define GI2CCTL_I2CDEVADDR_SHIFT 26
-#define GI2CCTL_I2CSUSPCTL BIT(25)
-#define GI2CCTL_ACK BIT(24)
-#define GI2CCTL_I2CEN BIT(23)
-#define GI2CCTL_ADDR_MASK (0x7f << 16)
-#define GI2CCTL_ADDR_SHIFT 16
-#define GI2CCTL_REGADDR_MASK (0xff << 8)
-#define GI2CCTL_REGADDR_SHIFT 8
-#define GI2CCTL_RWDATA_MASK (0xff << 0)
-#define GI2CCTL_RWDATA_SHIFT 0
-
-#define GPVNDCTL HSOTG_REG(0x0034)
-#define GGPIO HSOTG_REG(0x0038)
-#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16)
-
-#define GUID HSOTG_REG(0x003c)
-#define GSNPSID HSOTG_REG(0x0040)
-#define GHWCFG1 HSOTG_REG(0x0044)
-#define GSNPSID_ID_MASK GENMASK(31, 16)
-
-#define GHWCFG2 HSOTG_REG(0x0048)
-#define GHWCFG2_OTG_ENABLE_IC_USB BIT(31)
-#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26)
-#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26
-#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24)
-#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24
-#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22)
-#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22
-#define GHWCFG2_MULTI_PROC_INT BIT(20)
-#define GHWCFG2_DYNAMIC_FIFO BIT(19)
-#define GHWCFG2_PERIO_EP_SUPPORTED BIT(18)
-#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14)
-#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14
-#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10)
-#define GHWCFG2_NUM_DEV_EP_SHIFT 10
-#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8)
-#define GHWCFG2_FS_PHY_TYPE_SHIFT 8
-#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
-#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1
-#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2
-#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3
-#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6)
-#define GHWCFG2_HS_PHY_TYPE_SHIFT 6
-#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
-#define GHWCFG2_HS_PHY_TYPE_UTMI 1
-#define GHWCFG2_HS_PHY_TYPE_ULPI 2
-#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
-#define GHWCFG2_POINT2POINT BIT(5)
-#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3)
-#define GHWCFG2_ARCHITECTURE_SHIFT 3
-#define GHWCFG2_SLAVE_ONLY_ARCH 0
-#define GHWCFG2_EXT_DMA_ARCH 1
-#define GHWCFG2_INT_DMA_ARCH 2
-#define GHWCFG2_OP_MODE_MASK (0x7 << 0)
-#define GHWCFG2_OP_MODE_SHIFT 0
-#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0
-#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1
-#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2
-#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
-#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
-#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
-#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
-#define GHWCFG2_OP_MODE_UNDEFINED 7
-
-#define GHWCFG3 HSOTG_REG(0x004c)
-#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16)
-#define GHWCFG3_DFIFO_DEPTH_SHIFT 16
-#define GHWCFG3_OTG_LPM_EN BIT(15)
-#define GHWCFG3_BC_SUPPORT BIT(14)
-#define GHWCFG3_OTG_ENABLE_HSIC BIT(13)
-#define GHWCFG3_ADP_SUPP BIT(12)
-#define GHWCFG3_SYNCH_RESET_TYPE BIT(11)
-#define GHWCFG3_OPTIONAL_FEATURES BIT(10)
-#define GHWCFG3_VENDOR_CTRL_IF BIT(9)
-#define GHWCFG3_I2C BIT(8)
-#define GHWCFG3_OTG_FUNC BIT(7)
-#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4)
-#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4
-#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0)
-#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0
-
-#define GHWCFG4 HSOTG_REG(0x0050)
-#define GHWCFG4_DESC_DMA_DYN BIT(31)
-#define GHWCFG4_DESC_DMA BIT(30)
-#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
-#define GHWCFG4_NUM_IN_EPS_SHIFT 26
-#define GHWCFG4_DED_FIFO_EN BIT(25)
-#define GHWCFG4_DED_FIFO_SHIFT 25
-#define GHWCFG4_SESSION_END_FILT_EN BIT(24)
-#define GHWCFG4_B_VALID_FILT_EN BIT(23)
-#define GHWCFG4_A_VALID_FILT_EN BIT(22)
-#define GHWCFG4_VBUS_VALID_FILT_EN BIT(21)
-#define GHWCFG4_IDDIG_FILT_EN BIT(20)
-#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16)
-#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
-#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
-#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
-#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
-#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
-#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
-#define GHWCFG4_ACG_SUPPORTED BIT(12)
-#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GNPTXSTS HSOTG_REG(0x02C)
+#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24)
+#define GNPTXSTS_NP_TXQ_TOP_SHIFT 24
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_MASK (0xff << 16)
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_SHIFT 16
+#define GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(_v) (((_v) >> 16) & 0xff)
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_MASK (0xffff << 0)
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_SHIFT 0
+#define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff)
+
+#define GI2CCTL HSOTG_REG(0x0030)
+#define GI2CCTL_BSYDNE BIT(31)
+#define GI2CCTL_RW BIT(30)
+#define GI2CCTL_I2CDATSE0 BIT(28)
+#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26)
+#define GI2CCTL_I2CDEVADDR_SHIFT 26
+#define GI2CCTL_I2CSUSPCTL BIT(25)
+#define GI2CCTL_ACK BIT(24)
+#define GI2CCTL_I2CEN BIT(23)
+#define GI2CCTL_ADDR_MASK (0x7f << 16)
+#define GI2CCTL_ADDR_SHIFT 16
+#define GI2CCTL_REGADDR_MASK (0xff << 8)
+#define GI2CCTL_REGADDR_SHIFT 8
+#define GI2CCTL_RWDATA_MASK (0xff << 0)
+#define GI2CCTL_RWDATA_SHIFT 0
+
+#define GPVNDCTL HSOTG_REG(0x0034)
+#define GGPIO HSOTG_REG(0x0038)
+#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16)
+
+#define GUID HSOTG_REG(0x003c)
+#define GSNPSID HSOTG_REG(0x0040)
+#define GHWCFG1 HSOTG_REG(0x0044)
+#define GSNPSID_ID_MASK GENMASK(31, 16)
+
+#define GHWCFG2 HSOTG_REG(0x0048)
+#define GHWCFG2_OTG_ENABLE_IC_USB BIT(31)
+#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26)
+#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26
+#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24)
+#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24
+#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22)
+#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22
+#define GHWCFG2_MULTI_PROC_INT BIT(20)
+#define GHWCFG2_DYNAMIC_FIFO BIT(19)
+#define GHWCFG2_PERIO_EP_SUPPORTED BIT(18)
+#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14)
+#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14
+#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10)
+#define GHWCFG2_NUM_DEV_EP_SHIFT 10
+#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8)
+#define GHWCFG2_FS_PHY_TYPE_SHIFT 8
+#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1
+#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2
+#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3
+#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6)
+#define GHWCFG2_HS_PHY_TYPE_SHIFT 6
+#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_HS_PHY_TYPE_UTMI 1
+#define GHWCFG2_HS_PHY_TYPE_ULPI 2
+#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+#define GHWCFG2_POINT2POINT BIT(5)
+#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3)
+#define GHWCFG2_ARCHITECTURE_SHIFT 3
+#define GHWCFG2_SLAVE_ONLY_ARCH 0
+#define GHWCFG2_EXT_DMA_ARCH 1
+#define GHWCFG2_INT_DMA_ARCH 2
+#define GHWCFG2_OP_MODE_MASK (0x7 << 0)
+#define GHWCFG2_OP_MODE_SHIFT 0
+#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0
+#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1
+#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+#define GHWCFG2_OP_MODE_UNDEFINED 7
+
+#define GHWCFG3 HSOTG_REG(0x004c)
+#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16)
+#define GHWCFG3_DFIFO_DEPTH_SHIFT 16
+#define GHWCFG3_OTG_LPM_EN BIT(15)
+#define GHWCFG3_BC_SUPPORT BIT(14)
+#define GHWCFG3_OTG_ENABLE_HSIC BIT(13)
+#define GHWCFG3_ADP_SUPP BIT(12)
+#define GHWCFG3_SYNCH_RESET_TYPE BIT(11)
+#define GHWCFG3_OPTIONAL_FEATURES BIT(10)
+#define GHWCFG3_VENDOR_CTRL_IF BIT(9)
+#define GHWCFG3_I2C BIT(8)
+#define GHWCFG3_OTG_FUNC BIT(7)
+#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4)
+#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4
+#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0)
+#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0
+
+#define GHWCFG4 HSOTG_REG(0x0050)
+#define GHWCFG4_DESC_DMA_DYN BIT(31)
+#define GHWCFG4_DESC_DMA BIT(30)
+#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26)
+#define GHWCFG4_NUM_IN_EPS_SHIFT 26
+#define GHWCFG4_DED_FIFO_EN BIT(25)
+#define GHWCFG4_DED_FIFO_SHIFT 25
+#define GHWCFG4_SESSION_END_FILT_EN BIT(24)
+#define GHWCFG4_B_VALID_FILT_EN BIT(23)
+#define GHWCFG4_A_VALID_FILT_EN BIT(22)
+#define GHWCFG4_VBUS_VALID_FILT_EN BIT(21)
+#define GHWCFG4_IDDIG_FILT_EN BIT(20)
+#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16)
+#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
+#define GHWCFG4_ACG_SUPPORTED BIT(12)
+#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
-#define GHWCFG4_XHIBER BIT(7)
-#define GHWCFG4_HIBER BIT(6)
-#define GHWCFG4_MIN_AHB_FREQ BIT(5)
-#define GHWCFG4_POWER_OPTIMIZ BIT(4)
-#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0)
-#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0
-
-#define GLPMCFG HSOTG_REG(0x0054)
-#define GLPMCFG_INVSELHSIC BIT(31)
-#define GLPMCFG_HSICCON BIT(30)
-#define GLPMCFG_RSTRSLPSTS BIT(29)
-#define GLPMCFG_ENBESL BIT(28)
-#define GLPMCFG_LPM_RETRYCNT_STS_MASK (0x7 << 25)
-#define GLPMCFG_LPM_RETRYCNT_STS_SHIFT 25
-#define GLPMCFG_SNDLPM BIT(24)
-#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21)
-#define GLPMCFG_RETRY_CNT_SHIFT 21
-#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21)
-#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22)
-#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17)
-#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17
-#define GLPMCFG_L1RESUMEOK BIT(16)
-#define GLPMCFG_SLPSTS BIT(15)
-#define GLPMCFG_COREL1RES_MASK (0x3 << 13)
-#define GLPMCFG_COREL1RES_SHIFT 13
-#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8)
-#define GLPMCFG_HIRD_THRES_SHIFT 8
-#define GLPMCFG_HIRD_THRES_EN (0x10 << 8)
-#define GLPMCFG_ENBLSLPM BIT(7)
-#define GLPMCFG_BREMOTEWAKE BIT(6)
-#define GLPMCFG_HIRD_MASK (0xf << 2)
-#define GLPMCFG_HIRD_SHIFT 2
-#define GLPMCFG_APPL1RES BIT(1)
-#define GLPMCFG_LPMCAP BIT(0)
-
-#define GPWRDN HSOTG_REG(0x0058)
-#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24)
-#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24
-#define GPWRDN_ADP_INT BIT(23)
-#define GPWRDN_BSESSVLD BIT(22)
-#define GPWRDN_IDSTS BIT(21)
-#define GPWRDN_LINESTATE_MASK (0x3 << 19)
-#define GPWRDN_LINESTATE_SHIFT 19
-#define GPWRDN_STS_CHGINT_MSK BIT(18)
-#define GPWRDN_STS_CHGINT BIT(17)
-#define GPWRDN_SRP_DET_MSK BIT(16)
-#define GPWRDN_SRP_DET BIT(15)
-#define GPWRDN_CONNECT_DET_MSK BIT(14)
-#define GPWRDN_CONNECT_DET BIT(13)
-#define GPWRDN_DISCONN_DET_MSK BIT(12)
-#define GPWRDN_DISCONN_DET BIT(11)
-#define GPWRDN_RST_DET_MSK BIT(10)
-#define GPWRDN_RST_DET BIT(9)
-#define GPWRDN_LNSTSCHG_MSK BIT(8)
-#define GPWRDN_LNSTSCHG BIT(7)
-#define GPWRDN_DIS_VBUS BIT(6)
-#define GPWRDN_PWRDNSWTCH BIT(5)
-#define GPWRDN_PWRDNRSTN BIT(4)
-#define GPWRDN_PWRDNCLMP BIT(3)
-#define GPWRDN_RESTORE BIT(2)
-#define GPWRDN_PMUACTV BIT(1)
-#define GPWRDN_PMUINTSEL BIT(0)
-
-#define GDFIFOCFG HSOTG_REG(0x005c)
-#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16)
-#define GDFIFOCFG_EPINFOBASE_SHIFT 16
-#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0)
-#define GDFIFOCFG_GDFIFOCFG_SHIFT 0
-
-#define ADPCTL HSOTG_REG(0x0060)
-#define ADPCTL_AR_MASK (0x3 << 27)
-#define ADPCTL_AR_SHIFT 27
-#define ADPCTL_ADP_TMOUT_INT_MSK BIT(26)
-#define ADPCTL_ADP_SNS_INT_MSK BIT(25)
-#define ADPCTL_ADP_PRB_INT_MSK BIT(24)
-#define ADPCTL_ADP_TMOUT_INT BIT(23)
-#define ADPCTL_ADP_SNS_INT BIT(22)
-#define ADPCTL_ADP_PRB_INT BIT(21)
-#define ADPCTL_ADPENA BIT(20)
-#define ADPCTL_ADPRES BIT(19)
-#define ADPCTL_ENASNS BIT(18)
-#define ADPCTL_ENAPRB BIT(17)
-#define ADPCTL_RTIM_MASK (0x7ff << 6)
-#define ADPCTL_RTIM_SHIFT 6
-#define ADPCTL_PRB_PER_MASK (0x3 << 4)
-#define ADPCTL_PRB_PER_SHIFT 4
-#define ADPCTL_PRB_DELTA_MASK (0x3 << 2)
-#define ADPCTL_PRB_DELTA_SHIFT 2
-#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
-#define ADPCTL_PRB_DSCHRG_SHIFT 0
-
-#define GREFCLK HSOTG_REG(0x0064)
-#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
-#define GREFCLK_REFCLKPER_SHIFT 15
-#define GREFCLK_REF_CLK_MODE BIT(14)
-#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
+#define GHWCFG4_XHIBER BIT(7)
+#define GHWCFG4_HIBER BIT(6)
+#define GHWCFG4_MIN_AHB_FREQ BIT(5)
+#define GHWCFG4_POWER_OPTIMIZ BIT(4)
+#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0)
+#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0
+
+#define GLPMCFG HSOTG_REG(0x0054)
+#define GLPMCFG_INVSELHSIC BIT(31)
+#define GLPMCFG_HSICCON BIT(30)
+#define GLPMCFG_RSTRSLPSTS BIT(29)
+#define GLPMCFG_ENBESL BIT(28)
+#define GLPMCFG_LPM_RETRYCNT_STS_MASK (0x7 << 25)
+#define GLPMCFG_LPM_RETRYCNT_STS_SHIFT 25
+#define GLPMCFG_SNDLPM BIT(24)
+#define GLPMCFG_RETRY_CNT_MASK (0x7 << 21)
+#define GLPMCFG_RETRY_CNT_SHIFT 21
+#define GLPMCFG_LPM_REJECT_CTRL_CONTROL BIT(21)
+#define GLPMCFG_LPM_ACCEPT_CTRL_ISOC BIT(22)
+#define GLPMCFG_LPM_CHNL_INDX_MASK (0xf << 17)
+#define GLPMCFG_LPM_CHNL_INDX_SHIFT 17
+#define GLPMCFG_L1RESUMEOK BIT(16)
+#define GLPMCFG_SLPSTS BIT(15)
+#define GLPMCFG_COREL1RES_MASK (0x3 << 13)
+#define GLPMCFG_COREL1RES_SHIFT 13
+#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8)
+#define GLPMCFG_HIRD_THRES_SHIFT 8
+#define GLPMCFG_HIRD_THRES_EN (0x10 << 8)
+#define GLPMCFG_ENBLSLPM BIT(7)
+#define GLPMCFG_BREMOTEWAKE BIT(6)
+#define GLPMCFG_HIRD_MASK (0xf << 2)
+#define GLPMCFG_HIRD_SHIFT 2
+#define GLPMCFG_APPL1RES BIT(1)
+#define GLPMCFG_LPMCAP BIT(0)
+
+#define GPWRDN HSOTG_REG(0x0058)
+#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24)
+#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24
+#define GPWRDN_ADP_INT BIT(23)
+#define GPWRDN_BSESSVLD BIT(22)
+#define GPWRDN_IDSTS BIT(21)
+#define GPWRDN_LINESTATE_MASK (0x3 << 19)
+#define GPWRDN_LINESTATE_SHIFT 19
+#define GPWRDN_STS_CHGINT_MSK BIT(18)
+#define GPWRDN_STS_CHGINT BIT(17)
+#define GPWRDN_SRP_DET_MSK BIT(16)
+#define GPWRDN_SRP_DET BIT(15)
+#define GPWRDN_CONNECT_DET_MSK BIT(14)
+#define GPWRDN_CONNECT_DET BIT(13)
+#define GPWRDN_DISCONN_DET_MSK BIT(12)
+#define GPWRDN_DISCONN_DET BIT(11)
+#define GPWRDN_RST_DET_MSK BIT(10)
+#define GPWRDN_RST_DET BIT(9)
+#define GPWRDN_LNSTSCHG_MSK BIT(8)
+#define GPWRDN_LNSTSCHG BIT(7)
+#define GPWRDN_DIS_VBUS BIT(6)
+#define GPWRDN_PWRDNSWTCH BIT(5)
+#define GPWRDN_PWRDNRSTN BIT(4)
+#define GPWRDN_PWRDNCLMP BIT(3)
+#define GPWRDN_RESTORE BIT(2)
+#define GPWRDN_PMUACTV BIT(1)
+#define GPWRDN_PMUINTSEL BIT(0)
+
+#define GDFIFOCFG HSOTG_REG(0x005c)
+#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16)
+#define GDFIFOCFG_EPINFOBASE_SHIFT 16
+#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0)
+#define GDFIFOCFG_GDFIFOCFG_SHIFT 0
+
+#define ADPCTL HSOTG_REG(0x0060)
+#define ADPCTL_AR_MASK (0x3 << 27)
+#define ADPCTL_AR_SHIFT 27
+#define ADPCTL_ADP_TMOUT_INT_MSK BIT(26)
+#define ADPCTL_ADP_SNS_INT_MSK BIT(25)
+#define ADPCTL_ADP_PRB_INT_MSK BIT(24)
+#define ADPCTL_ADP_TMOUT_INT BIT(23)
+#define ADPCTL_ADP_SNS_INT BIT(22)
+#define ADPCTL_ADP_PRB_INT BIT(21)
+#define ADPCTL_ADPENA BIT(20)
+#define ADPCTL_ADPRES BIT(19)
+#define ADPCTL_ENASNS BIT(18)
+#define ADPCTL_ENAPRB BIT(17)
+#define ADPCTL_RTIM_MASK (0x7ff << 6)
+#define ADPCTL_RTIM_SHIFT 6
+#define ADPCTL_PRB_PER_MASK (0x3 << 4)
+#define ADPCTL_PRB_PER_SHIFT 4
+#define ADPCTL_PRB_DELTA_MASK (0x3 << 2)
+#define ADPCTL_PRB_DELTA_SHIFT 2
+#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
+#define ADPCTL_PRB_DSCHRG_SHIFT 0
+
+#define GREFCLK HSOTG_REG(0x0064)
+#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
+#define GREFCLK_REFCLKPER_SHIFT 15
+#define GREFCLK_REF_CLK_MODE BIT(14)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
#define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0
-#define GINTMSK2 HSOTG_REG(0x0068)
-#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
+#define GINTMSK2 HSOTG_REG(0x0068)
+#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
-#define GINTSTS2 HSOTG_REG(0x006c)
-#define GINTSTS2_WKUP_ALERT_INT BIT(0)
+#define GINTSTS2 HSOTG_REG(0x006c)
+#define GINTSTS2_WKUP_ALERT_INT BIT(0)
-#define HPTXFSIZ HSOTG_REG(0x100)
+#define HPTXFSIZ HSOTG_REG(0x100)
/* Use FIFOSIZE_* constants to access this register */
-#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4))
+#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4))
/* Use FIFOSIZE_* constants to access this register */
/* These apply to the GNPTXFSIZ, HPTXFSIZ and DPTXFSIZN registers */
-#define FIFOSIZE_DEPTH_MASK (0xffff << 16)
-#define FIFOSIZE_DEPTH_SHIFT 16
-#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
-#define FIFOSIZE_STARTADDR_SHIFT 0
-#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff)
+#define FIFOSIZE_DEPTH_MASK (0xffff << 16)
+#define FIFOSIZE_DEPTH_SHIFT 16
+#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
+#define FIFOSIZE_STARTADDR_SHIFT 0
+#define FIFOSIZE_DEPTH_GET(_x) (((_x) >> 16) & 0xffff)
/* Device mode registers */
-#define DCFG HSOTG_REG(0x800)
-#define DCFG_DESCDMA_EN BIT(23)
-#define DCFG_EPMISCNT_MASK (0x1f << 18)
-#define DCFG_EPMISCNT_SHIFT 18
-#define DCFG_EPMISCNT_LIMIT 0x1f
-#define DCFG_EPMISCNT(_x) ((_x) << 18)
-#define DCFG_IPG_ISOC_SUPPORDED BIT(17)
-#define DCFG_PERFRINT_MASK (0x3 << 11)
-#define DCFG_PERFRINT_SHIFT 11
-#define DCFG_PERFRINT_LIMIT 0x3
-#define DCFG_PERFRINT(_x) ((_x) << 11)
-#define DCFG_DEVADDR_MASK (0x7f << 4)
-#define DCFG_DEVADDR_SHIFT 4
-#define DCFG_DEVADDR_LIMIT 0x7f
-#define DCFG_DEVADDR(_x) ((_x) << 4)
-#define DCFG_NZ_STS_OUT_HSHK BIT(2)
-#define DCFG_DEVSPD_MASK (0x3 << 0)
-#define DCFG_DEVSPD_SHIFT 0
-#define DCFG_DEVSPD_HS 0
-#define DCFG_DEVSPD_FS 1
-#define DCFG_DEVSPD_LS 2
-#define DCFG_DEVSPD_FS48 3
-
-#define DCTL HSOTG_REG(0x804)
+#define DCFG HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN BIT(23)
+#define DCFG_EPMISCNT_MASK (0x1f << 18)
+#define DCFG_EPMISCNT_SHIFT 18
+#define DCFG_EPMISCNT_LIMIT 0x1f
+#define DCFG_EPMISCNT(_x) ((_x) << 18)
+#define DCFG_IPG_ISOC_SUPPORDED BIT(17)
+#define DCFG_PERFRINT_MASK (0x3 << 11)
+#define DCFG_PERFRINT_SHIFT 11
+#define DCFG_PERFRINT_LIMIT 0x3
+#define DCFG_PERFRINT(_x) ((_x) << 11)
+#define DCFG_DEVADDR_MASK (0x7f << 4)
+#define DCFG_DEVADDR_SHIFT 4
+#define DCFG_DEVADDR_LIMIT 0x7f
+#define DCFG_DEVADDR(_x) ((_x) << 4)
+#define DCFG_NZ_STS_OUT_HSHK BIT(2)
+#define DCFG_DEVSPD_MASK (0x3 << 0)
+#define DCFG_DEVSPD_SHIFT 0
+#define DCFG_DEVSPD_HS 0
+#define DCFG_DEVSPD_FS 1
+#define DCFG_DEVSPD_LS 2
+#define DCFG_DEVSPD_FS48 3
+
+#define DCTL HSOTG_REG(0x804)
#define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19)
-#define DCTL_PWRONPRGDONE BIT(11)
-#define DCTL_CGOUTNAK BIT(10)
-#define DCTL_SGOUTNAK BIT(9)
-#define DCTL_CGNPINNAK BIT(8)
-#define DCTL_SGNPINNAK BIT(7)
-#define DCTL_TSTCTL_MASK (0x7 << 4)
-#define DCTL_TSTCTL_SHIFT 4
-#define DCTL_GOUTNAKSTS BIT(3)
-#define DCTL_GNPINNAKSTS BIT(2)
-#define DCTL_SFTDISCON BIT(1)
-#define DCTL_RMTWKUPSIG BIT(0)
-
-#define DSTS HSOTG_REG(0x808)
-#define DSTS_SOFFN_MASK (0x3fff << 8)
-#define DSTS_SOFFN_SHIFT 8
-#define DSTS_SOFFN_LIMIT 0x3fff
-#define DSTS_SOFFN(_x) ((_x) << 8)
-#define DSTS_ERRATICERR BIT(3)
-#define DSTS_ENUMSPD_MASK (0x3 << 1)
-#define DSTS_ENUMSPD_SHIFT 1
-#define DSTS_ENUMSPD_HS 0
-#define DSTS_ENUMSPD_FS 1
-#define DSTS_ENUMSPD_LS 2
-#define DSTS_ENUMSPD_FS48 3
-#define DSTS_SUSPSTS BIT(0)
-
-#define DIEPMSK HSOTG_REG(0x810)
-#define DIEPMSK_NAKMSK BIT(13)
-#define DIEPMSK_BNAININTRMSK BIT(9)
-#define DIEPMSK_TXFIFOUNDRNMSK BIT(8)
-#define DIEPMSK_TXFIFOEMPTY BIT(7)
-#define DIEPMSK_INEPNAKEFFMSK BIT(6)
-#define DIEPMSK_INTKNEPMISMSK BIT(5)
-#define DIEPMSK_INTKNTXFEMPMSK BIT(4)
-#define DIEPMSK_TIMEOUTMSK BIT(3)
-#define DIEPMSK_AHBERRMSK BIT(2)
-#define DIEPMSK_EPDISBLDMSK BIT(1)
-#define DIEPMSK_XFERCOMPLMSK BIT(0)
-
-#define DOEPMSK HSOTG_REG(0x814)
-#define DOEPMSK_BNAMSK BIT(9)
-#define DOEPMSK_BACK2BACKSETUP BIT(6)
-#define DOEPMSK_STSPHSERCVDMSK BIT(5)
-#define DOEPMSK_OUTTKNEPDISMSK BIT(4)
-#define DOEPMSK_SETUPMSK BIT(3)
-#define DOEPMSK_AHBERRMSK BIT(2)
-#define DOEPMSK_EPDISBLDMSK BIT(1)
-#define DOEPMSK_XFERCOMPLMSK BIT(0)
-
-#define DAINT HSOTG_REG(0x818)
-#define DAINTMSK HSOTG_REG(0x81C)
-#define DAINT_OUTEP_SHIFT 16
-#define DAINT_OUTEP(_x) (1 << ((_x) + 16))
-#define DAINT_INEP(_x) (1 << (_x))
-
-#define DTKNQR1 HSOTG_REG(0x820)
-#define DTKNQR2 HSOTG_REG(0x824)
-#define DTKNQR3 HSOTG_REG(0x830)
-#define DTKNQR4 HSOTG_REG(0x834)
-#define DIEPEMPMSK HSOTG_REG(0x834)
-
-#define DVBUSDIS HSOTG_REG(0x828)
-#define DVBUSPULSE HSOTG_REG(0x82C)
-
-#define DIEPCTL0 HSOTG_REG(0x900)
-#define DIEPCTL(_a) HSOTG_REG(0x900 + ((_a) * 0x20))
-
-#define DOEPCTL0 HSOTG_REG(0xB00)
-#define DOEPCTL(_a) HSOTG_REG(0xB00 + ((_a) * 0x20))
+#define DCTL_PWRONPRGDONE BIT(11)
+#define DCTL_CGOUTNAK BIT(10)
+#define DCTL_SGOUTNAK BIT(9)
+#define DCTL_CGNPINNAK BIT(8)
+#define DCTL_SGNPINNAK BIT(7)
+#define DCTL_TSTCTL_MASK (0x7 << 4)
+#define DCTL_TSTCTL_SHIFT 4
+#define DCTL_GOUTNAKSTS BIT(3)
+#define DCTL_GNPINNAKSTS BIT(2)
+#define DCTL_SFTDISCON BIT(1)
+#define DCTL_RMTWKUPSIG BIT(0)
+
+#define DSTS HSOTG_REG(0x808)
+#define DSTS_SOFFN_MASK (0x3fff << 8)
+#define DSTS_SOFFN_SHIFT 8
+#define DSTS_SOFFN_LIMIT 0x3fff
+#define DSTS_SOFFN(_x) ((_x) << 8)
+#define DSTS_ERRATICERR BIT(3)
+#define DSTS_ENUMSPD_MASK (0x3 << 1)
+#define DSTS_ENUMSPD_SHIFT 1
+#define DSTS_ENUMSPD_HS 0
+#define DSTS_ENUMSPD_FS 1
+#define DSTS_ENUMSPD_LS 2
+#define DSTS_ENUMSPD_FS48 3
+#define DSTS_SUSPSTS BIT(0)
+
+#define DIEPMSK HSOTG_REG(0x810)
+#define DIEPMSK_NAKMSK BIT(13)
+#define DIEPMSK_BNAININTRMSK BIT(9)
+#define DIEPMSK_TXFIFOUNDRNMSK BIT(8)
+#define DIEPMSK_TXFIFOEMPTY BIT(7)
+#define DIEPMSK_INEPNAKEFFMSK BIT(6)
+#define DIEPMSK_INTKNEPMISMSK BIT(5)
+#define DIEPMSK_INTKNTXFEMPMSK BIT(4)
+#define DIEPMSK_TIMEOUTMSK BIT(3)
+#define DIEPMSK_AHBERRMSK BIT(2)
+#define DIEPMSK_EPDISBLDMSK BIT(1)
+#define DIEPMSK_XFERCOMPLMSK BIT(0)
+
+#define DOEPMSK HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK BIT(9)
+#define DOEPMSK_BACK2BACKSETUP BIT(6)
+#define DOEPMSK_STSPHSERCVDMSK BIT(5)
+#define DOEPMSK_OUTTKNEPDISMSK BIT(4)
+#define DOEPMSK_SETUPMSK BIT(3)
+#define DOEPMSK_AHBERRMSK BIT(2)
+#define DOEPMSK_EPDISBLDMSK BIT(1)
+#define DOEPMSK_XFERCOMPLMSK BIT(0)
+
+#define DAINT HSOTG_REG(0x818)
+#define DAINTMSK HSOTG_REG(0x81C)
+#define DAINT_OUTEP_SHIFT 16
+#define DAINT_OUTEP(_x) (1 << ((_x) + 16))
+#define DAINT_INEP(_x) (1 << (_x))
+
+#define DTKNQR1 HSOTG_REG(0x820)
+#define DTKNQR2 HSOTG_REG(0x824)
+#define DTKNQR3 HSOTG_REG(0x830)
+#define DTKNQR4 HSOTG_REG(0x834)
+#define DIEPEMPMSK HSOTG_REG(0x834)
+
+#define DVBUSDIS HSOTG_REG(0x828)
+#define DVBUSPULSE HSOTG_REG(0x82C)
+
+#define DIEPCTL0 HSOTG_REG(0x900)
+#define DIEPCTL(_a) HSOTG_REG(0x900 + ((_a) * 0x20))
+
+#define DOEPCTL0 HSOTG_REG(0xB00)
+#define DOEPCTL(_a) HSOTG_REG(0xB00 + ((_a) * 0x20))
/* EP0 specialness:
* bits[29..28] - reserved (no SetD0PID, SetD1PID)
* bits[25..22] - should always be zero, this isn't a periodic endpoint
* bits[10..0] - MPS setting different for EP0
*/
-#define D0EPCTL_MPS_MASK (0x3 << 0)
-#define D0EPCTL_MPS_SHIFT 0
-#define D0EPCTL_MPS_64 0
-#define D0EPCTL_MPS_32 1
-#define D0EPCTL_MPS_16 2
-#define D0EPCTL_MPS_8 3
-
-#define DXEPCTL_EPENA BIT(31)
-#define DXEPCTL_EPDIS BIT(30)
-#define DXEPCTL_SETD1PID BIT(29)
-#define DXEPCTL_SETODDFR BIT(29)
-#define DXEPCTL_SETD0PID BIT(28)
-#define DXEPCTL_SETEVENFR BIT(28)
-#define DXEPCTL_SNAK BIT(27)
-#define DXEPCTL_CNAK BIT(26)
-#define DXEPCTL_TXFNUM_MASK (0xf << 22)
-#define DXEPCTL_TXFNUM_SHIFT 22
-#define DXEPCTL_TXFNUM_LIMIT 0xf
-#define DXEPCTL_TXFNUM(_x) ((_x) << 22)
-#define DXEPCTL_STALL BIT(21)
-#define DXEPCTL_SNP BIT(20)
-#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
-#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18)
-#define DXEPCTL_EPTYPE_ISO (0x1 << 18)
-#define DXEPCTL_EPTYPE_BULK (0x2 << 18)
-#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18)
-
-#define DXEPCTL_NAKSTS BIT(17)
-#define DXEPCTL_DPID BIT(16)
-#define DXEPCTL_EOFRNUM BIT(16)
-#define DXEPCTL_USBACTEP BIT(15)
-#define DXEPCTL_NEXTEP_MASK (0xf << 11)
-#define DXEPCTL_NEXTEP_SHIFT 11
-#define DXEPCTL_NEXTEP_LIMIT 0xf
-#define DXEPCTL_NEXTEP(_x) ((_x) << 11)
-#define DXEPCTL_MPS_MASK (0x7ff << 0)
-#define DXEPCTL_MPS_SHIFT 0
-#define DXEPCTL_MPS_LIMIT 0x7ff
-#define DXEPCTL_MPS(_x) ((_x) << 0)
-
-#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
-#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
-#define DXEPINT_SETUP_RCVD BIT(15)
-#define DXEPINT_NYETINTRPT BIT(14)
-#define DXEPINT_NAKINTRPT BIT(13)
-#define DXEPINT_BBLEERRINTRPT BIT(12)
-#define DXEPINT_PKTDRPSTS BIT(11)
-#define DXEPINT_BNAINTR BIT(9)
-#define DXEPINT_TXFIFOUNDRN BIT(8)
-#define DXEPINT_OUTPKTERR BIT(8)
-#define DXEPINT_TXFEMP BIT(7)
-#define DXEPINT_INEPNAKEFF BIT(6)
-#define DXEPINT_BACK2BACKSETUP BIT(6)
-#define DXEPINT_INTKNEPMIS BIT(5)
-#define DXEPINT_STSPHSERCVD BIT(5)
-#define DXEPINT_INTKNTXFEMP BIT(4)
-#define DXEPINT_OUTTKNEPDIS BIT(4)
-#define DXEPINT_TIMEOUT BIT(3)
-#define DXEPINT_SETUP BIT(3)
-#define DXEPINT_AHBERR BIT(2)
-#define DXEPINT_EPDISBLD BIT(1)
-#define DXEPINT_XFERCOMPL BIT(0)
-
-#define DIEPTSIZ0 HSOTG_REG(0x910)
-#define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19)
-#define DIEPTSIZ0_PKTCNT_SHIFT 19
-#define DIEPTSIZ0_PKTCNT_LIMIT 0x3
-#define DIEPTSIZ0_PKTCNT(_x) ((_x) << 19)
-#define DIEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
-#define DIEPTSIZ0_XFERSIZE_SHIFT 0
-#define DIEPTSIZ0_XFERSIZE_LIMIT 0x7f
-#define DIEPTSIZ0_XFERSIZE(_x) ((_x) << 0)
-
-#define DOEPTSIZ0 HSOTG_REG(0xB10)
-#define DOEPTSIZ0_SUPCNT_MASK (0x3 << 29)
-#define DOEPTSIZ0_SUPCNT_SHIFT 29
-#define DOEPTSIZ0_SUPCNT_LIMIT 0x3
-#define DOEPTSIZ0_SUPCNT(_x) ((_x) << 29)
-#define DOEPTSIZ0_PKTCNT BIT(19)
-#define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
-#define DOEPTSIZ0_XFERSIZE_SHIFT 0
-
-#define DIEPTSIZ(_a) HSOTG_REG(0x910 + ((_a) * 0x20))
-#define DOEPTSIZ(_a) HSOTG_REG(0xB10 + ((_a) * 0x20))
-#define DXEPTSIZ_MC_MASK (0x3 << 29)
-#define DXEPTSIZ_MC_SHIFT 29
-#define DXEPTSIZ_MC_LIMIT 0x3
-#define DXEPTSIZ_MC(_x) ((_x) << 29)
-#define DXEPTSIZ_PKTCNT_MASK (0x3ff << 19)
-#define DXEPTSIZ_PKTCNT_SHIFT 19
-#define DXEPTSIZ_PKTCNT_LIMIT 0x3ff
-#define DXEPTSIZ_PKTCNT_GET(_v) (((_v) >> 19) & 0x3ff)
-#define DXEPTSIZ_PKTCNT(_x) ((_x) << 19)
-#define DXEPTSIZ_XFERSIZE_MASK (0x7ffff << 0)
-#define DXEPTSIZ_XFERSIZE_SHIFT 0
-#define DXEPTSIZ_XFERSIZE_LIMIT 0x7ffff
-#define DXEPTSIZ_XFERSIZE_GET(_v) (((_v) >> 0) & 0x7ffff)
-#define DXEPTSIZ_XFERSIZE(_x) ((_x) << 0)
-
-#define DIEPDMA(_a) HSOTG_REG(0x914 + ((_a) * 0x20))
-#define DOEPDMA(_a) HSOTG_REG(0xB14 + ((_a) * 0x20))
-
-#define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20))
-
-#define PCGCTL HSOTG_REG(0x0e00)
-#define PCGCTL_IF_DEV_MODE BIT(31)
-#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29)
-#define PCGCTL_P2HD_PRT_SPD_SHIFT 29
-#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27)
-#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27
-#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20)
-#define PCGCTL_MAC_DEV_ADDR_SHIFT 20
-#define PCGCTL_MAX_TERMSEL BIT(19)
-#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17)
-#define PCGCTL_MAX_XCVRSELECT_SHIFT 17
-#define PCGCTL_PORT_POWER BIT(16)
-#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14)
-#define PCGCTL_PRT_CLK_SEL_SHIFT 14
-#define PCGCTL_ESS_REG_RESTORED BIT(13)
-#define PCGCTL_EXTND_HIBER_SWITCH BIT(12)
-#define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11)
-#define PCGCTL_ENBL_EXTND_HIBER BIT(10)
-#define PCGCTL_RESTOREMODE BIT(9)
-#define PCGCTL_RESETAFTSUSP BIT(8)
-#define PCGCTL_DEEP_SLEEP BIT(7)
-#define PCGCTL_PHY_IN_SLEEP BIT(6)
-#define PCGCTL_ENBL_SLEEP_GATING BIT(5)
-#define PCGCTL_RSTPDWNMODULE BIT(3)
-#define PCGCTL_PWRCLMP BIT(2)
-#define PCGCTL_GATEHCLK BIT(1)
-#define PCGCTL_STOPPCLK BIT(0)
+#define D0EPCTL_MPS_MASK (0x3 << 0)
+#define D0EPCTL_MPS_SHIFT 0
+#define D0EPCTL_MPS_64 0
+#define D0EPCTL_MPS_32 1
+#define D0EPCTL_MPS_16 2
+#define D0EPCTL_MPS_8 3
+
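Editorial note: EP0 encodes its maximum packet size as a 2-bit selector rather than a byte count, which is the point of the D0EPCTL_MPS_* values above. A minimal illustrative sketch (not part of the patch) that decodes that selector back into bytes using only the macros defined here:

/* Illustrative only: translate the EP0 MPS selector into a byte count. */
static inline unsigned int dwc2_ep0_mps_bytes(uint32_t diepctl0)
{
    switch ((diepctl0 & D0EPCTL_MPS_MASK) >> D0EPCTL_MPS_SHIFT) {
    case D0EPCTL_MPS_64:
        return 64;
    case D0EPCTL_MPS_32:
        return 32;
    case D0EPCTL_MPS_16:
        return 16;
    case D0EPCTL_MPS_8:
    default:
        return 8;
    }
}

For non-zero endpoints the DXEPCTL_MPS field below holds the packet size directly, so no such translation is needed there.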
+#define DXEPCTL_EPENA BIT(31)
+#define DXEPCTL_EPDIS BIT(30)
+#define DXEPCTL_SETD1PID BIT(29)
+#define DXEPCTL_SETODDFR BIT(29)
+#define DXEPCTL_SETD0PID BIT(28)
+#define DXEPCTL_SETEVENFR BIT(28)
+#define DXEPCTL_SNAK BIT(27)
+#define DXEPCTL_CNAK BIT(26)
+#define DXEPCTL_TXFNUM_MASK (0xf << 22)
+#define DXEPCTL_TXFNUM_SHIFT 22
+#define DXEPCTL_TXFNUM_LIMIT 0xf
+#define DXEPCTL_TXFNUM(_x) ((_x) << 22)
+#define DXEPCTL_STALL BIT(21)
+#define DXEPCTL_SNP BIT(20)
+#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
+#define DXEPCTL_EPTYPE_CONTROL (0x0 << 18)
+#define DXEPCTL_EPTYPE_ISO (0x1 << 18)
+#define DXEPCTL_EPTYPE_BULK (0x2 << 18)
+#define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18)
+
+#define DXEPCTL_NAKSTS BIT(17)
+#define DXEPCTL_DPID BIT(16)
+#define DXEPCTL_EOFRNUM BIT(16)
+#define DXEPCTL_USBACTEP BIT(15)
+#define DXEPCTL_NEXTEP_MASK (0xf << 11)
+#define DXEPCTL_NEXTEP_SHIFT 11
+#define DXEPCTL_NEXTEP_LIMIT 0xf
+#define DXEPCTL_NEXTEP(_x) ((_x) << 11)
+#define DXEPCTL_MPS_MASK (0x7ff << 0)
+#define DXEPCTL_MPS_SHIFT 0
+#define DXEPCTL_MPS_LIMIT 0x7ff
+#define DXEPCTL_MPS(_x) ((_x) << 0)
+
+#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
+#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
+#define DXEPINT_SETUP_RCVD BIT(15)
+#define DXEPINT_NYETINTRPT BIT(14)
+#define DXEPINT_NAKINTRPT BIT(13)
+#define DXEPINT_BBLEERRINTRPT BIT(12)
+#define DXEPINT_PKTDRPSTS BIT(11)
+#define DXEPINT_BNAINTR BIT(9)
+#define DXEPINT_TXFIFOUNDRN BIT(8)
+#define DXEPINT_OUTPKTERR BIT(8)
+#define DXEPINT_TXFEMP BIT(7)
+#define DXEPINT_INEPNAKEFF BIT(6)
+#define DXEPINT_BACK2BACKSETUP BIT(6)
+#define DXEPINT_INTKNEPMIS BIT(5)
+#define DXEPINT_STSPHSERCVD BIT(5)
+#define DXEPINT_INTKNTXFEMP BIT(4)
+#define DXEPINT_OUTTKNEPDIS BIT(4)
+#define DXEPINT_TIMEOUT BIT(3)
+#define DXEPINT_SETUP BIT(3)
+#define DXEPINT_AHBERR BIT(2)
+#define DXEPINT_EPDISBLD BIT(1)
+#define DXEPINT_XFERCOMPL BIT(0)
+
+#define DIEPTSIZ0 HSOTG_REG(0x910)
+#define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19)
+#define DIEPTSIZ0_PKTCNT_SHIFT 19
+#define DIEPTSIZ0_PKTCNT_LIMIT 0x3
+#define DIEPTSIZ0_PKTCNT(_x) ((_x) << 19)
+#define DIEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
+#define DIEPTSIZ0_XFERSIZE_SHIFT 0
+#define DIEPTSIZ0_XFERSIZE_LIMIT 0x7f
+#define DIEPTSIZ0_XFERSIZE(_x) ((_x) << 0)
+
+#define DOEPTSIZ0 HSOTG_REG(0xB10)
+#define DOEPTSIZ0_SUPCNT_MASK (0x3 << 29)
+#define DOEPTSIZ0_SUPCNT_SHIFT 29
+#define DOEPTSIZ0_SUPCNT_LIMIT 0x3
+#define DOEPTSIZ0_SUPCNT(_x) ((_x) << 29)
+#define DOEPTSIZ0_PKTCNT BIT(19)
+#define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0)
+#define DOEPTSIZ0_XFERSIZE_SHIFT 0
+
+#define DIEPTSIZ(_a) HSOTG_REG(0x910 + ((_a) * 0x20))
+#define DOEPTSIZ(_a) HSOTG_REG(0xB10 + ((_a) * 0x20))
+#define DXEPTSIZ_MC_MASK (0x3 << 29)
+#define DXEPTSIZ_MC_SHIFT 29
+#define DXEPTSIZ_MC_LIMIT 0x3
+#define DXEPTSIZ_MC(_x) ((_x) << 29)
+#define DXEPTSIZ_PKTCNT_MASK (0x3ff << 19)
+#define DXEPTSIZ_PKTCNT_SHIFT 19
+#define DXEPTSIZ_PKTCNT_LIMIT 0x3ff
+#define DXEPTSIZ_PKTCNT_GET(_v) (((_v) >> 19) & 0x3ff)
+#define DXEPTSIZ_PKTCNT(_x) ((_x) << 19)
+#define DXEPTSIZ_XFERSIZE_MASK (0x7ffff << 0)
+#define DXEPTSIZ_XFERSIZE_SHIFT 0
+#define DXEPTSIZ_XFERSIZE_LIMIT 0x7ffff
+#define DXEPTSIZ_XFERSIZE_GET(_v) (((_v) >> 0) & 0x7ffff)
+#define DXEPTSIZ_XFERSIZE(_x) ((_x) << 0)
+
+#define DIEPDMA(_a) HSOTG_REG(0x914 + ((_a) * 0x20))
+#define DOEPDMA(_a) HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20))
+
+#define PCGCTL HSOTG_REG(0x0e00)
+#define PCGCTL_IF_DEV_MODE BIT(31)
+#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29)
+#define PCGCTL_P2HD_PRT_SPD_SHIFT 29
+#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27)
+#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27
+#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20)
+#define PCGCTL_MAC_DEV_ADDR_SHIFT 20
+#define PCGCTL_MAX_TERMSEL BIT(19)
+#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17)
+#define PCGCTL_MAX_XCVRSELECT_SHIFT 17
+#define PCGCTL_PORT_POWER BIT(16)
+#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14)
+#define PCGCTL_PRT_CLK_SEL_SHIFT 14
+#define PCGCTL_ESS_REG_RESTORED BIT(13)
+#define PCGCTL_EXTND_HIBER_SWITCH BIT(12)
+#define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11)
+#define PCGCTL_ENBL_EXTND_HIBER BIT(10)
+#define PCGCTL_RESTOREMODE BIT(9)
+#define PCGCTL_RESETAFTSUSP BIT(8)
+#define PCGCTL_DEEP_SLEEP BIT(7)
+#define PCGCTL_PHY_IN_SLEEP BIT(6)
+#define PCGCTL_ENBL_SLEEP_GATING BIT(5)
+#define PCGCTL_RSTPDWNMODULE BIT(3)
+#define PCGCTL_PWRCLMP BIT(2)
+#define PCGCTL_GATEHCLK BIT(1)
+#define PCGCTL_STOPPCLK BIT(0)
#define PCGCCTL1 HSOTG_REG(0xe04)
#define PCGCCTL1_TIMER (0x3 << 1)
#define PCGCCTL1_GATEEN BIT(0)
-#define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000))
+#define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000))
/* Host Mode Registers */
-#define HCFG HSOTG_REG(0x0400)
-#define HCFG_MODECHTIMEN BIT(31)
-#define HCFG_PERSCHEDENA BIT(26)
-#define HCFG_FRLISTEN_MASK (0x3 << 24)
-#define HCFG_FRLISTEN_SHIFT 24
-#define HCFG_FRLISTEN_8 (0 << 24)
-#define FRLISTEN_8_SIZE 8
-#define HCFG_FRLISTEN_16 BIT(24)
-#define FRLISTEN_16_SIZE 16
-#define HCFG_FRLISTEN_32 (2 << 24)
-#define FRLISTEN_32_SIZE 32
-#define HCFG_FRLISTEN_64 (3 << 24)
-#define FRLISTEN_64_SIZE 64
-#define HCFG_DESCDMA BIT(23)
-#define HCFG_RESVALID_MASK (0xff << 8)
-#define HCFG_RESVALID_SHIFT 8
-#define HCFG_ENA32KHZ BIT(7)
-#define HCFG_FSLSSUPP BIT(2)
-#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0)
-#define HCFG_FSLSPCLKSEL_SHIFT 0
-#define HCFG_FSLSPCLKSEL_30_60_MHZ 0
-#define HCFG_FSLSPCLKSEL_48_MHZ 1
-#define HCFG_FSLSPCLKSEL_6_MHZ 2
-
-#define HFIR HSOTG_REG(0x0404)
-#define HFIR_FRINT_MASK (0xffff << 0)
-#define HFIR_FRINT_SHIFT 0
-#define HFIR_RLDCTRL BIT(16)
-
-#define HFNUM HSOTG_REG(0x0408)
-#define HFNUM_FRREM_MASK (0xffff << 16)
-#define HFNUM_FRREM_SHIFT 16
-#define HFNUM_FRNUM_MASK (0xffff << 0)
-#define HFNUM_FRNUM_SHIFT 0
-#define HFNUM_MAX_FRNUM 0x3fff
-
-#define HPTXSTS HSOTG_REG(0x0410)
-#define TXSTS_QTOP_ODD BIT(31)
-#define TXSTS_QTOP_CHNEP_MASK (0xf << 27)
-#define TXSTS_QTOP_CHNEP_SHIFT 27
-#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
-#define TXSTS_QTOP_TOKEN_SHIFT 25
-#define TXSTS_QTOP_TERMINATE BIT(24)
-#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
-#define TXSTS_QSPCAVAIL_SHIFT 16
-#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
-#define TXSTS_FSPCAVAIL_SHIFT 0
-
-#define HAINT HSOTG_REG(0x0414)
-#define HAINTMSK HSOTG_REG(0x0418)
-#define HFLBADDR HSOTG_REG(0x041c)
-
-#define HPRT0 HSOTG_REG(0x0440)
-#define HPRT0_SPD_MASK (0x3 << 17)
-#define HPRT0_SPD_SHIFT 17
-#define HPRT0_SPD_HIGH_SPEED 0
-#define HPRT0_SPD_FULL_SPEED 1
-#define HPRT0_SPD_LOW_SPEED 2
-#define HPRT0_TSTCTL_MASK (0xf << 13)
-#define HPRT0_TSTCTL_SHIFT 13
-#define HPRT0_PWR BIT(12)
-#define HPRT0_LNSTS_MASK (0x3 << 10)
-#define HPRT0_LNSTS_SHIFT 10
-#define HPRT0_RST BIT(8)
-#define HPRT0_SUSP BIT(7)
-#define HPRT0_RES BIT(6)
-#define HPRT0_OVRCURRCHG BIT(5)
-#define HPRT0_OVRCURRACT BIT(4)
-#define HPRT0_ENACHG BIT(3)
-#define HPRT0_ENA BIT(2)
-#define HPRT0_CONNDET BIT(1)
-#define HPRT0_CONNSTS BIT(0)
-
-#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch))
-#define HCCHAR_CHENA BIT(31)
-#define HCCHAR_CHDIS BIT(30)
-#define HCCHAR_ODDFRM BIT(29)
-#define HCCHAR_DEVADDR_MASK (0x7f << 22)
-#define HCCHAR_DEVADDR_SHIFT 22
-#define HCCHAR_MULTICNT_MASK (0x3 << 20)
-#define HCCHAR_MULTICNT_SHIFT 20
-#define HCCHAR_EPTYPE_MASK (0x3 << 18)
-#define HCCHAR_EPTYPE_SHIFT 18
-#define HCCHAR_LSPDDEV BIT(17)
-#define HCCHAR_EPDIR BIT(15)
-#define HCCHAR_EPNUM_MASK (0xf << 11)
-#define HCCHAR_EPNUM_SHIFT 11
-#define HCCHAR_MPS_MASK (0x7ff << 0)
-#define HCCHAR_MPS_SHIFT 0
-
-#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch))
-#define HCSPLT_SPLTENA BIT(31)
-#define HCSPLT_COMPSPLT BIT(16)
-#define HCSPLT_XACTPOS_MASK (0x3 << 14)
-#define HCSPLT_XACTPOS_SHIFT 14
-#define HCSPLT_XACTPOS_MID 0
-#define HCSPLT_XACTPOS_END 1
-#define HCSPLT_XACTPOS_BEGIN 2
-#define HCSPLT_XACTPOS_ALL 3
-#define HCSPLT_HUBADDR_MASK (0x7f << 7)
-#define HCSPLT_HUBADDR_SHIFT 7
-#define HCSPLT_PRTADDR_MASK (0x7f << 0)
-#define HCSPLT_PRTADDR_SHIFT 0
-
-#define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch))
-#define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch))
-#define HCINTMSK_RESERVED14_31 (0x3ffff << 14)
-#define HCINTMSK_FRM_LIST_ROLL BIT(13)
-#define HCINTMSK_XCS_XACT BIT(12)
-#define HCINTMSK_BNA BIT(11)
-#define HCINTMSK_DATATGLERR BIT(10)
-#define HCINTMSK_FRMOVRUN BIT(9)
-#define HCINTMSK_BBLERR BIT(8)
-#define HCINTMSK_XACTERR BIT(7)
-#define HCINTMSK_NYET BIT(6)
-#define HCINTMSK_ACK BIT(5)
-#define HCINTMSK_NAK BIT(4)
-#define HCINTMSK_STALL BIT(3)
-#define HCINTMSK_AHBERR BIT(2)
-#define HCINTMSK_CHHLTD BIT(1)
-#define HCINTMSK_XFERCOMPL BIT(0)
-
-#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch))
-#define TSIZ_DOPNG BIT(31)
-#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
-#define TSIZ_SC_MC_PID_SHIFT 29
-#define TSIZ_SC_MC_PID_DATA0 0
-#define TSIZ_SC_MC_PID_DATA2 1
-#define TSIZ_SC_MC_PID_DATA1 2
-#define TSIZ_SC_MC_PID_MDATA 3
-#define TSIZ_SC_MC_PID_SETUP 3
-#define TSIZ_PKTCNT_MASK (0x3ff << 19)
-#define TSIZ_PKTCNT_SHIFT 19
-#define TSIZ_NTD_MASK (0xff << 8)
-#define TSIZ_NTD_SHIFT 8
-#define TSIZ_SCHINFO_MASK (0xff << 0)
-#define TSIZ_SCHINFO_SHIFT 0
-#define TSIZ_XFERSIZE_MASK (0x7ffff << 0)
-#define TSIZ_XFERSIZE_SHIFT 0
-
-#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch))
-
-#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch))
-
-#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
+#define HCFG HSOTG_REG(0x0400)
+#define HCFG_MODECHTIMEN BIT(31)
+#define HCFG_PERSCHEDENA BIT(26)
+#define HCFG_FRLISTEN_MASK (0x3 << 24)
+#define HCFG_FRLISTEN_SHIFT 24
+#define HCFG_FRLISTEN_8 (0 << 24)
+#define FRLISTEN_8_SIZE 8
+#define HCFG_FRLISTEN_16 BIT(24)
+#define FRLISTEN_16_SIZE 16
+#define HCFG_FRLISTEN_32 (2 << 24)
+#define FRLISTEN_32_SIZE 32
+#define HCFG_FRLISTEN_64 (3 << 24)
+#define FRLISTEN_64_SIZE 64
+#define HCFG_DESCDMA BIT(23)
+#define HCFG_RESVALID_MASK (0xff << 8)
+#define HCFG_RESVALID_SHIFT 8
+#define HCFG_ENA32KHZ BIT(7)
+#define HCFG_FSLSSUPP BIT(2)
+#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0)
+#define HCFG_FSLSPCLKSEL_SHIFT 0
+#define HCFG_FSLSPCLKSEL_30_60_MHZ 0
+#define HCFG_FSLSPCLKSEL_48_MHZ 1
+#define HCFG_FSLSPCLKSEL_6_MHZ 2
+
+#define HFIR HSOTG_REG(0x0404)
+#define HFIR_FRINT_MASK (0xffff << 0)
+#define HFIR_FRINT_SHIFT 0
+#define HFIR_RLDCTRL BIT(16)
+
+#define HFNUM HSOTG_REG(0x0408)
+#define HFNUM_FRREM_MASK (0xffff << 16)
+#define HFNUM_FRREM_SHIFT 16
+#define HFNUM_FRNUM_MASK (0xffff << 0)
+#define HFNUM_FRNUM_SHIFT 0
+#define HFNUM_MAX_FRNUM 0x3fff
+
+#define HPTXSTS HSOTG_REG(0x0410)
+#define TXSTS_QTOP_ODD BIT(31)
+#define TXSTS_QTOP_CHNEP_MASK (0xf << 27)
+#define TXSTS_QTOP_CHNEP_SHIFT 27
+#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
+#define TXSTS_QTOP_TOKEN_SHIFT 25
+#define TXSTS_QTOP_TERMINATE BIT(24)
+#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
+#define TXSTS_QSPCAVAIL_SHIFT 16
+#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
+#define TXSTS_FSPCAVAIL_SHIFT 0
+
+#define HAINT HSOTG_REG(0x0414)
+#define HAINTMSK HSOTG_REG(0x0418)
+#define HFLBADDR HSOTG_REG(0x041c)
+
+#define HPRT0 HSOTG_REG(0x0440)
+#define HPRT0_SPD_MASK (0x3 << 17)
+#define HPRT0_SPD_SHIFT 17
+#define HPRT0_SPD_HIGH_SPEED 0
+#define HPRT0_SPD_FULL_SPEED 1
+#define HPRT0_SPD_LOW_SPEED 2
+#define HPRT0_TSTCTL_MASK (0xf << 13)
+#define HPRT0_TSTCTL_SHIFT 13
+#define HPRT0_PWR BIT(12)
+#define HPRT0_LNSTS_MASK (0x3 << 10)
+#define HPRT0_LNSTS_SHIFT 10
+#define HPRT0_RST BIT(8)
+#define HPRT0_SUSP BIT(7)
+#define HPRT0_RES BIT(6)
+#define HPRT0_OVRCURRCHG BIT(5)
+#define HPRT0_OVRCURRACT BIT(4)
+#define HPRT0_ENACHG BIT(3)
+#define HPRT0_ENA BIT(2)
+#define HPRT0_CONNDET BIT(1)
+#define HPRT0_CONNSTS BIT(0)
+
+#define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch))
+#define HCCHAR_CHENA BIT(31)
+#define HCCHAR_CHDIS BIT(30)
+#define HCCHAR_ODDFRM BIT(29)
+#define HCCHAR_DEVADDR_MASK (0x7f << 22)
+#define HCCHAR_DEVADDR_SHIFT 22
+#define HCCHAR_MULTICNT_MASK (0x3 << 20)
+#define HCCHAR_MULTICNT_SHIFT 20
+#define HCCHAR_EPTYPE_MASK (0x3 << 18)
+#define HCCHAR_EPTYPE_SHIFT 18
+#define HCCHAR_LSPDDEV BIT(17)
+#define HCCHAR_EPDIR BIT(15)
+#define HCCHAR_EPNUM_MASK (0xf << 11)
+#define HCCHAR_EPNUM_SHIFT 11
+#define HCCHAR_MPS_MASK (0x7ff << 0)
+#define HCCHAR_MPS_SHIFT 0
+
+#define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch))
+#define HCSPLT_SPLTENA BIT(31)
+#define HCSPLT_COMPSPLT BIT(16)
+#define HCSPLT_XACTPOS_MASK (0x3 << 14)
+#define HCSPLT_XACTPOS_SHIFT 14
+#define HCSPLT_XACTPOS_MID 0
+#define HCSPLT_XACTPOS_END 1
+#define HCSPLT_XACTPOS_BEGIN 2
+#define HCSPLT_XACTPOS_ALL 3
+#define HCSPLT_HUBADDR_MASK (0x7f << 7)
+#define HCSPLT_HUBADDR_SHIFT 7
+#define HCSPLT_PRTADDR_MASK (0x7f << 0)
+#define HCSPLT_PRTADDR_SHIFT 0
+
+#define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch))
+#define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch))
+#define HCINTMSK_RESERVED14_31 (0x3ffff << 14)
+#define HCINTMSK_FRM_LIST_ROLL BIT(13)
+#define HCINTMSK_XCS_XACT BIT(12)
+#define HCINTMSK_BNA BIT(11)
+#define HCINTMSK_DATATGLERR BIT(10)
+#define HCINTMSK_FRMOVRUN BIT(9)
+#define HCINTMSK_BBLERR BIT(8)
+#define HCINTMSK_XACTERR BIT(7)
+#define HCINTMSK_NYET BIT(6)
+#define HCINTMSK_ACK BIT(5)
+#define HCINTMSK_NAK BIT(4)
+#define HCINTMSK_STALL BIT(3)
+#define HCINTMSK_AHBERR BIT(2)
+#define HCINTMSK_CHHLTD BIT(1)
+#define HCINTMSK_XFERCOMPL BIT(0)
+
+#define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch))
+#define TSIZ_DOPNG BIT(31)
+#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
+#define TSIZ_SC_MC_PID_SHIFT 29
+#define TSIZ_SC_MC_PID_DATA0 0
+#define TSIZ_SC_MC_PID_DATA2 1
+#define TSIZ_SC_MC_PID_DATA1 2
+#define TSIZ_SC_MC_PID_MDATA 3
+#define TSIZ_SC_MC_PID_SETUP 3
+#define TSIZ_PKTCNT_MASK (0x3ff << 19)
+#define TSIZ_PKTCNT_SHIFT 19
+#define TSIZ_NTD_MASK (0xff << 8)
+#define TSIZ_NTD_SHIFT 8
+#define TSIZ_SCHINFO_MASK (0xff << 0)
+#define TSIZ_SCHINFO_SHIFT 0
+#define TSIZ_XFERSIZE_MASK (0x7ffff << 0)
+#define TSIZ_XFERSIZE_SHIFT 0
+
+#define HCDMA(_ch) HSOTG_REG(0x0514 + 0x20 * (_ch))
+
+#define HCDMAB(_ch) HSOTG_REG(0x051c + 0x20 * (_ch))
+
+#define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch))
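Editorial note: the host channel registers follow the same MASK/SHIFT convention as the rest of the block. As an illustration only (not part of this patch), a channel characteristics value could be assembled field by field against those masks; the helper name and the IN-direction convention (EPDir = 1 for IN) are assumptions made for the sketch:

/* Illustrative sketch: compose an HCCHAR value from its fields. */
static inline uint32_t dwc2_make_hcchar(unsigned int devaddr, unsigned int epnum,
                                        unsigned int eptype, unsigned int mps,
                                        bool is_in)
{
    uint32_t val = 0;

    val |= (devaddr << HCCHAR_DEVADDR_SHIFT) & HCCHAR_DEVADDR_MASK;
    val |= (epnum << HCCHAR_EPNUM_SHIFT) & HCCHAR_EPNUM_MASK;
    val |= (eptype << HCCHAR_EPTYPE_SHIFT) & HCCHAR_EPTYPE_MASK;
    val |= (mps << HCCHAR_MPS_SHIFT) & HCCHAR_MPS_MASK;
    if (is_in) {
        val |= HCCHAR_EPDIR;       /* direction bit set for IN transfers */
    }
    return val | HCCHAR_CHENA;     /* enable the channel last */
}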
/**
* struct dwc2_dma_desc - DMA descriptor structure,
@@ -836,64 +836,64 @@
* Status quadlet and Data buffer pointer.
*/
struct dwc2_dma_desc {
- uint32_t status;
- uint32_t buf;
+ uint32_t status;
+ uint32_t buf;
} __packed;
/* Host Mode DMA descriptor status quadlet */
-#define HOST_DMA_A BIT(31)
-#define HOST_DMA_STS_MASK (0x3 << 28)
-#define HOST_DMA_STS_SHIFT 28
-#define HOST_DMA_STS_PKTERR BIT(28)
-#define HOST_DMA_EOL BIT(26)
-#define HOST_DMA_IOC BIT(25)
-#define HOST_DMA_SUP BIT(24)
-#define HOST_DMA_ALT_QTD BIT(23)
-#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17)
-#define HOST_DMA_QTD_OFFSET_SHIFT 17
-#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0)
-#define HOST_DMA_ISOC_NBYTES_SHIFT 0
-#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
-#define HOST_DMA_NBYTES_SHIFT 0
-#define HOST_DMA_NBYTES_LIMIT 131071
+#define HOST_DMA_A BIT(31)
+#define HOST_DMA_STS_MASK (0x3 << 28)
+#define HOST_DMA_STS_SHIFT 28
+#define HOST_DMA_STS_PKTERR BIT(28)
+#define HOST_DMA_EOL BIT(26)
+#define HOST_DMA_IOC BIT(25)
+#define HOST_DMA_SUP BIT(24)
+#define HOST_DMA_ALT_QTD BIT(23)
+#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17)
+#define HOST_DMA_QTD_OFFSET_SHIFT 17
+#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0)
+#define HOST_DMA_ISOC_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_MASK (0x1ffff << 0)
+#define HOST_DMA_NBYTES_SHIFT 0
+#define HOST_DMA_NBYTES_LIMIT 131071
/* Device Mode DMA descriptor status quadlet */
-#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
-#define DEV_DMA_BUFF_STS_SHIFT 30
-#define DEV_DMA_BUFF_STS_HREADY 0
-#define DEV_DMA_BUFF_STS_DMABUSY 1
-#define DEV_DMA_BUFF_STS_DMADONE 2
-#define DEV_DMA_BUFF_STS_HBUSY 3
-#define DEV_DMA_STS_MASK (0x3 << 28)
-#define DEV_DMA_STS_SHIFT 28
-#define DEV_DMA_STS_SUCC 0
-#define DEV_DMA_STS_BUFF_FLUSH 1
-#define DEV_DMA_STS_BUFF_ERR 3
-#define DEV_DMA_L BIT(27)
-#define DEV_DMA_SHORT BIT(26)
-#define DEV_DMA_IOC BIT(25)
-#define DEV_DMA_SR BIT(24)
-#define DEV_DMA_MTRF BIT(23)
-#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
-#define DEV_DMA_ISOC_PID_SHIFT 23
-#define DEV_DMA_ISOC_PID_DATA0 0
-#define DEV_DMA_ISOC_PID_DATA2 1
-#define DEV_DMA_ISOC_PID_DATA1 2
-#define DEV_DMA_ISOC_PID_MDATA 3
-#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
-#define DEV_DMA_ISOC_FRNUM_SHIFT 12
-#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
-#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
-#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
-#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
-#define DEV_DMA_ISOC_NBYTES_SHIFT 0
-#define DEV_DMA_NBYTES_MASK (0xffff << 0)
-#define DEV_DMA_NBYTES_SHIFT 0
-#define DEV_DMA_NBYTES_LIMIT 0xffff
-
-#define MAX_DMA_DESC_NUM_GENERIC 64
-#define MAX_DMA_DESC_NUM_HS_ISOC 256
-
-#endif /* __DWC2_HW_H__ */
+#define DEV_DMA_BUFF_STS_MASK (0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT 30
+#define DEV_DMA_BUFF_STS_HREADY 0
+#define DEV_DMA_BUFF_STS_DMABUSY 1
+#define DEV_DMA_BUFF_STS_DMADONE 2
+#define DEV_DMA_BUFF_STS_HBUSY 3
+#define DEV_DMA_STS_MASK (0x3 << 28)
+#define DEV_DMA_STS_SHIFT 28
+#define DEV_DMA_STS_SUCC 0
+#define DEV_DMA_STS_BUFF_FLUSH 1
+#define DEV_DMA_STS_BUFF_ERR 3
+#define DEV_DMA_L BIT(27)
+#define DEV_DMA_SHORT BIT(26)
+#define DEV_DMA_IOC BIT(25)
+#define DEV_DMA_SR BIT(24)
+#define DEV_DMA_MTRF BIT(23)
+#define DEV_DMA_ISOC_PID_MASK (0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT 23
+#define DEV_DMA_ISOC_PID_DATA0 0
+#define DEV_DMA_ISOC_PID_DATA2 1
+#define DEV_DMA_ISOC_PID_DATA1 2
+#define DEV_DMA_ISOC_PID_MDATA 3
+#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT 12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_MASK (0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT 0
+#define DEV_DMA_NBYTES_LIMIT 0xffff
+
+#define MAX_DMA_DESC_NUM_GENERIC 64
+#define MAX_DMA_DESC_NUM_HS_ISOC 256
+
+#endif /* DWC2_REGS_H */
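Editorial note: for the descriptor-DMA path, the status quadlet of struct dwc2_dma_desc is the whole programming interface, so a filled-in example may help. A hedged sketch, not taken from the patch, of queuing a single host-mode descriptor that interrupts on completion and terminates the list (nbytes must not exceed HOST_DMA_NBYTES_LIMIT):

/* Illustrative only: initialise one host-mode DMA descriptor. */
static inline void dwc2_fill_host_desc(struct dwc2_dma_desc *desc,
                                       uint32_t buf_addr, uint32_t nbytes)
{
    uint32_t status = 0;

    status |= (nbytes << HOST_DMA_NBYTES_SHIFT) & HOST_DMA_NBYTES_MASK;
    status |= HOST_DMA_IOC;     /* raise an interrupt on completion */
    status |= HOST_DMA_EOL;     /* last descriptor in the list */
    status |= HOST_DMA_A;       /* mark the descriptor active */

    desc->status = status;
    desc->buf = buf_addr;
}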
diff --git a/include/hw/usb/hcd-dwc3.h b/include/hw/usb/hcd-dwc3.h
new file mode 100644
index 0000000000..f752a27e94
--- /dev/null
+++ b/include/hw/usb/hcd-dwc3.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU model of the USB DWC3 host controller emulation.
+ *
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Written by Vikram Garhwal<fnu.vikram@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef HCD_DWC3_H
+#define HCD_DWC3_H
+
+#include "hw/register.h"
+#include "hw/usb/hcd-xhci.h"
+#include "hw/usb/hcd-xhci-sysbus.h"
+
+#define TYPE_USB_DWC3 "usb_dwc3"
+
+#define USB_DWC3(obj) \
+ OBJECT_CHECK(USBDWC3, (obj), TYPE_USB_DWC3)
+
+#define USB_DWC3_R_MAX ((0x530 / 4) + 1)
+#define DWC3_SIZE 0x10000
+
+typedef struct USBDWC3 {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ XHCISysbusState sysbus_xhci;
+
+ uint32_t regs[USB_DWC3_R_MAX];
+ RegisterInfo regs_info[USB_DWC3_R_MAX];
+
+ struct {
+ uint8_t mode;
+ uint32_t dwc_usb3_user;
+ } cfg;
+
+} USBDWC3;
+
+#endif
diff --git a/include/hw/usb/hcd-musb.h b/include/hw/usb/hcd-musb.h
index c874b9f292..4d4b1ec0fc 100644
--- a/include/hw/usb/hcd-musb.h
+++ b/include/hw/usb/hcd-musb.h
@@ -10,8 +10,10 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef HW_USB_MUSB_H
-#define HW_USB_MUSB_H
+#ifndef HW_USB_HCD_MUSB_H
+#define HW_USB_HCD_MUSB_H
+
+#include "exec/hwaddr.h"
enum musb_irq_source_e {
musb_irq_suspend = 0,
diff --git a/include/hw/usb/hid.h b/include/hw/usb/hid.h
new file mode 100644
index 0000000000..1c142584ff
--- /dev/null
+++ b/include/hw/usb/hid.h
@@ -0,0 +1,17 @@
+#ifndef HW_USB_HID_H
+#define HW_USB_HID_H
+
+/* HID interface requests */
+#define HID_GET_REPORT 0xa101
+#define HID_GET_IDLE 0xa102
+#define HID_GET_PROTOCOL 0xa103
+#define HID_SET_REPORT 0x2109
+#define HID_SET_IDLE 0x210a
+#define HID_SET_PROTOCOL 0x210b
+
+/* HID descriptor types */
+#define USB_DT_HID 0x21
+#define USB_DT_REPORT 0x22
+#define USB_DT_PHY 0x23
+
+#endif
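Editorial note: these request constants pack bmRequestType and bRequest into a single 16-bit value (0xa101 is bmRequestType 0xa1 with bRequest 0x01), which is how USB control transfers are commonly dispatched in the emulation code. A minimal, illustrative fragment — the function and its parameters are hypothetical, only the constants come from the header:

/* Illustrative only: dispatch on the combined request value. */
static int hid_handle_control_sketch(int request, int value,
                                     uint8_t *data, int length)
{
    switch (request) {
    case HID_GET_REPORT:
        /* value: high byte = report type, low byte = report ID */
        return 0; /* fill data with the current report here */
    case HID_SET_IDLE:
        /* value: high byte carries the new idle rate */
        return 0;
    default:
        return -1; /* unhandled, defer to the generic USB code */
    }
}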
diff --git a/include/hw/usb/imx-usb-phy.h b/include/hw/usb/imx-usb-phy.h
index 07f0235d10..d1e867b77a 100644
--- a/include/hw/usb/imx-usb-phy.h
+++ b/include/hw/usb/imx-usb-phy.h
@@ -3,6 +3,7 @@
#include "hw/sysbus.h"
#include "qemu/bitops.h"
+#include "qom/object.h"
enum IMXUsbPhyRegisters {
USBPHY_PWD,
@@ -38,9 +39,9 @@ enum IMXUsbPhyRegisters {
#define USBPHY_CTRL_SFTRST BIT(31)
#define TYPE_IMX_USBPHY "imx.usbphy"
-#define IMX_USBPHY(obj) OBJECT_CHECK(IMXUSBPHYState, (obj), TYPE_IMX_USBPHY)
+OBJECT_DECLARE_SIMPLE_TYPE(IMXUSBPHYState, IMX_USBPHY)
-typedef struct IMXUSBPHYState {
+struct IMXUSBPHYState {
/* <private> */
SysBusDevice parent_obj;
@@ -48,6 +49,6 @@ typedef struct IMXUSBPHYState {
MemoryRegion iomem;
uint32_t usbphy[USBPHY_MAX];
-} IMXUSBPHYState;
+};
#endif /* IMX_USB_PHY_H */
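Editorial note: the switch from a hand-rolled OBJECT_CHECK macro to OBJECT_DECLARE_SIMPLE_TYPE is part of a tree-wide QOM modernization; the declaration macro now emits both the IMXUSBPHYState typedef and the IMX_USBPHY() cast checker. Call sites are unchanged. A minimal illustrative fragment (the reset function shown is hypothetical):

/* Illustrative only: the generated cast macro is used exactly as before. */
static void imx_usbphy_reset_sketch(DeviceState *dev)
{
    IMXUSBPHYState *s = IMX_USBPHY(dev);   /* runtime-checked downcast */

    memset(s->usbphy, 0, sizeof(s->usbphy));
}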
diff --git a/include/hw/usb/msd.h b/include/hw/usb/msd.h
new file mode 100644
index 0000000000..f9fd862b52
--- /dev/null
+++ b/include/hw/usb/msd.h
@@ -0,0 +1,55 @@
+/*
+ * USB Mass Storage Device emulation
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the LGPL.
+ */
+
+#include "hw/usb.h"
+#include "hw/scsi/scsi.h"
+
+enum USBMSDMode {
+ USB_MSDM_CBW, /* Command Block. */
+ USB_MSDM_DATAOUT, /* Transfer data to device. */
+ USB_MSDM_DATAIN, /* Transfer data from device. */
+ USB_MSDM_CSW /* Command Status. */
+};
+
+struct QEMU_PACKED usb_msd_csw {
+ uint32_t sig;
+ uint32_t tag;
+ uint32_t residue;
+ uint8_t status;
+};
+
+struct MSDState {
+ USBDevice dev;
+ enum USBMSDMode mode;
+ uint32_t scsi_off;
+ uint32_t scsi_len;
+ uint32_t data_len;
+ struct usb_msd_csw csw;
+ SCSIRequest *req;
+ SCSIBus bus;
+ /* For async completion. */
+ USBPacket *packet;
+ /* usb-storage only */
+ BlockConf conf;
+ bool removable;
+ bool commandlog;
+ SCSIDevice *scsi_dev;
+ bool needs_reset;
+};
+
+typedef struct MSDState MSDState;
+#define TYPE_USB_STORAGE "usb-storage-dev"
+DECLARE_INSTANCE_CHECKER(MSDState, USB_STORAGE_DEV,
+ TYPE_USB_STORAGE)
+
+void usb_msd_transfer_data(SCSIRequest *req, uint32_t len);
+void usb_msd_command_complete(SCSIRequest *req, size_t resid);
+void usb_msd_request_cancelled(SCSIRequest *req);
+void *usb_msd_load_request(QEMUFile *f, SCSIRequest *req);
+void usb_msd_handle_reset(USBDevice *dev);
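Editorial note: usb_msd_csw mirrors the Bulk-Only Transport command status wrapper, whose signature is the fixed ASCII "USBS" value. An illustrative sketch of completing a request; the 0x53425355 constant and the status codes come from the USB MSC BOT specification, not from this header, and cpu_to_le32 is assumed from QEMU's "qemu/bswap.h":

/* Illustrative only: fill in a status wrapper for a finished command. */
static void usb_msd_fill_csw_sketch(struct usb_msd_csw *csw, uint32_t tag,
                                    uint32_t residue, uint8_t status)
{
    csw->sig = cpu_to_le32(0x53425355);   /* "USBS" signature, little-endian */
    csw->tag = tag;                       /* echoes the tag of the CBW */
    csw->residue = residue;               /* bytes not transferred */
    csw->status = status;                 /* 0 = passed, 1 = failed, 2 = phase error */
}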
diff --git a/include/hw/usb/xhci.h b/include/hw/usb/xhci.h
new file mode 100644
index 0000000000..5c90e1373e
--- /dev/null
+++ b/include/hw/usb/xhci.h
@@ -0,0 +1,21 @@
+#ifndef HW_USB_XHCI_H
+#define HW_USB_XHCI_H
+
+#define TYPE_XHCI "base-xhci"
+#define TYPE_NEC_XHCI "nec-usb-xhci"
+#define TYPE_QEMU_XHCI "qemu-xhci"
+#define TYPE_XHCI_SYSBUS "sysbus-xhci"
+
+#define XHCI_MAXPORTS_2 15
+#define XHCI_MAXPORTS_3 15
+
+#define XHCI_MAXPORTS (XHCI_MAXPORTS_2 + XHCI_MAXPORTS_3)
+#define XHCI_MAXSLOTS 64
+#define XHCI_MAXINTRS 16
+
+/* must be power of 2 */
+#define XHCI_LEN_REGS 0x4000
+
+void xhci_sysbus_build_aml(Aml *scope, uint32_t mmio, unsigned int irq);
+
+#endif
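Editorial note: two of these constants carry implicit constraints — XHCI_LEN_REGS must be a power of two (as the comment says) and XHCI_MAXPORTS is simply the sum of the USB2 and USB3 port counts, i.e. 30 here. A hedged sketch of compile-time checks one could place next to the definitions, using standard C11 _Static_assert rather than anything from this patch:

/* Illustrative compile-time checks for the xhci.h constants. */
_Static_assert((XHCI_LEN_REGS & (XHCI_LEN_REGS - 1)) == 0,
               "XHCI_LEN_REGS must be a power of 2");
_Static_assert(XHCI_MAXPORTS == XHCI_MAXPORTS_2 + XHCI_MAXPORTS_3,
               "XHCI_MAXPORTS covers both USB2 and USB3 root ports");
_Static_assert(XHCI_MAXPORTS == 30, "15 + 15 root ports");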
diff --git a/include/hw/usb/xlnx-usb-subsystem.h b/include/hw/usb/xlnx-usb-subsystem.h
new file mode 100644
index 0000000000..40f9e97e09
--- /dev/null
+++ b/include/hw/usb/xlnx-usb-subsystem.h
@@ -0,0 +1,47 @@
+/*
+ * QEMU model of the Xilinx usb subsystem
+ *
+ * Copyright (c) 2020 Xilinx Inc. Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_USB_SUBSYSTEM_H
+#define XLNX_USB_SUBSYSTEM_H
+
+#include "hw/register.h"
+#include "hw/sysbus.h"
+#include "hw/usb/xlnx-versal-usb2-ctrl-regs.h"
+#include "hw/usb/hcd-dwc3.h"
+
+#define TYPE_XILINX_VERSAL_USB2 "xlnx.versal-usb2"
+
+#define VERSAL_USB2(obj) \
+ OBJECT_CHECK(VersalUsb2, (obj), TYPE_XILINX_VERSAL_USB2)
+
+typedef struct VersalUsb2 {
+ SysBusDevice parent_obj;
+ MemoryRegion dwc3_mr;
+ MemoryRegion usb2Ctrl_mr;
+
+ VersalUsb2CtrlRegs usb2Ctrl;
+ USBDWC3 dwc3;
+} VersalUsb2;
+
+#endif
diff --git a/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h
new file mode 100644
index 0000000000..6a502006b0
--- /dev/null
+++ b/include/hw/usb/xlnx-versal-usb2-ctrl-regs.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU model of the VersalUsb2CtrlRegs Register control/Status block for
+ * USB2.0 controller
+ *
+ * Copyright (c) 2020 Xilinx Inc. Vikram Garhwal <fnu.vikram@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_VERSAL_USB2_CTRL_REGS_H
+#define XLNX_VERSAL_USB2_CTRL_REGS_H
+
+#include "hw/register.h"
+#include "hw/sysbus.h"
+
+#define TYPE_XILINX_VERSAL_USB2_CTRL_REGS "xlnx.versal-usb2-ctrl-regs"
+
+#define XILINX_VERSAL_USB2_CTRL_REGS(obj) \
+ OBJECT_CHECK(VersalUsb2CtrlRegs, (obj), TYPE_XILINX_VERSAL_USB2_CTRL_REGS)
+
+#define USB2_REGS_R_MAX ((0x78 / 4) + 1)
+
+typedef struct VersalUsb2CtrlRegs {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_ir;
+
+ uint32_t regs[USB2_REGS_R_MAX];
+ RegisterInfo regs_info[USB2_REGS_R_MAX];
+} VersalUsb2CtrlRegs;
+
+#endif
diff --git a/include/hw/vfio/vfio-amd-xgbe.h b/include/hw/vfio/vfio-amd-xgbe.h
index 9fff65e99d..a894546c02 100644
--- a/include/hw/vfio/vfio-amd-xgbe.h
+++ b/include/hw/vfio/vfio-amd-xgbe.h
@@ -15,6 +15,7 @@
#define HW_VFIO_VFIO_AMD_XGBE_H
#include "hw/vfio/vfio-platform.h"
+#include "qom/object.h"
#define TYPE_VFIO_AMD_XGBE "vfio-amd-xgbe"
@@ -39,13 +40,7 @@ struct VFIOAmdXgbeDeviceClass {
typedef struct VFIOAmdXgbeDeviceClass VFIOAmdXgbeDeviceClass;
-#define VFIO_AMD_XGBE_DEVICE(obj) \
- OBJECT_CHECK(VFIOAmdXgbeDevice, (obj), TYPE_VFIO_AMD_XGBE)
-#define VFIO_AMD_XGBE_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VFIOAmdXgbeDeviceClass, (klass), \
- TYPE_VFIO_AMD_XGBE)
-#define VFIO_AMD_XGBE_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VFIOAmdXgbeDeviceClass, (obj), \
- TYPE_VFIO_AMD_XGBE)
+DECLARE_OBJ_CHECKERS(VFIOAmdXgbeDevice, VFIOAmdXgbeDeviceClass,
+ VFIO_AMD_XGBE_DEVICE, TYPE_VFIO_AMD_XGBE)
#endif
diff --git a/include/hw/vfio/vfio-calxeda-xgmac.h b/include/hw/vfio/vfio-calxeda-xgmac.h
index f994775c09..8482f151dd 100644
--- a/include/hw/vfio/vfio-calxeda-xgmac.h
+++ b/include/hw/vfio/vfio-calxeda-xgmac.h
@@ -15,6 +15,7 @@
#define HW_VFIO_VFIO_CALXEDA_XGMAC_H
#include "hw/vfio/vfio-platform.h"
+#include "qom/object.h"
#define TYPE_VFIO_CALXEDA_XGMAC "vfio-calxeda-xgmac"
@@ -23,24 +24,20 @@
* - a single MMIO region corresponding to its register space
* - 3 IRQS (main and 2 power related IRQs)
*/
-typedef struct VFIOCalxedaXgmacDevice {
+struct VFIOCalxedaXgmacDevice {
VFIOPlatformDevice vdev;
-} VFIOCalxedaXgmacDevice;
+};
+typedef struct VFIOCalxedaXgmacDevice VFIOCalxedaXgmacDevice;
-typedef struct VFIOCalxedaXgmacDeviceClass {
+struct VFIOCalxedaXgmacDeviceClass {
/*< private >*/
VFIOPlatformDeviceClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
-} VFIOCalxedaXgmacDeviceClass;
+};
+typedef struct VFIOCalxedaXgmacDeviceClass VFIOCalxedaXgmacDeviceClass;
-#define VFIO_CALXEDA_XGMAC_DEVICE(obj) \
- OBJECT_CHECK(VFIOCalxedaXgmacDevice, (obj), TYPE_VFIO_CALXEDA_XGMAC)
-#define VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VFIOCalxedaXgmacDeviceClass, (klass), \
- TYPE_VFIO_CALXEDA_XGMAC)
-#define VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VFIOCalxedaXgmacDeviceClass, (obj), \
- TYPE_VFIO_CALXEDA_XGMAC)
+DECLARE_OBJ_CHECKERS(VFIOCalxedaXgmacDevice, VFIOCalxedaXgmacDeviceClass,
+ VFIO_CALXEDA_XGMAC_DEVICE, TYPE_VFIO_CALXEDA_XGMAC)
#endif
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index c78f3ff559..b9da6c08ef 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -29,6 +29,8 @@
#ifdef CONFIG_LINUX
#include <linux/vfio.h>
#endif
+#include "sysemu/sysemu.h"
+#include "hw/vfio/vfio-container-base.h"
#define VFIO_MSG_PREFIX "vfio %s: "
@@ -57,37 +59,29 @@ typedef struct VFIORegion {
uint8_t nr; /* cache the region number for debug */
} VFIORegion;
-typedef struct VFIOAddressSpace {
- AddressSpace *as;
- QLIST_HEAD(, VFIOContainer) containers;
- QLIST_ENTRY(VFIOAddressSpace) list;
-} VFIOAddressSpace;
+typedef struct VFIOMigration {
+ struct VFIODevice *vbasedev;
+ VMChangeStateEntry *vm_state;
+ NotifierWithReturn migration_state;
+ uint32_t device_state;
+ int data_fd;
+ void *data_buffer;
+ size_t data_buffer_size;
+ uint64_t mig_flags;
+ uint64_t precopy_init_size;
+ uint64_t precopy_dirty_size;
+ bool initial_data_sent;
+} VFIOMigration;
struct VFIOGroup;
typedef struct VFIOContainer {
- VFIOAddressSpace *space;
+ VFIOContainerBase bcontainer;
int fd; /* /dev/vfio/vfio, empowered by the attached groups */
- MemoryListener listener;
- MemoryListener prereg_listener;
unsigned iommu_type;
- Error *error;
- bool initialized;
- unsigned long pgsizes;
- QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
- QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
QLIST_HEAD(, VFIOGroup) group_list;
- QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
-typedef struct VFIOGuestIOMMU {
- VFIOContainer *container;
- IOMMUMemoryRegion *iommu;
- hwaddr iommu_offset;
- IOMMUNotifier n;
- QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
-} VFIOGuestIOMMU;
-
typedef struct VFIOHostDMAWindow {
hwaddr min_iova;
hwaddr max_iova;
@@ -95,11 +89,22 @@ typedef struct VFIOHostDMAWindow {
QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
} VFIOHostDMAWindow;
+typedef struct IOMMUFDBackend IOMMUFDBackend;
+
+typedef struct VFIOIOMMUFDContainer {
+ VFIOContainerBase bcontainer;
+ IOMMUFDBackend *be;
+ uint32_t ioas_id;
+} VFIOIOMMUFDContainer;
+
typedef struct VFIODeviceOps VFIODeviceOps;
typedef struct VFIODevice {
QLIST_ENTRY(VFIODevice) next;
+ QLIST_ENTRY(VFIODevice) container_next;
+ QLIST_ENTRY(VFIODevice) global_next;
struct VFIOGroup *group;
+ VFIOContainerBase *bcontainer;
char *sysfsdev;
char *name;
DeviceState *dev;
@@ -109,16 +114,27 @@ typedef struct VFIODevice {
bool needs_reset;
bool no_mmap;
bool ram_block_discard_allowed;
+ OnOffAuto enable_migration;
VFIODeviceOps *ops;
unsigned int num_irqs;
unsigned int num_regions;
unsigned int flags;
+ VFIOMigration *migration;
+ Error *migration_blocker;
+ OnOffAuto pre_copy_dirty_page_tracking;
+ bool dirty_pages_supported;
+ bool dirty_tracking;
+ int devid;
+ IOMMUFDBackend *iommufd;
} VFIODevice;
struct VFIODeviceOps {
void (*vfio_compute_needs_reset)(VFIODevice *vdev);
int (*vfio_hot_reset_multi)(VFIODevice *vdev);
void (*vfio_eoi)(VFIODevice *vdev);
+ Object *(*vfio_get_object)(VFIODevice *vdev);
+ void (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f);
+ int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f);
};
typedef struct VFIOGroup {
@@ -157,7 +173,13 @@ typedef struct VFIODisplay {
} dmabuf;
} VFIODisplay;
-void vfio_put_base_device(VFIODevice *vbasedev);
+VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
+void vfio_put_address_space(VFIOAddressSpace *space);
+
+/* SPAPR specific */
+int vfio_spapr_container_init(VFIOContainer *container, Error **errp);
+void vfio_spapr_container_deinit(VFIOContainer *container);
+
void vfio_disable_irqindex(VFIODevice *vbasedev, int index);
void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index);
void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index);
@@ -171,17 +193,37 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
int index, const char *name);
int vfio_region_mmap(VFIORegion *region);
void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled);
+void vfio_region_unmap(VFIORegion *region);
void vfio_region_exit(VFIORegion *region);
void vfio_region_finalize(VFIORegion *region);
void vfio_reset_handler(void *opaque);
-VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp);
-void vfio_put_group(VFIOGroup *group);
-int vfio_get_device(VFIOGroup *group, const char *name,
- VFIODevice *vbasedev, Error **errp);
+struct vfio_device_info *vfio_get_device_info(int fd);
+int vfio_attach_device(char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp);
+void vfio_detach_device(VFIODevice *vbasedev);
+
+int vfio_kvm_device_add_fd(int fd, Error **errp);
+int vfio_kvm_device_del_fd(int fd, Error **errp);
+
+int vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp);
+void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer);
extern const MemoryRegionOps vfio_region_ops;
typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
+typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList;
extern VFIOGroupList vfio_group_list;
+extern VFIODeviceList vfio_device_list;
+extern const MemoryListener vfio_memory_listener;
+extern int vfio_kvm_device_fd;
+
+bool vfio_mig_active(void);
+int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp);
+void vfio_unblock_multiple_devices_migration(void);
+bool vfio_viommu_preset(VFIODevice *vbasedev);
+int64_t vfio_mig_bytes_transferred(void);
+void vfio_reset_bytes_transferred(void);
+bool vfio_device_state_is_running(VFIODevice *vbasedev);
+bool vfio_device_state_is_precopy(VFIODevice *vbasedev);
#ifdef CONFIG_LINUX
int vfio_get_region_info(VFIODevice *vbasedev, int index,
@@ -191,13 +233,31 @@ int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type);
struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id);
+bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
+ unsigned int *avail);
+struct vfio_info_cap_header *
+vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id);
+struct vfio_info_cap_header *
+vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id);
#endif
-extern const MemoryListener vfio_prereg_listener;
-int vfio_spapr_create_window(VFIOContainer *container,
- MemoryRegionSection *section,
- hwaddr *pgsize);
-int vfio_spapr_remove_window(VFIOContainer *container,
- hwaddr offset_within_address_space);
+bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
+void vfio_migration_exit(VFIODevice *vbasedev);
+
+int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size);
+bool
+vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer);
+bool
+vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer);
+int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size);
+int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr);
+/* Returns 0 on success, or a negative errno. */
+int vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
+void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
+void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
+ DeviceState *dev, bool ram_discard);
#endif /* HW_VFIO_VFIO_COMMON_H */
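Editorial note: the group-based vfio_get_group()/vfio_get_device() pair is replaced here by a container-agnostic attach/detach API. A hedged sketch of how a device front end would now bind itself, using only the prototypes declared above; the surrounding function is hypothetical and error handling is trimmed:

/* Illustrative only: attach a VFIO device through the new common API. */
static bool vfio_frontend_realize_sketch(VFIODevice *vbasedev,
                                         AddressSpace *as, Error **errp)
{
    if (vfio_device_get_name(vbasedev, errp) < 0) {   /* resolve the device name */
        return false;
    }
    if (vfio_attach_device(vbasedev->name, vbasedev, as, errp) < 0) {
        return false;
    }
    /* ... set up regions and interrupts ... */
    return true;
}

The matching teardown is a single vfio_detach_device(vbasedev) call in the unrealize path.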
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
new file mode 100644
index 0000000000..3582d5f97a
--- /dev/null
+++ b/include/hw/vfio/vfio-container-base.h
@@ -0,0 +1,141 @@
+/*
+ * VFIO BASE CONTAINER
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H
+#define HW_VFIO_VFIO_CONTAINER_BASE_H
+
+#include "exec/memory.h"
+
+typedef struct VFIODevice VFIODevice;
+typedef struct VFIOIOMMUClass VFIOIOMMUClass;
+
+typedef struct {
+ unsigned long *bitmap;
+ hwaddr size;
+ hwaddr pages;
+} VFIOBitmap;
+
+typedef struct VFIOAddressSpace {
+ AddressSpace *as;
+ QLIST_HEAD(, VFIOContainerBase) containers;
+ QLIST_ENTRY(VFIOAddressSpace) list;
+} VFIOAddressSpace;
+
+/*
+ * This is the base object for vfio container backends
+ */
+typedef struct VFIOContainerBase {
+ const VFIOIOMMUClass *ops;
+ VFIOAddressSpace *space;
+ MemoryListener listener;
+ Error *error;
+ bool initialized;
+ uint64_t dirty_pgsizes;
+ uint64_t max_dirty_bitmap_size;
+ unsigned long pgsizes;
+ unsigned int dma_max_mappings;
+ bool dirty_pages_supported;
+ QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
+ QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
+ QLIST_ENTRY(VFIOContainerBase) next;
+ QLIST_HEAD(, VFIODevice) device_list;
+ GList *iova_ranges;
+ NotifierWithReturn cpr_reboot_notifier;
+} VFIOContainerBase;
+
+typedef struct VFIOGuestIOMMU {
+ VFIOContainerBase *bcontainer;
+ IOMMUMemoryRegion *iommu_mr;
+ hwaddr iommu_offset;
+ IOMMUNotifier n;
+ QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
+} VFIOGuestIOMMU;
+
+typedef struct VFIORamDiscardListener {
+ VFIOContainerBase *bcontainer;
+ MemoryRegion *mr;
+ hwaddr offset_within_address_space;
+ hwaddr size;
+ uint64_t granularity;
+ RamDiscardListener listener;
+ QLIST_ENTRY(VFIORamDiscardListener) next;
+} VFIORamDiscardListener;
+
+int vfio_container_dma_map(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly);
+int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb);
+int vfio_container_add_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp);
+void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start);
+int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
+
+void vfio_container_init(VFIOContainerBase *bcontainer,
+ VFIOAddressSpace *space,
+ const VFIOIOMMUClass *ops);
+void vfio_container_destroy(VFIOContainerBase *bcontainer);
+
+
+#define TYPE_VFIO_IOMMU "vfio-iommu"
+#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy"
+#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr"
+#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd"
+
+/*
+ * VFIOContainerBase is not an abstract QOM object because it felt
+ * unnecessary to expose all the IOMMU backends to the QEMU machine
+ * and human interface. However, we can still abstract the IOMMU
+ * backend handlers using a QOM interface class. This provides more
+ * flexibility when referencing the various implementations.
+ */
+DECLARE_CLASS_CHECKERS(VFIOIOMMUClass, VFIO_IOMMU, TYPE_VFIO_IOMMU)
+
+struct VFIOIOMMUClass {
+ InterfaceClass parent_class;
+
+ /* basic feature */
+ int (*setup)(VFIOContainerBase *bcontainer, Error **errp);
+ int (*dma_map)(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly);
+ int (*dma_unmap)(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb);
+ int (*attach_device)(const char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp);
+ void (*detach_device)(VFIODevice *vbasedev);
+ /* migration feature */
+ int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer,
+ bool start);
+ int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
+ /* PCI specific */
+ int (*pci_hot_reset)(VFIODevice *vbasedev, bool single);
+
+ /* SPAPR specific */
+ int (*add_window)(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp);
+ void (*del_window)(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+ void (*release)(VFIOContainerBase *bcontainer);
+};
+#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */
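Editorial note: the base container delegates to whichever VFIOIOMMUClass backend it was created with, so the exported vfio_container_dma_map()/vfio_container_dma_unmap() helpers are essentially thin dispatchers. A hedged sketch of what that dispatch amounts to (the real implementation lives in the corresponding .c file and may differ; -ENOTSUP assumes <errno.h>):

/* Illustrative only: dispatch a map request to the selected IOMMU backend. */
static int vfio_container_dma_map_sketch(VFIOContainerBase *bcontainer,
                                         hwaddr iova, ram_addr_t size,
                                         void *vaddr, bool readonly)
{
    if (!bcontainer->ops->dma_map) {
        return -ENOTSUP;
    }
    return bcontainer->ops->dma_map(bcontainer, iova, size, vaddr, readonly);
}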
diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h
index 4ec70c813a..c414c3dffc 100644
--- a/include/hw/vfio/vfio-platform.h
+++ b/include/hw/vfio/vfio-platform.h
@@ -20,6 +20,7 @@
#include "hw/vfio/vfio-common.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
+#include "qom/object.h"
#define TYPE_VFIO_PLATFORM "vfio-platform"
@@ -46,7 +47,7 @@ typedef struct VFIOINTp {
/* function type for user side eventfd handler */
typedef void (*eventfd_user_side_handler_t)(VFIOINTp *intp);
-typedef struct VFIOPlatformDevice {
+struct VFIOPlatformDevice {
SysBusDevice sbdev;
VFIODevice vbasedev; /* not a QOM object */
VFIORegion **regions;
@@ -59,19 +60,17 @@ typedef struct VFIOPlatformDevice {
QEMUTimer *mmap_timer; /* allows fast-path resume after IRQ hit */
QemuMutex intp_mutex; /* protect the intp_list IRQ state */
bool irqfd_allowed; /* debug option to force irqfd on/off */
-} VFIOPlatformDevice;
+};
+typedef struct VFIOPlatformDevice VFIOPlatformDevice;
-typedef struct VFIOPlatformDeviceClass {
+struct VFIOPlatformDeviceClass {
/*< private >*/
SysBusDeviceClass parent_class;
/*< public >*/
-} VFIOPlatformDeviceClass;
+};
+typedef struct VFIOPlatformDeviceClass VFIOPlatformDeviceClass;
-#define VFIO_PLATFORM_DEVICE(obj) \
- OBJECT_CHECK(VFIOPlatformDevice, (obj), TYPE_VFIO_PLATFORM)
-#define VFIO_PLATFORM_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VFIOPlatformDeviceClass, (klass), TYPE_VFIO_PLATFORM)
-#define VFIO_PLATFORM_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VFIOPlatformDeviceClass, (obj), TYPE_VFIO_PLATFORM)
+DECLARE_OBJ_CHECKERS(VFIOPlatformDevice, VFIOPlatformDeviceClass,
+ VFIO_PLATFORM_DEVICE, TYPE_VFIO_PLATFORM)
#endif /* HW_VFIO_VFIO_PLATFORM_H */
diff --git a/include/hw/vfio/vfio.h b/include/hw/vfio/vfio.h
deleted file mode 100644
index 86248f5436..0000000000
--- a/include/hw/vfio/vfio.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef HW_VFIO_H
-#define HW_VFIO_H
-
-bool vfio_eeh_as_ok(AddressSpace *as);
-int vfio_eeh_as_op(AddressSpace *as, uint32_t op);
-
-#endif
diff --git a/include/hw/virtio/vdpa-dev.h b/include/hw/virtio/vdpa-dev.h
new file mode 100644
index 0000000000..4dbf98195c
--- /dev/null
+++ b/include/hw/virtio/vdpa-dev.h
@@ -0,0 +1,43 @@
+/*
+ * Vhost Vdpa Device
+ *
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved.
+ *
+ * Authors:
+ * Longpeng <longpeng2@huawei.com>
+ *
+ * Largely based on the "vhost-user-blk.h" implemented by:
+ * Changpeng Liu <changpeng.liu@intel.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+#ifndef _VHOST_VDPA_DEVICE_H
+#define _VHOST_VDPA_DEVICE_H
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-vdpa.h"
+#include "qom/object.h"
+
+
+#define TYPE_VHOST_VDPA_DEVICE "vhost-vdpa-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VhostVdpaDevice, VHOST_VDPA_DEVICE)
+
+struct VhostVdpaDevice {
+ VirtIODevice parent_obj;
+ char *vhostdev;
+ int vhostfd;
+ int32_t bootindex;
+ uint32_t vdev_id;
+ uint32_t num_queues;
+ struct vhost_dev dev;
+ struct vhost_vdpa vdpa;
+ VirtQueue **virtqs;
+ uint8_t *config;
+ int config_size;
+ uint16_t queue_size;
+ bool started;
+ int (*post_init)(VhostVdpaDevice *v, Error **errp);
+};
+
+#endif
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 8825bd278f..70c2e8ffee 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -22,10 +22,22 @@ typedef enum VhostBackendType {
} VhostBackendType;
typedef enum VhostSetConfigType {
- VHOST_SET_CONFIG_TYPE_MASTER = 0,
+ VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;
+typedef enum VhostDeviceStateDirection {
+ /* Transfer state from back-end (device) to front-end */
+ VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
+ /* Transfer state from front-end to back-end (device) */
+ VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
+} VhostDeviceStateDirection;
+
+typedef enum VhostDeviceStatePhase {
+ /* The device (and all its vrings) is stopped */
+ VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
+} VhostDeviceStatePhase;
+
struct vhost_inflight;
struct vhost_dev;
struct vhost_log;
@@ -33,11 +45,14 @@ struct vhost_memory;
struct vhost_vring_file;
struct vhost_vring_state;
struct vhost_vring_addr;
+struct vhost_vring_worker;
+struct vhost_worker_state;
struct vhost_scsi_target;
struct vhost_iotlb_msg;
struct vhost_virtqueue;
-typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
+typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque,
+ Error **errp);
typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
@@ -68,12 +83,23 @@ typedef int (*vhost_set_vring_kick_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
typedef int (*vhost_set_vring_call_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
+typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev,
+ struct vhost_vring_file *file);
typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
struct vhost_vring_state *r);
+typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker);
+typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker);
+typedef int (*vhost_new_worker_op)(struct vhost_dev *dev,
+ struct vhost_worker_state *worker);
+typedef int (*vhost_free_worker_op)(struct vhost_dev *dev,
+ struct vhost_worker_state *worker);
typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
uint64_t features);
typedef int (*vhost_get_features_op)(struct vhost_dev *dev,
uint64_t *features);
+typedef int (*vhost_set_backend_cap_op)(struct vhost_dev *dev);
typedef int (*vhost_set_owner_op)(struct vhost_dev *dev);
typedef int (*vhost_reset_device_op)(struct vhost_dev *dev);
typedef int (*vhost_get_vq_index_op)(struct vhost_dev *dev, int idx);
@@ -82,9 +108,6 @@ typedef int (*vhost_set_vring_enable_op)(struct vhost_dev *dev,
typedef bool (*vhost_requires_shm_log_op)(struct vhost_dev *dev);
typedef int (*vhost_migration_done_op)(struct vhost_dev *dev,
char *mac_addr);
-typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev,
- uint64_t start1, uint64_t size1,
- uint64_t start2, uint64_t size2);
typedef int (*vhost_vsock_set_guest_cid_op)(struct vhost_dev *dev,
uint64_t guest_cid);
typedef int (*vhost_vsock_set_running_op)(struct vhost_dev *dev, int start);
@@ -96,7 +119,7 @@ typedef int (*vhost_set_config_op)(struct vhost_dev *dev, const uint8_t *data,
uint32_t offset, uint32_t size,
uint32_t flags);
typedef int (*vhost_get_config_op)(struct vhost_dev *dev, uint8_t *config,
- uint32_t config_len);
+ uint32_t config_len, Error **errp);
typedef int (*vhost_crypto_create_session_op)(struct vhost_dev *dev,
void *session_info,
@@ -104,8 +127,7 @@ typedef int (*vhost_crypto_create_session_op)(struct vhost_dev *dev,
typedef int (*vhost_crypto_close_session_op)(struct vhost_dev *dev,
uint64_t session_id);
-typedef bool (*vhost_backend_mem_section_filter_op)(struct vhost_dev *dev,
- MemoryRegionSection *section);
+typedef bool (*vhost_backend_no_private_memslots_op)(struct vhost_dev *dev);
typedef int (*vhost_get_inflight_fd_op)(struct vhost_dev *dev,
uint16_t queue_size,
@@ -124,11 +146,26 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id);
typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
+typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
+ int fd);
+
+typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
+
+typedef bool (*vhost_supports_device_state_op)(struct vhost_dev *dev);
+typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
+ VhostDeviceStateDirection direction,
+ VhostDeviceStatePhase phase,
+ int fd,
+ int *reply_fd,
+ Error **errp);
+typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, Error **errp);
+
typedef struct VhostOps {
VhostBackendType backend_type;
vhost_backend_init vhost_backend_init;
vhost_backend_cleanup vhost_backend_cleanup;
vhost_backend_memslots_limit vhost_backend_memslots_limit;
+ vhost_backend_no_private_memslots_op vhost_backend_no_private_memslots;
vhost_net_set_backend_op vhost_net_set_backend;
vhost_net_set_mtu_op vhost_net_set_mtu;
vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
@@ -143,16 +180,21 @@ typedef struct VhostOps {
vhost_get_vring_base_op vhost_get_vring_base;
vhost_set_vring_kick_op vhost_set_vring_kick;
vhost_set_vring_call_op vhost_set_vring_call;
+ vhost_set_vring_err_op vhost_set_vring_err;
vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
+ vhost_new_worker_op vhost_new_worker;
+ vhost_free_worker_op vhost_free_worker;
+ vhost_get_vring_worker_op vhost_get_vring_worker;
+ vhost_attach_vring_worker_op vhost_attach_vring_worker;
vhost_set_features_op vhost_set_features;
vhost_get_features_op vhost_get_features;
+ vhost_set_backend_cap_op vhost_set_backend_cap;
vhost_set_owner_op vhost_set_owner;
vhost_reset_device_op vhost_reset_device;
vhost_get_vq_index_op vhost_get_vq_index;
vhost_set_vring_enable_op vhost_set_vring_enable;
vhost_requires_shm_log_op vhost_requires_shm_log;
vhost_migration_done_op vhost_migration_done;
- vhost_backend_can_merge_op vhost_backend_can_merge;
vhost_vsock_set_guest_cid_op vhost_vsock_set_guest_cid;
vhost_vsock_set_running_op vhost_vsock_set_running;
vhost_set_iotlb_callback_op vhost_set_iotlb_callback;
@@ -161,21 +203,19 @@ typedef struct VhostOps {
vhost_set_config_op vhost_set_config;
vhost_crypto_create_session_op vhost_crypto_create_session;
vhost_crypto_close_session_op vhost_crypto_close_session;
- vhost_backend_mem_section_filter_op vhost_backend_mem_section_filter;
vhost_get_inflight_fd_op vhost_get_inflight_fd;
vhost_set_inflight_fd_op vhost_set_inflight_fd;
vhost_dev_start_op vhost_dev_start;
vhost_vq_get_addr_op vhost_vq_get_addr;
vhost_get_device_id_op vhost_get_device_id;
vhost_force_iommu_op vhost_force_iommu;
+ vhost_set_config_call_op vhost_set_config_call;
+ vhost_reset_status_op vhost_reset_status;
+ vhost_supports_device_state_op vhost_supports_device_state;
+ vhost_set_device_state_fd_op vhost_set_device_state_fd;
+ vhost_check_device_state_op vhost_check_device_state;
} VhostOps;
-extern const VhostOps user_ops;
-extern const VhostOps vdpa_ops;
-
-int vhost_set_backend_type(struct vhost_dev *dev,
- VhostBackendType backend_type);
-
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
uint64_t iova, uint64_t uaddr,
uint64_t len,
@@ -189,4 +229,7 @@ int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd);
+int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
+ int *dmabuf_fd);
+
#endif /* VHOST_BACKEND_H */
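A hedged sketch of a VhostOps table wiring up a few of the hooks above; my_backend_* is a toy backend, VHOST_BACKEND_TYPE_NONE is assumed from the unchanged VhostBackendType enum, and a real table (kernel_ops, user_ops, vdpa_ops) also fills in mandatory hooks such as vhost_backend_memslots_limit:

#include "hw/virtio/vhost-backend.h"

static int my_backend_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    /* Open / negotiate with the backend; errp reports failures. */
    return 0;
}

static int my_backend_cleanup(struct vhost_dev *dev)
{
    return 0;
}

static const VhostOps my_backend_ops = {
    .backend_type = VHOST_BACKEND_TYPE_NONE,
    .vhost_backend_init = my_backend_init,
    .vhost_backend_cleanup = my_backend_cleanup,
};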
diff --git a/include/hw/virtio/vhost-scsi-common.h b/include/hw/virtio/vhost-scsi-common.h
index 16bf1a73c1..c5d2c09455 100644
--- a/include/hw/virtio/vhost-scsi-common.h
+++ b/include/hw/virtio/vhost-scsi-common.h
@@ -17,12 +17,12 @@
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost.h"
#include "hw/fw-path-provider.h"
+#include "qom/object.h"
#define TYPE_VHOST_SCSI_COMMON "vhost-scsi-common"
-#define VHOST_SCSI_COMMON(obj) \
- OBJECT_CHECK(VHostSCSICommon, (obj), TYPE_VHOST_SCSI_COMMON)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSICommon, VHOST_SCSI_COMMON)
-typedef struct VHostSCSICommon {
+struct VHostSCSICommon {
VirtIOSCSICommon parent_obj;
Error *migration_blocker;
@@ -35,9 +35,11 @@ typedef struct VHostSCSICommon {
int lun;
uint64_t host_features;
bool migratable;
-} VHostSCSICommon;
-int vhost_scsi_common_start(VHostSCSICommon *vsc);
+ struct vhost_inflight *inflight;
+};
+
+int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp);
void vhost_scsi_common_stop(VHostSCSICommon *vsc);
char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);
diff --git a/include/hw/virtio/vhost-scsi.h b/include/hw/virtio/vhost-scsi.h
index 23252153ff..7dc2bdd69d 100644
--- a/include/hw/virtio/vhost-scsi.h
+++ b/include/hw/virtio/vhost-scsi.h
@@ -17,6 +17,7 @@
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-scsi-common.h"
+#include "qom/object.h"
enum vhost_scsi_vq_list {
VHOST_SCSI_VQ_CONTROL = 0,
@@ -25,11 +26,10 @@ enum vhost_scsi_vq_list {
};
#define TYPE_VHOST_SCSI "vhost-scsi"
-#define VHOST_SCSI(obj) \
- OBJECT_CHECK(VHostSCSI, (obj), TYPE_VHOST_SCSI)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSI, VHOST_SCSI)
-typedef struct VHostSCSI {
+struct VHostSCSI {
VHostSCSICommon parent_obj;
-} VHostSCSI;
+};
#endif
diff --git a/include/hw/virtio/vhost-user-base.h b/include/hw/virtio/vhost-user-base.h
new file mode 100644
index 0000000000..51d0968b89
--- /dev/null
+++ b/include/hw/virtio/vhost-user-base.h
@@ -0,0 +1,49 @@
+/*
+ * Vhost-user generic virtio device
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_BASE_H
+#define QEMU_VHOST_USER_BASE_H
+
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+
+#define TYPE_VHOST_USER_BASE "vhost-user-base"
+
+OBJECT_DECLARE_TYPE(VHostUserBase, VHostUserBaseClass, VHOST_USER_BASE)
+
+struct VHostUserBase {
+ VirtIODevice parent_obj;
+
+ /* Properties */
+ CharBackend chardev;
+ uint16_t virtio_id;
+ uint32_t num_vqs;
+ uint32_t vq_size; /* can't exceed VIRTIO_QUEUE_MAX */
+ uint32_t config_size;
+ /* State tracking */
+ VhostUserState vhost_user;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev vhost_dev;
+ GPtrArray *vqs;
+ bool connected;
+};
+
+/*
+ * Needed so we can use the base realize after specialisation
+ * tweaks
+ */
+struct VHostUserBaseClass {
+ VirtioDeviceClass parent_class;
+
+ DeviceRealize parent_realize;
+};
+
+
+#define TYPE_VHOST_USER_DEVICE "vhost-user-device"
+
+#endif /* QEMU_VHOST_USER_BASE_H */
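A sketch of the specialisation pattern the parent_realize field enables, loosely following how the concrete vhost-user stubs further down are expected to use it; the my_device_* names are illustrative and VIRTIO_ID_RNG is only an example ID:

#include "hw/virtio/vhost-user-base.h"
#include "standard-headers/linux/virtio_ids.h"

static void my_device_realize(DeviceState *dev, Error **errp)
{
    VHostUserBase *vub = VHOST_USER_BASE(dev);
    VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);

    /* Specialisation tweaks: fix the virtio ID and queue layout before
     * running the generic vhost-user-base realize. */
    vub->virtio_id = VIRTIO_ID_RNG;
    vub->num_vqs = 1;
    vub->vq_size = 4;

    vubc->parent_realize(dev, errp);
}

static void my_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);

    /* Keep the base realize reachable from the specialised one above. */
    device_class_set_parent_realize(dc, my_device_realize,
                                    &vubc->parent_realize);
}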
diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h
index 34ad6f0c0e..ea085ee1ed 100644
--- a/include/hw/virtio/vhost-user-blk.h
+++ b/include/hw/virtio/vhost-user-blk.h
@@ -20,25 +20,36 @@
#include "chardev/char-fe.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
+#include "qom/object.h"
#define TYPE_VHOST_USER_BLK "vhost-user-blk"
-#define VHOST_USER_BLK(obj) \
- OBJECT_CHECK(VHostUserBlk, (obj), TYPE_VHOST_USER_BLK)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserBlk, VHOST_USER_BLK)
-typedef struct VHostUserBlk {
+#define VHOST_USER_BLK_AUTO_NUM_QUEUES UINT16_MAX
+
+struct VHostUserBlk {
VirtIODevice parent_obj;
CharBackend chardev;
int32_t bootindex;
struct virtio_blk_config blkcfg;
uint16_t num_queues;
uint32_t queue_size;
- uint32_t config_wce;
struct vhost_dev dev;
struct vhost_inflight *inflight;
VhostUserState vhost_user;
struct vhost_virtqueue *vhost_vqs;
VirtQueue **virtqs;
+
+ /*
+ * There are at least two steps of initialization of the
+ * vhost-user device. The first is a "connect" step and the
+ * second is a "start" step. Make a separation between
+ * those initialization phases by using two fields.
+ */
+ /* vhost_user_blk_connect/vhost_user_blk_disconnect */
bool connected;
-} VHostUserBlk;
+ /* vhost_user_blk_start/vhost_user_blk_stop */
+ bool started_vu;
+};
#endif
diff --git a/include/hw/virtio/vhost-user-fs.h b/include/hw/virtio/vhost-user-fs.h
index 6f3030d288..94c3aaa84e 100644
--- a/include/hw/virtio/vhost-user-fs.h
+++ b/include/hw/virtio/vhost-user-fs.h
@@ -11,17 +11,17 @@
* top-level directory.
*/
-#ifndef _QEMU_VHOST_USER_FS_H
-#define _QEMU_VHOST_USER_FS_H
+#ifndef QEMU_VHOST_USER_FS_H
+#define QEMU_VHOST_USER_FS_H
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "chardev/char-fe.h"
+#include "qom/object.h"
#define TYPE_VHOST_USER_FS "vhost-user-fs-device"
-#define VHOST_USER_FS(obj) \
- OBJECT_CHECK(VHostUserFS, (obj), TYPE_VHOST_USER_FS)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserFS, VHOST_USER_FS)
typedef struct {
CharBackend chardev;
@@ -30,7 +30,7 @@ typedef struct {
uint16_t queue_size;
} VHostUserFSConf;
-typedef struct {
+struct VHostUserFS {
/*< private >*/
VirtIODevice parent;
VHostUserFSConf conf;
@@ -39,8 +39,9 @@ typedef struct {
VhostUserState vhost_user;
VirtQueue **req_vqs;
VirtQueue *hiprio_vq;
+ int32_t bootindex;
/*< public >*/
-} VHostUserFS;
+};
-#endif /* _QEMU_VHOST_USER_FS_H */
+#endif /* QEMU_VHOST_USER_FS_H */
diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h
new file mode 100644
index 0000000000..5814a8400a
--- /dev/null
+++ b/include/hw/virtio/vhost-user-gpio.h
@@ -0,0 +1,24 @@
+/*
+ * Vhost-user GPIO virtio device
+ *
+ * Copyright (c) 2021 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _QEMU_VHOST_USER_GPIO_H
+#define _QEMU_VHOST_USER_GPIO_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_GPIO "vhost-user-gpio-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserGPIO, VHOST_USER_GPIO);
+
+struct VHostUserGPIO {
+ VHostUserBase parent_obj;
+};
+
+#endif /* _QEMU_VHOST_USER_GPIO_H */
diff --git a/include/hw/virtio/vhost-user-i2c.h b/include/hw/virtio/vhost-user-i2c.h
new file mode 100644
index 0000000000..a9b5612ad0
--- /dev/null
+++ b/include/hw/virtio/vhost-user-i2c.h
@@ -0,0 +1,25 @@
+/*
+ * Vhost-user i2c virtio device
+ *
+ * Copyright (c) 2021 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_I2C_H
+#define QEMU_VHOST_USER_I2C_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_I2C "vhost-user-i2c-device"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserI2C, VHOST_USER_I2C)
+
+struct VHostUserI2C {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_I2C_H */
diff --git a/include/hw/virtio/vhost-user-rng.h b/include/hw/virtio/vhost-user-rng.h
new file mode 100644
index 0000000000..10868c7de4
--- /dev/null
+++ b/include/hw/virtio/vhost-user-rng.h
@@ -0,0 +1,24 @@
+/*
+ * Vhost-user RNG virtio device
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_RNG_H
+#define QEMU_VHOST_USER_RNG_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_RNG "vhost-user-rng"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserRNG, VHOST_USER_RNG)
+
+struct VHostUserRNG {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_RNG_H */
diff --git a/include/hw/virtio/vhost-user-scmi.h b/include/hw/virtio/vhost-user-scmi.h
new file mode 100644
index 0000000000..c90db77dd5
--- /dev/null
+++ b/include/hw/virtio/vhost-user-scmi.h
@@ -0,0 +1,31 @@
+/*
+ * Vhost-user SCMI virtio device
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _QEMU_VHOST_USER_SCMI_H
+#define _QEMU_VHOST_USER_SCMI_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+
+#define TYPE_VHOST_USER_SCMI "vhost-user-scmi"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCMI, VHOST_USER_SCMI);
+
+struct VHostUserSCMI {
+ VirtIODevice parent;
+ CharBackend chardev;
+ struct vhost_virtqueue *vhost_vqs;
+ struct vhost_dev vhost_dev;
+ VhostUserState vhost_user;
+ VirtQueue *cmd_vq;
+ VirtQueue *event_vq;
+ bool connected;
+ bool started_vu;
+};
+
+#endif /* _QEMU_VHOST_USER_SCMI_H */
diff --git a/include/hw/virtio/vhost-user-scsi.h b/include/hw/virtio/vhost-user-scsi.h
index 99ab2f2cc4..78fe616ccb 100644
--- a/include/hw/virtio/vhost-user-scsi.h
+++ b/include/hw/virtio/vhost-user-scsi.h
@@ -21,14 +21,20 @@
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-scsi-common.h"
+#include "qom/object.h"
#define TYPE_VHOST_USER_SCSI "vhost-user-scsi"
-#define VHOST_USER_SCSI(obj) \
- OBJECT_CHECK(VHostUserSCSI, (obj), TYPE_VHOST_USER_SCSI)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCSI, VHOST_USER_SCSI)
-typedef struct VHostUserSCSI {
+struct VHostUserSCSI {
VHostSCSICommon parent_obj;
+
+ /* Properties */
+ bool connected;
+ bool started_vu;
+
VhostUserState vhost_user;
-} VHostUserSCSI;
+ struct vhost_virtqueue *vhost_vqs;
+};
#endif /* VHOST_USER_SCSI_H */
diff --git a/include/hw/virtio/vhost-user-snd.h b/include/hw/virtio/vhost-user-snd.h
new file mode 100644
index 0000000000..f9260116a7
--- /dev/null
+++ b/include/hw/virtio/vhost-user-snd.h
@@ -0,0 +1,24 @@
+/*
+ * Vhost-user Sound virtio device
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_SND_H
+#define QEMU_VHOST_USER_SND_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_SND "vhost-user-snd"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSound, VHOST_USER_SND)
+
+struct VHostUserSound {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_SND_H */
diff --git a/include/hw/virtio/vhost-user-vsock.h b/include/hw/virtio/vhost-user-vsock.h
index 4e128a4b9f..67aa46c952 100644
--- a/include/hw/virtio/vhost-user-vsock.h
+++ b/include/hw/virtio/vhost-user-vsock.h
@@ -8,22 +8,22 @@
* top-level directory.
*/
-#ifndef _QEMU_VHOST_USER_VSOCK_H
-#define _QEMU_VHOST_USER_VSOCK_H
+#ifndef QEMU_VHOST_USER_VSOCK_H
+#define QEMU_VHOST_USER_VSOCK_H
#include "hw/virtio/vhost-vsock-common.h"
#include "hw/virtio/vhost-user.h"
#include "standard-headers/linux/virtio_vsock.h"
+#include "qom/object.h"
#define TYPE_VHOST_USER_VSOCK "vhost-user-vsock-device"
-#define VHOST_USER_VSOCK(obj) \
- OBJECT_CHECK(VHostUserVSock, (obj), TYPE_VHOST_USER_VSOCK)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserVSock, VHOST_USER_VSOCK)
typedef struct {
CharBackend chardev;
} VHostUserVSockConf;
-typedef struct {
+struct VHostUserVSock {
/*< private >*/
VHostVSockCommon parent;
VhostUserState vhost_user;
@@ -31,6 +31,6 @@ typedef struct {
struct virtio_vsock_config vsockcfg;
/*< public >*/
-} VHostUserVSock;
+};
-#endif /* _QEMU_VHOST_USER_VSOCK_H */
+#endif /* QEMU_VHOST_USER_VSOCK_H */
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index a9abca3288..d7c09ffd34 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -11,19 +11,104 @@
#include "chardev/char-fe.h"
#include "hw/virtio/virtio.h"
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
+ VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+ VHOST_USER_PROTOCOL_F_STATUS = 16,
+ /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
+ VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
+ VHOST_USER_PROTOCOL_F_DEVICE_STATE = 19,
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+/**
+ * VhostUserHostNotifier - notifier information for one queue
+ * @rcu: rcu_head for cleanup
+ * @mr: memory region of notifier
+ * @addr: current mapped address
+ * @unmap_addr: address to be un-mapped
+ * @idx: virtqueue index
+ *
+ * The VhostUserHostNotifier entries are re-used. When an old mapping
+ * is to be released it is moved to @unmap_addr and @addr is replaced.
+ * Once the RCU grace period has passed and the unmap is complete,
+ * @unmap_addr is cleared.
+ */
typedef struct VhostUserHostNotifier {
+ struct rcu_head rcu;
MemoryRegion mr;
void *addr;
- bool set;
+ void *unmap_addr;
+ int idx;
} VhostUserHostNotifier;
+/**
+ * VhostUserState - shared state for all vhost-user devices
+ * @chr: the character backend for the socket
+ * @notifiers: GPtrArray of @VhostUserHostNotifier
+ * @memory_slots: maximum number of memory slots supported by the backend
+ */
typedef struct VhostUserState {
CharBackend *chr;
- VhostUserHostNotifier notifier[VIRTIO_QUEUE_MAX];
+ GPtrArray *notifiers;
int memory_slots;
+ bool supports_config;
} VhostUserState;
+/**
+ * vhost_user_init() - initialise shared vhost_user state
+ * @user: allocated area for storing shared state
+ * @chr: the chardev for the vhost socket
+ * @errp: error handle
+ *
+ * The caller can either g_new() space for the state directly or embed
+ * VhostUserState in their larger device structure and just point to
+ * it.
+ *
+ * Return: true on success, false on error while setting errp.
+ */
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
+
+/**
+ * vhost_user_cleanup() - cleanup state
+ * @user: ptr to use state
+ *
+ * Cleans up shared state and notifiers, callee is responsible for
+ * freeing the @VhostUserState memory itself.
+ */
void vhost_user_cleanup(VhostUserState *user);
+/**
+ * vhost_user_async_close() - cleanup vhost-user post connection drop
+ * @d: DeviceState for the associated device (passed to callback)
+ * @chardev: the CharBackend associated with the connection
+ * @vhost: the common vhost device
+ * @cb: the user callback function to complete the clean-up
+ *
+ * This function is used to handle the shutdown of a vhost-user
+ * connection to a backend. We handle this centrally to make sure we
+ * do all the steps and handle potential races due to VM shutdowns.
+ * Once the connection is disabled we schedule a bottom half so the
+ * actual stop and clean-up run outside the chardev event handler.
+ */
+typedef void (*vu_async_close_fn)(DeviceState *cb);
+
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb,
+ IOEventHandler *event_cb);
+
#endif
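A small usage sketch for the shared-state helpers above, assuming a hypothetical device (MyVhostUserDev) that embeds VhostUserState rather than allocating it separately:

#include "hw/virtio/vhost-user.h"

typedef struct MyVhostUserDev {
    VirtIODevice parent_obj;
    CharBackend chardev;        /* socket to the vhost-user daemon */
    VhostUserState vhost_user;  /* shared state, embedded in the device */
} MyVhostUserDev;

static bool my_dev_connect(MyVhostUserDev *d, Error **errp)
{
    /* Bind the shared vhost-user state to the chardev; on failure errp
     * is set and the state must not be used. */
    return vhost_user_init(&d->vhost_user, &d->chardev, errp);
}

static void my_dev_teardown(MyVhostUserDev *d)
{
    /* Releases notifiers and internal state; the embedded struct itself
     * is freed together with the device. */
    vhost_user_cleanup(&d->vhost_user);
}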
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 6455663388..0a9575b469 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -12,15 +12,84 @@
#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H
+#include <gmodule.h>
+
+#include "hw/virtio/vhost-iova-tree.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/virtio.h"
+#include "standard-headers/linux/vhost_types.h"
-typedef struct vhost_vdpa {
+/*
+ * ASID dedicated to mapping the guest's addresses. If SVQ is disabled it maps
+ * GPA to QEMU's IOVA. If SVQ is enabled the SVQ vrings are also mapped here.
+ */
+#define VHOST_VDPA_GUEST_PA_ASID 0
+
+typedef struct VhostVDPAHostNotifier {
+ MemoryRegion mr;
+ void *addr;
+} VhostVDPAHostNotifier;
+
+typedef enum SVQTransitionState {
+ SVQ_TSTATE_DISABLING = -1,
+ SVQ_TSTATE_DONE,
+ SVQ_TSTATE_ENABLING
+} SVQTransitionState;
+
+/* Info shared by all vhost_vdpa device models */
+typedef struct vhost_vdpa_shared {
int device_fd;
- uint32_t msg_type;
MemoryListener listener;
+ struct vhost_vdpa_iova_range iova_range;
+ QLIST_HEAD(, vdpa_iommu) iommu_list;
+
+ /* IOVA mapping used by the Shadow Virtqueue */
+ VhostIOVATree *iova_tree;
+
+ /* Copy of backend features */
+ uint64_t backend_cap;
+
+ bool iotlb_batch_begin_sent;
+
+ /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
+ bool shadow_data;
+
+ /* SVQ switching is in progress, or already completed? */
+ SVQTransitionState svq_switching;
+} VhostVDPAShared;
+
+typedef struct vhost_vdpa {
+ int index;
+ uint32_t address_space_id;
+ uint64_t acked_features;
+ bool shadow_vqs_enabled;
+ /* Device suspended successfully */
+ bool suspended;
+ VhostVDPAShared *shared;
+ GPtrArray *shadow_vqs;
+ const VhostShadowVirtqueueOps *shadow_vq_ops;
+ void *shadow_vq_ops_opaque;
+ struct vhost_dev *dev;
+ Error *migration_blocker;
+ VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
+ IOMMUNotifier n;
} VhostVDPA;
-extern AddressSpace address_space_memory;
-extern int vhost_vdpa_get_device_id(struct vhost_dev *dev,
- uint32_t *device_id);
+int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
+int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
+
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly);
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
+ hwaddr size);
+
+typedef struct vdpa_iommu {
+ VhostVDPAShared *dev_shared;
+ IOMMUMemoryRegion *iommu_mr;
+ hwaddr iommu_offset;
+ IOMMUNotifier n;
+ QLIST_ENTRY(vdpa_iommu) iommu_next;
+} VDPAIOMMUState;
+
+
#endif
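A hedged sketch of the map/unmap helpers declared above; my_map_buffer() is hypothetical and the IOVA/size values are placeholders:

#include "hw/virtio/vhost-vdpa.h"

static int my_map_buffer(VhostVDPAShared *s, hwaddr iova, void *vaddr,
                         hwaddr size)
{
    int r;

    /* Map the buffer read-write in the guest-PA address space (ASID 0). */
    r = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, size,
                           vaddr, false);
    if (r) {
        return r;
    }

    /* ... hand the IOVA to the device, wait for it to finish ... */

    return vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, size);
}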
diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
index f8b4aaae00..75a74e8a99 100644
--- a/include/hw/virtio/vhost-vsock-common.h
+++ b/include/hw/virtio/vhost-vsock-common.h
@@ -8,15 +8,16 @@
* top-level directory.
*/
-#ifndef _QEMU_VHOST_VSOCK_COMMON_H
-#define _QEMU_VHOST_VSOCK_COMMON_H
+#ifndef QEMU_VHOST_VSOCK_COMMON_H
+#define QEMU_VHOST_VSOCK_COMMON_H
+#include "qapi/qapi-types-common.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
+#include "qom/object.h"
#define TYPE_VHOST_VSOCK_COMMON "vhost-vsock-common"
-#define VHOST_VSOCK_COMMON(obj) \
- OBJECT_CHECK(VHostVSockCommon, (obj), TYPE_VHOST_VSOCK_COMMON)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostVSockCommon, VHOST_VSOCK_COMMON)
enum {
VHOST_VSOCK_SAVEVM_VERSION = 0,
@@ -24,7 +25,7 @@ enum {
VHOST_VSOCK_QUEUE_SIZE = 128,
};
-typedef struct {
+struct VHostVSockCommon {
VirtIODevice parent;
struct vhost_virtqueue vhost_vqs[2];
@@ -35,13 +36,18 @@ typedef struct {
VirtQueue *trans_vq;
QEMUTimer *post_load_timer;
-} VHostVSockCommon;
+
+ /* features */
+ OnOffAuto seqpacket;
+};
int vhost_vsock_common_start(VirtIODevice *vdev);
void vhost_vsock_common_stop(VirtIODevice *vdev);
int vhost_vsock_common_pre_save(void *opaque);
int vhost_vsock_common_post_load(void *opaque, int version_id);
-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name);
+void vhost_vsock_common_realize(VirtIODevice *vdev);
void vhost_vsock_common_unrealize(VirtIODevice *vdev);
+uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp);
-#endif /* _QEMU_VHOST_VSOCK_COMMON_H */
+#endif /* QEMU_VHOST_VSOCK_COMMON_H */
diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
index 8cbb7b90f9..84f4e727c7 100644
--- a/include/hw/virtio/vhost-vsock.h
+++ b/include/hw/virtio/vhost-vsock.h
@@ -15,22 +15,22 @@
#define QEMU_VHOST_VSOCK_H
#include "hw/virtio/vhost-vsock-common.h"
+#include "qom/object.h"
#define TYPE_VHOST_VSOCK "vhost-vsock-device"
-#define VHOST_VSOCK(obj) \
- OBJECT_CHECK(VHostVSock, (obj), TYPE_VHOST_VSOCK)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
typedef struct {
uint64_t guest_cid;
char *vhostfd;
} VHostVSockConf;
-typedef struct {
+struct VHostVSock {
/*< private >*/
VHostVSockCommon parent;
VHostVSockConf conf;
/*< public >*/
-} VHostVSock;
+};
#endif /* QEMU_VHOST_VSOCK_H */
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 767a95ec0b..02477788df 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -5,6 +5,11 @@
#include "hw/virtio/virtio.h"
#include "exec/memory.h"
+#define VHOST_F_DEVICE_IOTLB 63
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+#define VU_REALIZE_CONN_RETRIES 3
+
/* Generic structures common for any vhost based device. */
struct vhost_inflight {
@@ -29,6 +34,8 @@ struct vhost_virtqueue {
unsigned long long used_phys;
unsigned used_size;
EventNotifier masked_notifier;
+ EventNotifier error_notifier;
+ EventNotifier masked_config_notifier;
struct vhost_dev *dev;
};
@@ -37,6 +44,7 @@ typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT (0xff)
+#define VHOST_QUEUE_NUM_CONFIG_INR 0
struct vhost_log {
unsigned long long size;
@@ -61,6 +69,12 @@ typedef struct VhostDevConfigOps {
} VhostDevConfigOps;
struct vhost_memory;
+
+/**
+ * struct vhost_dev - common vhost_dev structure
+ * @vhost_ops: backend specific ops
+ * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
+ */
struct vhost_dev {
VirtIODevice *vdev;
MemoryListener memory_listener;
@@ -71,14 +85,42 @@ struct vhost_dev {
int n_tmp_sections;
MemoryRegionSection *tmp_sections;
struct vhost_virtqueue *vqs;
- int nvqs;
+ unsigned int nvqs;
/* the first virtqueue which would be used by this vhost dev */
int vq_index;
+ /* one past the last vq index for the virtio device (not vhost) */
+ int vq_index_end;
+ /* if non-zero, minimum required value for max_queues */
+ int num_queues;
+ /**
+ * vhost feature handling requires matching the feature set
+ * offered by a backend which may be a subset of the total
+ * features eventually offered to the guest.
+ *
+ * @features: available features provided by the backend
+ * @acked_features: final negotiated features with front-end driver
+ *
+ * @backend_features: this is used in a couple of places to either
+ * store VHOST_USER_F_PROTOCOL_FEATURES to apply to
+ * VHOST_USER_SET_FEATURES or VHOST_NET_F_VIRTIO_NET_HDR. Its
+ * future use should be discouraged and the variable retired as
+ * it's easy to confuse with the VirtIO backend_features.
+ */
uint64_t features;
uint64_t acked_features;
uint64_t backend_features;
+
+ /**
+ * @protocol_features: is the vhost-user only feature set by
+ * VHOST_USER_SET_PROTOCOL_FEATURES. Protocol features are only
+ * negotiated if VHOST_USER_F_PROTOCOL_FEATURES has been offered
+ * by the backend (see @features).
+ */
uint64_t protocol_features;
+
uint64_t max_queues;
+ uint64_t backend_cap;
+ /* @started: is the vhost device started? */
bool started;
bool log_enabled;
uint64_t log_size;
@@ -92,6 +134,10 @@ struct vhost_dev {
const VhostDevConfigOps *config_ops;
};
+extern const VhostOps kernel_ops;
+extern const VhostOps user_ops;
+extern const VhostOps vdpa_ops;
+
struct vhost_net {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
@@ -99,14 +145,143 @@ struct vhost_net {
NetClientState *nc;
};
+/**
+ * vhost_dev_init() - initialise the vhost interface
+ * @hdev: the common vhost_dev structure
+ * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
+ * @backend_type: type of backend
+ * @busyloop_timeout: timeout for polling virtqueue
+ * @errp: error handle
+ *
+ * The initialisation of the vhost device will trigger the
+ * initialisation of the backend and potentially capability
+ * negotiation of the backend interface. Configuration of the VirtIO
+ * device itself won't happen until the interface is started.
+ *
+ * Return: 0 on success, non-zero on error while setting errp.
+ */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type,
- uint32_t busyloop_timeout);
+ uint32_t busyloop_timeout, Error **errp);
+
+/**
+ * vhost_dev_cleanup() - tear down and cleanup vhost interface
+ * @hdev: the common vhost_dev structure
+ */
void vhost_dev_cleanup(struct vhost_dev *hdev);
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
+
+/**
+ * vhost_dev_enable_notifiers() - enable event notifiers
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ *
+ * Enable notifications directly to the vhost device rather than being
+ * triggered by QEMU itself. Notifications should be enabled before
+ * the vhost device is started via @vhost_dev_start.
+ *
+ * Return: 0 on success, < 0 on error.
+ */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+
+/**
+ * vhost_dev_disable_notifiers - disable event notifications
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ *
+ * Disable direct notifications to vhost device.
+ */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+bool vhost_config_pending(struct vhost_dev *hdev);
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
+
+/**
+ * vhost_dev_is_started() - report status of vhost device
+ * @hdev: common vhost_dev structure
+ *
+ * Return the started status of the vhost device
+ */
+static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
+{
+ return hdev->started;
+}
+
+/**
+ * vhost_dev_start() - start the vhost device
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ * @vrings: true to have vrings enabled in this call
+ *
+ * Starts the vhost device. From this point VirtIO feature negotiation
+ * can start and the device can start processing VirtIO transactions.
+ *
+ * Return: 0 on success, < 0 on error.
+ */
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+
+/**
+ * vhost_dev_stop() - stop the vhost device
+ * @hdev: common vhost_dev structure
+ * @vdev: the VirtIODevice structure
+ * @vrings: true to have vrings disabled in this call
+ *
+ * Stop the vhost device. After the device is stopped the notifiers
+ * can be disabled (@vhost_dev_disable_notifiers) and the device can
+ * be torn down (@vhost_dev_cleanup).
+ */
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
+
+/**
+ * DOC: vhost device configuration handling
+ *
+ * The VirtIO device configuration space is used for rarely changing
+ * or initialisation time parameters. The configuration can be updated
+ * by either the guest driver or the device itself. If the device can
+ * change the configuration over time the vhost handler should
+ * register a @VhostDevConfigOps structure with
+ * @vhost_dev_set_config_notifier so the guest can be notified. Some
+ * devices register a handler anyway and will signal an error if an
+ * unexpected config change happens.
+ */
+
+/**
+ * vhost_dev_get_config() - fetch device configuration
+ * @hdev: common vhost_dev structure
+ * @config: pointer to device appropriate config structure
+ * @config_len: size of device appropriate config structure
+ *
+ * Return: 0 on success, < 0 on error while setting errp
+ */
+int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
+ uint32_t config_len, Error **errp);
+
+/**
+ * vhost_dev_set_config() - set device configuration
+ * @hdev: common vhost_dev structure
+ * @data: pointer to data to set
+ * @offset: offset into configuration space
+ * @size: length of set
+ * @flags: @VhostSetConfigType flags
+ *
+ * By use of @offset/@size a subset of the configuration space can be
+ * written to. The @flags are used to indicate if it is a normal
+ * transaction or related to migration.
+ *
+ * Return: 0 on success, non-zero on error
+ */
+int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size, uint32_t flags);
+
+/**
+ * vhost_dev_set_config_notifier() - register VhostDevConfigOps
+ * @hdev: common vhost_dev structure
+ * @ops: notifier ops
+ *
+ * If the device is expected to change configuration a notifier can be
+ * set up to handle the case.
+ */
+void vhost_dev_set_config_notifier(struct vhost_dev *dev,
+ const VhostDevConfigOps *ops);
+
/* Test and clear masked event pending status.
* Should be called after unmask to avoid losing events.
@@ -117,31 +292,176 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
*/
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
bool mask);
+
+/**
+ * vhost_get_features() - return a sanitised set of feature bits
+ * @hdev: common vhost_dev structure
+ * @feature_bits: pointer to terminated table of feature bits
+ * @features: original feature set
+ *
+ * This returns the set of feature bits that is the intersection of what
+ * is supported by the vhost backend (hdev->features), the supported
+ * feature_bits and the requested feature set.
+ */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features);
+
+/**
+ * vhost_ack_features() - set vhost acked_features
+ * @hdev: common vhost_dev structure
+ * @feature_bits: pointer to terminated table of feature bits
+ * @features: requested feature set
+ *
+ * This sets the internal hdev->acked_features to the intersection of
+ * the backend's advertised features and the supported feature_bits.
+ */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features);
-bool vhost_has_free_slot(void);
+unsigned int vhost_get_max_memslots(void);
+unsigned int vhost_get_free_memslots(void);
int vhost_net_set_backend(struct vhost_dev *hdev,
struct vhost_vring_file *file);
+void vhost_toggle_device_iotlb(VirtIODevice *vdev);
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
-int vhost_dev_get_config(struct vhost_dev *dev, uint8_t *config,
- uint32_t config_len);
-int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
- uint32_t offset, uint32_t size, uint32_t flags);
-/* notifier callback in case vhost device config space changed
- */
-void vhost_dev_set_config_notifier(struct vhost_dev *dev,
- const VhostDevConfigOps *ops);
+
+int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq, unsigned idx);
+void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq, unsigned idx);
void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
+int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
struct vhost_inflight *inflight);
+bool vhost_dev_has_iommu(struct vhost_dev *dev);
+
+#ifdef CONFIG_VHOST
+int vhost_reset_device(struct vhost_dev *hdev);
+#else
+static inline int vhost_reset_device(struct vhost_dev *hdev)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_VHOST */
+
+/**
+ * vhost_supports_device_state(): Checks whether the back-end supports
+ * transferring internal device state for the purpose of migration.
+ * Support for this feature is required for vhost_set_device_state_fd()
+ * and vhost_check_device_state().
+ *
+ * @dev: The vhost device
+ *
+ * Returns true if the device supports these commands, and false if it
+ * does not.
+ */
+bool vhost_supports_device_state(struct vhost_dev *dev);
+
+/**
+ * vhost_set_device_state_fd(): Begin transfer of internal state from/to
+ * the back-end for the purpose of migration. Data is to be transferred
+ * over a pipe according to @direction and @phase. The sending end must
+ * only write to the pipe, and the receiving end must only read from it.
+ * Once the sending end is done, it closes its FD. The receiving end
+ * must take this as the end-of-transfer signal and close its FD, too.
+ *
+ * @fd is the back-end's end of the pipe: The write FD for SAVE, and the
+ * read FD for LOAD. This function transfers ownership of @fd to the
+ * back-end, i.e. closes it in the front-end.
+ *
+ * The back-end may optionally reply with an FD of its own, if this
+ * improves efficiency on its end. In this case, the returned FD is
+ * stored in *reply_fd. The back-end will discard the FD sent to it,
+ * and the front-end must use *reply_fd for transferring state to/from
+ * the back-end.
+ *
+ * @dev: The vhost device
+ * @direction: The direction in which the state is to be transferred.
+ * For outgoing migrations, this is SAVE, and data is read
+ * from the back-end and stored by the front-end in the
+ * migration stream.
+ * For incoming migrations, this is LOAD, and data is read
+ * by the front-end from the migration stream and sent to
+ * the back-end to restore the saved state.
+ * @phase: Which migration phase we are in. Currently, there is only
+ * STOPPED (device and all vrings are stopped), in the future,
+ * more phases such as PRE_COPY or POST_COPY may be added.
+ * @fd: Back-end's end of the pipe through which to transfer state; note
+ * that ownership is transferred to the back-end, so this function
+ * closes @fd in the front-end.
+ * @reply_fd: If the back-end wishes to use a different pipe for state
+ * transfer, this will contain an FD for the front-end to
+ * use. Otherwise, -1 is stored here.
+ * @errp: Potential error description
+ *
+ * Returns 0 on success, and -errno on failure.
+ */
+int vhost_set_device_state_fd(struct vhost_dev *dev,
+ VhostDeviceStateDirection direction,
+ VhostDeviceStatePhase phase,
+ int fd,
+ int *reply_fd,
+ Error **errp);
+
+/**
+ * vhost_check_device_state(): After transferring state from/to the
+ * back-end via vhost_set_device_state_fd(), i.e. once the sending end
+ * has closed the pipe, inquire the back-end to report any potential
+ * errors that have occurred on its side. This makes it possible to
+ * detect errors such as:
+ * - During outgoing migration, when the source side had already started
+ * to produce its state, something went wrong and it failed to finish
+ * - During incoming migration, when the received state is somehow
+ * invalid and cannot be processed by the back-end
+ *
+ * @dev: The vhost device
+ * @errp: Potential error description
+ *
+ * Returns 0 when the back-end reports successful state transfer and
+ * processing, and -errno when an error occurred somewhere.
+ */
+int vhost_check_device_state(struct vhost_dev *dev, Error **errp);
+
+/**
+ * vhost_save_backend_state(): High-level function to receive a vhost
+ * back-end's state, and save it in @f. Uses
+ * `vhost_set_device_state_fd()` to get the data from the back-end, and
+ * stores it in consecutive chunks that are each prefixed by their
+ * respective length (be32). The end is marked by a 0-length chunk.
+ *
+ * Must only be called while the device and all its vrings are stopped
+ * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
+ *
+ * @dev: The vhost device from which to save the state
+ * @f: Migration stream in which to save the state
+ * @errp: Potential error message
+ *
+ * Returns 0 on success, and -errno otherwise.
+ */
+int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
+
+/**
+ * vhost_load_backend_state(): High-level function to load a vhost
+ * back-end's state from @f, and send it over to the back-end. Reads
+ * the data from @f in the format used by `vhost_save_backend_state()`,
+ * and uses `vhost_set_device_state_fd()` to transfer it to the back-end.
+ *
+ * Must only be called while the device and all its vrings are stopped
+ * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
+ *
+ * @dev: The vhost device to which to send the state
+ * @f: Migration stream from which to load the state
+ * @errp: Potential error message
+ *
+ * Returns 0 on success, and -errno otherwise.
+ */
+int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
+
#endif
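A sketch of the start/stop lifecycle described by the comments above, assuming a hypothetical device model (the my_dev_* helpers are illustrative, not part of the tree):

#include "qapi/error.h"
#include "hw/virtio/vhost.h"

static int my_dev_start_vhost(struct vhost_dev *hdev, VirtIODevice *vdev,
                              Error **errp)
{
    int r;

    /* Route guest notifications directly to the vhost backend first. */
    r = vhost_dev_enable_notifiers(hdev, vdev);
    if (r < 0) {
        error_setg_errno(errp, -r, "failed to enable notifiers");
        return r;
    }

    /* Start the device, enabling the vrings as part of the call. */
    r = vhost_dev_start(hdev, vdev, true);
    if (r < 0) {
        vhost_dev_disable_notifiers(hdev, vdev);
        error_setg_errno(errp, -r, "failed to start vhost device");
        return r;
    }
    return 0;
}

static void my_dev_stop_vhost(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    /* Mirror image of start: stop the device, then disable notifiers. */
    vhost_dev_stop(hdev, vdev, true);
    vhost_dev_disable_notifiers(hdev, vdev);
}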
diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
index 6818a23a2d..07aae69042 100644
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -28,7 +28,7 @@ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
{
#if defined(LEGACY_VIRTIO_IS_BIENDIAN)
return virtio_is_big_endian(vdev);
-#elif defined(TARGET_WORDS_BIGENDIAN)
+#elif TARGET_BIG_ENDIAN
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
@@ -149,7 +149,7 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return virtio_access_is_big_endian(vdev) ? s : bswap16(s);
#else
return virtio_access_is_big_endian(vdev) ? bswap16(s) : s;
@@ -215,7 +215,7 @@ static inline void virtio_tswap16s(VirtIODevice *vdev, uint16_t *s)
static inline uint32_t virtio_tswap32(VirtIODevice *vdev, uint32_t s)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return virtio_access_is_big_endian(vdev) ? s : bswap32(s);
#else
return virtio_access_is_big_endian(vdev) ? bswap32(s) : s;
@@ -229,7 +229,7 @@ static inline void virtio_tswap32s(VirtIODevice *vdev, uint32_t *s)
static inline uint64_t virtio_tswap64(VirtIODevice *vdev, uint64_t s)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return virtio_access_is_big_endian(vdev) ? s : bswap64(s);
#else
return virtio_access_is_big_endian(vdev) ? bswap64(s) : s;
diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h
new file mode 100644
index 0000000000..cace2a315f
--- /dev/null
+++ b/include/hw/virtio/virtio-acpi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ACPI support for virtio
+ */
+
+#ifndef VIRTIO_ACPI_H
+#define VIRTIO_ACPI_H
+
+#include "exec/hwaddr.h"
+
+void virtio_acpi_dsdt_add(Aml *scope, const hwaddr virtio_mmio_base,
+ const hwaddr virtio_mmio_size, uint32_t mmio_irq,
+ long int start_index, int num);
+
+#endif
diff --git a/include/hw/virtio/virtio-balloon.h b/include/hw/virtio/virtio-balloon.h
index 28fd2b3960..5139cf8ab6 100644
--- a/include/hw/virtio/virtio-balloon.h
+++ b/include/hw/virtio/virtio-balloon.h
@@ -18,10 +18,10 @@
#include "standard-headers/linux/virtio_balloon.h"
#include "hw/virtio/virtio.h"
#include "sysemu/iothread.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_BALLOON "virtio-balloon-device"
-#define VIRTIO_BALLOON(obj) \
- OBJECT_CHECK(VirtIOBalloon, (obj), TYPE_VIRTIO_BALLOON)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBalloon, VIRTIO_BALLOON)
#define VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN 0x80000000
@@ -40,7 +40,7 @@ enum virtio_balloon_free_page_hint_status {
FREE_PAGE_HINT_S_DONE = 3,
};
-typedef struct VirtIOBalloon {
+struct VirtIOBalloon {
VirtIODevice parent_obj;
VirtQueue *ivq, *dvq, *svq, *free_page_vq, *reporting_vq;
uint32_t free_page_hint_status;
@@ -71,6 +71,6 @@ typedef struct VirtIOBalloon {
bool qemu_4_0_config_size;
uint32_t poison_val;
-} VirtIOBalloon;
+};
#endif
diff --git a/include/hw/virtio/virtio-blk-common.h b/include/hw/virtio/virtio-blk-common.h
new file mode 100644
index 0000000000..31daada3e3
--- /dev/null
+++ b/include/hw/virtio/virtio-blk-common.h
@@ -0,0 +1,20 @@
+/*
+ * Virtio Block Device common helpers
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef VIRTIO_BLK_COMMON_H
+#define VIRTIO_BLK_COMMON_H
+
+#include "hw/virtio/virtio.h"
+
+extern const VirtIOConfigSizeParams virtio_blk_cfg_size_params;
+
+#endif
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index b1334c3904..5c14110c4b 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -19,10 +19,12 @@
#include "hw/block/block.h"
#include "sysemu/iothread.h"
#include "sysemu/block-backend.h"
+#include "sysemu/block-ram-registrar.h"
+#include "qom/object.h"
+#include "qapi/qapi-types-virtio.h"
#define TYPE_VIRTIO_BLK "virtio-blk-device"
-#define VIRTIO_BLK(obj) \
- OBJECT_CHECK(VirtIOBlock, (obj), TYPE_VIRTIO_BLK)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
/* This is the last element of the write scatter-gather list */
struct virtio_blk_inhdr
@@ -30,44 +32,57 @@ struct virtio_blk_inhdr
unsigned char status;
};
+#define VIRTIO_BLK_AUTO_NUM_QUEUES UINT16_MAX
+
struct VirtIOBlkConf
{
BlockConf conf;
IOThread *iothread;
+ IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
char *serial;
uint32_t request_merging;
uint16_t num_queues;
uint16_t queue_size;
bool seg_max_adjust;
+ bool report_discard_granularity;
uint32_t max_discard_sectors;
uint32_t max_write_zeroes_sectors;
bool x_enable_wce_if_config_wce;
};
-struct VirtIOBlockDataPlane;
-
struct VirtIOBlockReq;
-typedef struct VirtIOBlock {
+struct VirtIOBlock {
VirtIODevice parent_obj;
BlockBackend *blk;
- void *rq;
- QEMUBH *bh;
+ QemuMutex rq_lock;
+ struct VirtIOBlockReq *rq; /* protected by rq_lock */
VirtIOBlkConf conf;
unsigned short sector_mask;
bool original_wce;
VMChangeStateEntry *change;
- bool dataplane_disabled;
- bool dataplane_started;
- struct VirtIOBlockDataPlane *dataplane;
+ bool ioeventfd_disabled;
+ bool ioeventfd_started;
+ bool ioeventfd_starting;
+ bool ioeventfd_stopping;
+
+ /*
+ * The AioContext for each virtqueue. The BlockDriverState will use the
+ * first element as its AioContext.
+ */
+ AioContext **vq_aio_context;
+
uint64_t host_features;
size_t config_size;
-} VirtIOBlock;
+ BlockRAMRegistrar blk_ram_registrar;
+};
typedef struct VirtIOBlockReq {
VirtQueueElement elem;
int64_t sector_num;
VirtIOBlock *dev;
VirtQueue *vq;
+ IOVDiscardUndo inhdr_undo;
+ IOVDiscardUndo outhdr_undo;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
@@ -85,7 +100,6 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
-bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
#endif
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 0f6f215925..7ab8c9dab0 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -27,17 +27,16 @@
#include "hw/qdev-core.h"
#include "hw/virtio/virtio.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_BUS "virtio-bus"
-#define VIRTIO_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtioBusClass, obj, TYPE_VIRTIO_BUS)
-#define VIRTIO_BUS_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtioBusClass, klass, TYPE_VIRTIO_BUS)
-#define VIRTIO_BUS(obj) OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_BUS)
-
+typedef struct VirtioBusClass VirtioBusClass;
typedef struct VirtioBusState VirtioBusState;
+DECLARE_OBJ_CHECKERS(VirtioBusState, VirtioBusClass,
+ VIRTIO_BUS, TYPE_VIRTIO_BUS)
+
-typedef struct VirtioBusClass {
+struct VirtioBusClass {
/* This is what a VirtioBus must implement */
BusClass parent;
void (*notify)(DeviceState *d, uint16_t vector);
@@ -94,7 +93,8 @@ typedef struct VirtioBusClass {
*/
bool has_variable_vring_alignment;
AddressSpace *(*get_dma_as)(DeviceState *d);
-} VirtioBusClass;
+ bool (*iommu_enabled)(DeviceState *d);
+};
struct VirtioBusState {
BusState parent_obj;
@@ -155,5 +155,6 @@ void virtio_bus_release_ioeventfd(VirtioBusState *bus);
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign);
/* Tell the bus that the ioeventfd handler is no longer required. */
void virtio_bus_cleanup_host_notifier(VirtioBusState *bus, int n);
-
+/* Whether the IOMMU is enabled for this device */
+bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev);
#endif /* VIRTIO_BUS_H */
diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
index ffe2391ece..348749f5d5 100644
--- a/include/hw/virtio/virtio-crypto.h
+++ b/include/hw/virtio/virtio-crypto.h
@@ -18,6 +18,7 @@
#include "hw/virtio/virtio.h"
#include "sysemu/iothread.h"
#include "sysemu/cryptodev.h"
+#include "qom/object.h"
#define DEBUG_VIRTIO_CRYPTO 0
@@ -31,8 +32,7 @@ do { \
#define TYPE_VIRTIO_CRYPTO "virtio-crypto-device"
-#define VIRTIO_CRYPTO(obj) \
- OBJECT_CHECK(VirtIOCrypto, (obj), TYPE_VIRTIO_CRYPTO)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOCrypto, VIRTIO_CRYPTO)
#define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
@@ -50,6 +50,7 @@ typedef struct VirtIOCryptoConf {
uint32_t mac_algo_l;
uint32_t mac_algo_h;
uint32_t aead_algo;
+ uint32_t akcipher_algo;
/* Maximum length of cipher key */
uint32_t max_cipher_key_len;
@@ -71,9 +72,7 @@ typedef struct VirtIOCryptoReq {
size_t in_len;
VirtQueue *vq;
struct VirtIOCrypto *vcrypto;
- union {
- CryptoDevBackendSymOpInfo *sym_op_info;
- } u;
+ CryptoDevBackendOpInfo op_info;
} VirtIOCryptoReq;
typedef struct VirtIOCryptoQueue {
@@ -82,7 +81,7 @@ typedef struct VirtIOCryptoQueue {
struct VirtIOCrypto *vcrypto;
} VirtIOCryptoQueue;
-typedef struct VirtIOCrypto {
+struct VirtIOCrypto {
VirtIODevice parent_obj;
VirtQueue *ctrl_vq;
@@ -97,6 +96,6 @@ typedef struct VirtIOCrypto {
uint32_t curr_queues;
size_t config_size;
uint8_t vhost_started;
-} VirtIOCrypto;
+};
#endif /* QEMU_VIRTIO_CRYPTO_H */
diff --git a/include/hw/virtio/virtio-dmabuf.h b/include/hw/virtio/virtio-dmabuf.h
new file mode 100644
index 0000000000..627c3b6db7
--- /dev/null
+++ b/include/hw/virtio/virtio-dmabuf.h
@@ -0,0 +1,100 @@
+/*
+ * Virtio Shared dma-buf
+ *
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors:
+ * Albert Esteve <aesteve@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef VIRTIO_DMABUF_H
+#define VIRTIO_DMABUF_H
+
+#include "qemu/uuid.h"
+#include "vhost.h"
+
+typedef enum SharedObjectType {
+ TYPE_INVALID = 0,
+ TYPE_DMABUF,
+ TYPE_VHOST_DEV,
+} SharedObjectType;
+
+typedef struct VirtioSharedObject {
+ SharedObjectType type;
+ gpointer value;
+} VirtioSharedObject;
+
+/**
+ * virtio_add_dmabuf() - Add a new dma-buf resource to the lookup table
+ * @uuid: new resource's UUID
+ * @dmabuf_fd: the dma-buf descriptor that will be stored and shared with
+ * other virtio devices. The caller retains ownership over the
+ * descriptor and its lifecycle.
+ *
+ * Note: @dmabuf_fd must be a valid (non-negative) file descriptor.
+ *
+ * Return: true if the UUID did not exist and the resource has been added,
+ * false if another resource with the same UUID already existed.
+ * Note that if it finds a repeated UUID, the resource is not inserted in
+ * the lookup table.
+ */
+bool virtio_add_dmabuf(QemuUUID *uuid, int dmabuf_fd);
+
+/**
+ * virtio_add_vhost_device() - Add a new exporter vhost device that holds the
+ * resource with the associated UUID
+ * @uuid: new resource's UUID
+ * @dev: the pointer to the vhost device that holds the resource. The caller
+ * retains ownership over the device struct and its lifecycle.
+ *
+ * Return: true if the UUID did not exist and the device has been tracked,
+ * false if another resource with the same UUID already existed.
+ * Note that if it finds a repeated UUID, the resource is not inserted in
+ * the lookup table.
+ */
+bool virtio_add_vhost_device(QemuUUID *uuid, struct vhost_dev *dev);
+
+/**
+ * virtio_remove_resource() - Removes a resource from the lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: true if the UUID has been found and removed from the lookup table.
+ */
+bool virtio_remove_resource(const QemuUUID *uuid);
+
+/**
+ * virtio_lookup_dmabuf() - Looks for a dma-buf resource in the lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: the dma-buf file descriptor integer, or -1 if the key is not found.
+ */
+int virtio_lookup_dmabuf(const QemuUUID *uuid);
+
+/**
+ * virtio_lookup_vhost_device() - Looks for an exporter vhost device in the
+ * lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: pointer to the vhost_dev struct, or NULL if the key is not found.
+ */
+struct vhost_dev *virtio_lookup_vhost_device(const QemuUUID *uuid);
+
+/**
+ * virtio_object_type() - Looks for the type of resource in the lookup table
+ * @uuid: resource's UUID
+ *
+ * Return: the type of resource associated with the UUID, or TYPE_INVALID if
+ * the key is not found.
+ */
+SharedObjectType virtio_object_type(const QemuUUID *uuid);
+
+/**
+ * virtio_free_resources() - Destroys all keys and values of the shared
+ * resources lookup table, and frees them
+ */
+void virtio_free_resources(void);
+
+#endif /* VIRTIO_DMABUF_H */
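An illustrative sketch of the lookup-table API above; share_and_lookup() is a hypothetical helper and the caller keeps ownership of the descriptor throughout:

#include "qemu/uuid.h"
#include "hw/virtio/virtio-dmabuf.h"

static int share_and_lookup(QemuUUID *uuid, int dmabuf_fd)
{
    int fd = -1;

    /* Register the dma-buf; fails if the UUID is already present. */
    if (!virtio_add_dmabuf(uuid, dmabuf_fd)) {
        return -1;
    }

    /* Another device can later resolve the UUID back to the descriptor. */
    if (virtio_object_type(uuid) == TYPE_DMABUF) {
        fd = virtio_lookup_dmabuf(uuid);
    }

    /* Drop the entry once the resource is no longer shared. */
    virtio_remove_resource(uuid);
    return fd;
}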
diff --git a/include/hw/virtio/virtio-gpu-bswap.h b/include/hw/virtio/virtio-gpu-bswap.h
index 203f9e1718..dd1975e2d4 100644
--- a/include/hw/virtio/virtio-gpu-bswap.h
+++ b/include/hw/virtio/virtio-gpu-bswap.h
@@ -24,13 +24,12 @@ virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
le32_to_cpus(&hdr->flags);
le64_to_cpus(&hdr->fence_id);
le32_to_cpus(&hdr->ctx_id);
- le32_to_cpus(&hdr->padding);
}
static inline void
virtio_gpu_bswap_32(void *ptr, size_t size)
{
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
size_t i;
struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
@@ -59,4 +58,38 @@ virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
le32_to_cpus(&t2d->padding);
}
+static inline void
+virtio_gpu_create_blob_bswap(struct virtio_gpu_resource_create_blob *cblob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&cblob->hdr);
+ le32_to_cpus(&cblob->resource_id);
+ le32_to_cpus(&cblob->blob_mem);
+ le32_to_cpus(&cblob->blob_flags);
+ le32_to_cpus(&cblob->nr_entries);
+ le64_to_cpus(&cblob->blob_id);
+ le64_to_cpus(&cblob->size);
+}
+
+static inline void
+virtio_gpu_map_blob_bswap(struct virtio_gpu_resource_map_blob *mblob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&mblob->hdr);
+ le32_to_cpus(&mblob->resource_id);
+ le64_to_cpus(&mblob->offset);
+}
+
+static inline void
+virtio_gpu_unmap_blob_bswap(struct virtio_gpu_resource_unmap_blob *ublob)
+{
+ virtio_gpu_ctrl_hdr_bswap(&ublob->hdr);
+ le32_to_cpus(&ublob->resource_id);
+}
+
+static inline void
+virtio_gpu_scanout_blob_bswap(struct virtio_gpu_set_scanout_blob *ssb)
+{
+ virtio_gpu_bswap_32(ssb, sizeof(*ssb) - sizeof(ssb->offsets[3]));
+ le32_to_cpus(&ssb->offsets[3]);
+}
+
#endif
diff --git a/include/hw/virtio/virtio-gpu-pci.h b/include/hw/virtio/virtio-gpu-pci.h
index 2f69b5a9cc..225cbbc2e4 100644
--- a/include/hw/virtio/virtio-gpu-pci.h
+++ b/include/hw/virtio/virtio-gpu-pci.h
@@ -16,15 +16,14 @@
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"
+#include "qom/object.h"
-typedef struct VirtIOGPUPCIBase VirtIOGPUPCIBase;
/*
* virtio-gpu-pci-base: This extends VirtioPCIProxy.
*/
#define TYPE_VIRTIO_GPU_PCI_BASE "virtio-gpu-pci-base"
-#define VIRTIO_GPU_PCI_BASE(obj) \
- OBJECT_CHECK(VirtIOGPUPCIBase, (obj), TYPE_VIRTIO_GPU_PCI_BASE)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUPCIBase, VIRTIO_GPU_PCI_BASE)
struct VirtIOGPUPCIBase {
VirtIOPCIProxy parent_obj;
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 6dd57f2025..ed44cdad6b 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -22,22 +22,24 @@
#include "sysemu/vhost-user-backend.h"
#include "standard-headers/linux/virtio_gpu.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
-#define VIRTIO_GPU_BASE(obj) \
- OBJECT_CHECK(VirtIOGPUBase, (obj), TYPE_VIRTIO_GPU_BASE)
-#define VIRTIO_GPU_BASE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOGPUBaseClass, obj, TYPE_VIRTIO_GPU_BASE)
-#define VIRTIO_GPU_BASE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOGPUBaseClass, klass, TYPE_VIRTIO_GPU_BASE)
+OBJECT_DECLARE_TYPE(VirtIOGPUBase, VirtIOGPUBaseClass,
+ VIRTIO_GPU_BASE)
#define TYPE_VIRTIO_GPU "virtio-gpu-device"
-#define VIRTIO_GPU(obj) \
- OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU)
+OBJECT_DECLARE_TYPE(VirtIOGPU, VirtIOGPUClass, VIRTIO_GPU)
+
+#define TYPE_VIRTIO_GPU_GL "virtio-gpu-gl-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUGL, VIRTIO_GPU_GL)
#define TYPE_VHOST_USER_GPU "vhost-user-gpu"
+OBJECT_DECLARE_SIMPLE_TYPE(VhostUserGPU, VHOST_USER_GPU)
-#define VIRTIO_ID_GPU 16
+#define TYPE_VIRTIO_GPU_RUTABAGA "virtio-gpu-rutabaga-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPURutabaga, VIRTIO_GPU_RUTABAGA)
struct virtio_gpu_simple_resource {
uint32_t resource_id;
@@ -49,10 +51,27 @@ struct virtio_gpu_simple_resource {
unsigned int iov_cnt;
uint32_t scanout_bitmask;
pixman_image_t *image;
+#ifdef WIN32
+ HANDLE handle;
+#endif
uint64_t hostmem;
+
+ uint64_t blob_size;
+ void *blob;
+ int dmabuf_fd;
+ uint8_t *remapped;
+
QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};
+struct virtio_gpu_framebuffer {
+ pixman_format_code_t format;
+ uint32_t bytes_pp;
+ uint32_t width, height;
+ uint32_t stride;
+ uint32_t offset;
+};
+
struct virtio_gpu_scanout {
QemuConsole *con;
DisplaySurface *ds;
@@ -62,10 +81,13 @@ struct virtio_gpu_scanout {
uint32_t resource_id;
struct virtio_gpu_update_cursor cursor;
QEMUCursor *current_cursor;
+ struct virtio_gpu_framebuffer fb;
};
struct virtio_gpu_requested_state {
+ uint16_t width_mm, height_mm;
uint32_t width, height;
+ uint32_t refresh_rate;
int x, y;
};
@@ -73,6 +95,10 @@ enum virtio_gpu_base_conf_flags {
VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
VIRTIO_GPU_FLAG_STATS_ENABLED,
VIRTIO_GPU_FLAG_EDID_ENABLED,
+ VIRTIO_GPU_FLAG_DMABUF_ENABLED,
+ VIRTIO_GPU_FLAG_BLOB_ENABLED,
+ VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED,
+ VIRTIO_GPU_FLAG_RUTABAGA_ENABLED,
};
#define virtio_gpu_virgl_enabled(_cfg) \
@@ -81,12 +107,23 @@ enum virtio_gpu_base_conf_flags {
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
#define virtio_gpu_edid_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))
+#define virtio_gpu_dmabuf_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED))
+#define virtio_gpu_blob_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED))
+#define virtio_gpu_context_init_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED))
+#define virtio_gpu_rutabaga_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED))
+#define virtio_gpu_hostmem_enabled(_cfg) \
+ (_cfg.hostmem > 0)
struct virtio_gpu_base_conf {
uint32_t max_outputs;
uint32_t flags;
uint32_t xres;
uint32_t yres;
+ uint64_t hostmem;
};
struct virtio_gpu_ctrl_command {
@@ -98,38 +135,46 @@ struct virtio_gpu_ctrl_command {
QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};
-typedef struct VirtIOGPUBase {
+struct VirtIOGPUBase {
VirtIODevice parent_obj;
Error *migration_blocker;
struct virtio_gpu_base_conf conf;
struct virtio_gpu_config virtio_config;
+ const GraphicHwOps *hw_ops;
- bool use_virgl_renderer;
int renderer_blocked;
int enable;
+ MemoryRegion hostmem;
+
struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
int enabled_output_bitmask;
struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
-} VirtIOGPUBase;
+};
-typedef struct VirtIOGPUBaseClass {
+struct VirtIOGPUBaseClass {
VirtioDeviceClass parent;
- void (*gl_unblock)(VirtIOGPUBase *g);
-} VirtIOGPUBaseClass;
+ void (*gl_flushed)(VirtIOGPUBase *g);
+};
#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf) \
DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \
DEFINE_PROP_BIT("edid", _state, _conf.flags, \
VIRTIO_GPU_FLAG_EDID_ENABLED, true), \
- DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024), \
- DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768)
+ DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280), \
+ DEFINE_PROP_UINT32("yres", _state, _conf.yres, 800)
+
+typedef struct VGPUDMABuf {
+ QemuDmaBuf buf;
+ uint32_t scanout_id;
+ QTAILQ_ENTRY(VGPUDMABuf) next;
+} VGPUDMABuf;
-typedef struct VirtIOGPU {
+struct VirtIOGPU {
VirtIOGPUBase parent_obj;
uint64_t conf_max_hostmem;
@@ -139,6 +184,9 @@ typedef struct VirtIOGPU {
QEMUBH *ctrl_bh;
QEMUBH *cursor_bh;
+ QEMUBH *reset_bh;
+ QemuCond reset_cond;
+ bool reset_finished;
QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
@@ -146,8 +194,7 @@ typedef struct VirtIOGPU {
uint64_t hostmem;
- bool renderer_inited;
- bool renderer_reset;
+ bool processing_cmdq;
QEMUTimer *fence_poll;
QEMUTimer *print_stats;
@@ -158,9 +205,34 @@ typedef struct VirtIOGPU {
uint32_t req_3d;
uint32_t bytes_3d;
} stats;
-} VirtIOGPU;
-typedef struct VhostUserGPU {
+ struct {
+ QTAILQ_HEAD(, VGPUDMABuf) bufs;
+ VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
+ } dmabuf;
+};
+
+struct VirtIOGPUClass {
+ VirtIOGPUBaseClass parent;
+
+ void (*handle_ctrl)(VirtIODevice *vdev, VirtQueue *vq);
+ void (*process_cmd)(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
+ void (*update_cursor_data)(VirtIOGPU *g,
+ struct virtio_gpu_scanout *s,
+ uint32_t resource_id);
+ void (*resource_destroy)(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res,
+ Error **errp);
+};
+
+struct VirtIOGPUGL {
+ struct VirtIOGPU parent_obj;
+
+ bool renderer_inited;
+ bool renderer_reset;
+};
+
+struct VhostUserGPU {
VirtIOGPUBase parent_obj;
VhostUserBackend *vhost;
@@ -168,18 +240,37 @@ typedef struct VhostUserGPU {
CharBackend vhost_chr;
QemuDmaBuf dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
bool backend_blocked;
-} VhostUserGPU;
+};
+
+#define MAX_SLOTS 4096
-extern const GraphicHwOps virtio_gpu_ops;
+struct MemoryRegionInfo {
+ int used;
+ MemoryRegion mr;
+ uint32_t resource_id;
+};
+
+struct rutabaga;
+
+struct VirtIOGPURutabaga {
+ VirtIOGPU parent_obj;
+ struct MemoryRegionInfo memory_regions[MAX_SLOTS];
+ uint64_t capset_mask;
+ char *wayland_socket_path;
+ char *wsi;
+ bool headless;
+ uint32_t num_capsets;
+ struct rutabaga *rutabaga;
+};
#define VIRTIO_GPU_FILL_CMD(out) do { \
- size_t s; \
- s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \
+ size_t virtiogpufillcmd_s_ = \
+ iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \
&out, sizeof(out)); \
- if (s != sizeof(out)) { \
+ if (virtiogpufillcmd_s_ != sizeof(out)) { \
qemu_log_mask(LOG_GUEST_ERROR, \
"%s: command size incorrect %zu vs %zu\n", \
- __func__, s, sizeof(out)); \
+ __func__, virtiogpufillcmd_s_, sizeof(out)); \
return; \
} \
} while (0)
@@ -189,11 +280,17 @@ bool virtio_gpu_base_device_realize(DeviceState *qdev,
VirtIOHandleOutput ctrl_cb,
VirtIOHandleOutput cursor_cb,
Error **errp);
+void virtio_gpu_base_device_unrealize(DeviceState *qdev);
void virtio_gpu_base_reset(VirtIOGPUBase *g);
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
struct virtio_gpu_resp_display_info *dpy_info);
+void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
+ struct virtio_gpu_resp_edid *edid);
/* virtio-gpu.c */
+struct virtio_gpu_simple_resource *
+virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
+
void virtio_gpu_ctrl_response(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
struct virtio_gpu_ctrl_hdr *resp,
@@ -206,17 +303,37 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
void virtio_gpu_get_edid(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
- struct virtio_gpu_resource_attach_backing *ab,
+ uint32_t nr_entries, uint32_t offset,
struct virtio_gpu_ctrl_command *cmd,
- uint64_t **addr, struct iovec **iov);
+ uint64_t **addr, struct iovec **iov,
+ uint32_t *niov);
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
struct iovec *iov, uint32_t count);
+void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res);
void virtio_gpu_process_cmdq(VirtIOGPU *g);
+void virtio_gpu_device_realize(DeviceState *qdev, Error **errp);
+void virtio_gpu_reset(VirtIODevice *vdev);
+void virtio_gpu_simple_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
+void virtio_gpu_update_cursor_data(VirtIOGPU *g,
+ struct virtio_gpu_scanout *s,
+ uint32_t resource_id);
+
+/* virtio-gpu-udmabuf.c */
+bool virtio_gpu_have_udmabuf(void);
+void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res);
+void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res);
+int virtio_gpu_update_dmabuf(VirtIOGPU *g,
+ uint32_t scanout_id,
+ struct virtio_gpu_simple_resource *res,
+ struct virtio_gpu_framebuffer *fb,
+ struct virtio_gpu_rect *r);
/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
+void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g);
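
The renamed VIRTIO_GPU_FILL_CMD() macro and the blob byte-swap helpers added in virtio-gpu-bswap.h are typically used together in a command handler. A hedged sketch follows (illustrative; the handler name and the error path are placeholders, not part of this patch):

    static void example_handle_create_blob(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
    {
        struct virtio_gpu_resource_create_blob cblob;

        /* Copies the request out of cmd->elem.out_sg, or returns on short read. */
        VIRTIO_GPU_FILL_CMD(cblob);
        /* Convert the little-endian guest fields to host byte order. */
        virtio_gpu_create_blob_bswap(&cblob);

        if (cblob.resource_id == 0) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        /* ... allocate the resource, set up iov/udmabuf backing, etc. ... */
    }
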
diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h
index 4fca03e796..e69c0aeca3 100644
--- a/include/hw/virtio/virtio-input.h
+++ b/include/hw/virtio/virtio-input.h
@@ -1,6 +1,8 @@
#ifndef QEMU_VIRTIO_INPUT_H
#define QEMU_VIRTIO_INPUT_H
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
#include "ui/input.h"
#include "sysemu/vhost-user-backend.h"
@@ -9,6 +11,7 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_input.h"
+#include "qom/object.h"
typedef struct virtio_input_absinfo virtio_input_absinfo;
typedef struct virtio_input_config virtio_input_config;
@@ -18,43 +21,32 @@ typedef struct virtio_input_event virtio_input_event;
/* qemu internals */
#define TYPE_VIRTIO_INPUT "virtio-input-device"
-#define VIRTIO_INPUT(obj) \
- OBJECT_CHECK(VirtIOInput, (obj), TYPE_VIRTIO_INPUT)
+OBJECT_DECLARE_TYPE(VirtIOInput, VirtIOInputClass,
+ VIRTIO_INPUT)
#define VIRTIO_INPUT_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT)
-#define VIRTIO_INPUT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOInputClass, obj, TYPE_VIRTIO_INPUT)
-#define VIRTIO_INPUT_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOInputClass, klass, TYPE_VIRTIO_INPUT)
-
-#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device"
-#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device"
-#define TYPE_VIRTIO_MOUSE "virtio-mouse-device"
-#define TYPE_VIRTIO_TABLET "virtio-tablet-device"
-
-#define VIRTIO_INPUT_HID(obj) \
- OBJECT_CHECK(VirtIOInputHID, (obj), TYPE_VIRTIO_INPUT_HID)
+
+#define TYPE_VIRTIO_INPUT_HID "virtio-input-hid-device"
+#define TYPE_VIRTIO_KEYBOARD "virtio-keyboard-device"
+#define TYPE_VIRTIO_MOUSE "virtio-mouse-device"
+#define TYPE_VIRTIO_TABLET "virtio-tablet-device"
+#define TYPE_VIRTIO_MULTITOUCH "virtio-multitouch-device"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHID, VIRTIO_INPUT_HID)
#define VIRTIO_INPUT_HID_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT_HID)
#define TYPE_VIRTIO_INPUT_HOST "virtio-input-host-device"
-#define VIRTIO_INPUT_HOST(obj) \
- OBJECT_CHECK(VirtIOInputHost, (obj), TYPE_VIRTIO_INPUT_HOST)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHost, VIRTIO_INPUT_HOST)
#define VIRTIO_INPUT_HOST_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_INPUT_HOST)
#define TYPE_VHOST_USER_INPUT "vhost-user-input"
-#define VHOST_USER_INPUT(obj) \
- OBJECT_CHECK(VHostUserInput, (obj), TYPE_VHOST_USER_INPUT)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserInput, VHOST_USER_INPUT)
#define VHOST_USER_INPUT_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VHOST_USER_INPUT)
-typedef struct VirtIOInput VirtIOInput;
-typedef struct VirtIOInputClass VirtIOInputClass;
typedef struct VirtIOInputConfig VirtIOInputConfig;
-typedef struct VirtIOInputHID VirtIOInputHID;
-typedef struct VirtIOInputHost VirtIOInputHost;
-typedef struct VHostUserInput VHostUserInput;
struct VirtIOInputConfig {
virtio_input_config config;
@@ -94,7 +86,7 @@ struct VirtIOInputHID {
VirtIOInput parent_obj;
char *display;
uint32_t head;
- QemuInputHandler *handler;
+ const QemuInputHandler *handler;
QemuInputHandlerState *hs;
int ledstate;
bool wheel_axis;
@@ -107,9 +99,7 @@ struct VirtIOInputHost {
};
struct VHostUserInput {
- VirtIOInput parent_obj;
-
- VhostUserBackend *vhost;
+ VHostUserBase parent_obj;
};
void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event);
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
index 49eb105cd8..83a52cc446 100644
--- a/include/hw/virtio/virtio-iommu.h
+++ b/include/hw/virtio/virtio-iommu.h
@@ -23,11 +23,12 @@
#include "standard-headers/linux/virtio_iommu.h"
#include "hw/virtio/virtio.h"
#include "hw/pci/pci.h"
+#include "qom/object.h"
+#include "qapi/qapi-types-virtio.h"
#define TYPE_VIRTIO_IOMMU "virtio-iommu-device"
-#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-device-base"
-#define VIRTIO_IOMMU(obj) \
- OBJECT_CHECK(VirtIOIOMMU, (obj), TYPE_VIRTIO_IOMMU)
+#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOIOMMU, VIRTIO_IOMMU)
#define TYPE_VIRTIO_IOMMU_MEMORY_REGION "virtio-iommu-memory-region"
@@ -37,6 +38,11 @@ typedef struct IOMMUDevice {
int devfn;
IOMMUMemoryRegion iommu_mr;
AddressSpace as;
+ MemoryRegion root; /* The root container of the device */
+ MemoryRegion bypass_mr; /* The alias of shared memory MR */
+ GList *resv_regions;
+ GList *host_resv_ranges;
+ bool probe_done;
} IOMMUDevice;
typedef struct IOMMUPciBus {
@@ -44,7 +50,7 @@ typedef struct IOMMUPciBus {
IOMMUDevice *pbdev[]; /* Parent array is sparse, so dynamically alloc */
} IOMMUPciBus;
-typedef struct VirtIOIOMMU {
+struct VirtIOIOMMU {
VirtIODevice parent_obj;
VirtQueue *req_vq;
VirtQueue *event_vq;
@@ -53,11 +59,16 @@ typedef struct VirtIOIOMMU {
GHashTable *as_by_busptr;
IOMMUPciBus *iommu_pcibus_by_bus_num[PCI_BUS_MAX];
PCIBus *primary_bus;
- ReservedRegion *reserved_regions;
- uint32_t nb_reserved_regions;
+ ReservedRegion *prop_resv_regions;
+ uint32_t nr_prop_resv_regions;
GTree *domains;
- QemuMutex mutex;
+ QemuRecMutex mutex;
GTree *endpoints;
-} VirtIOIOMMU;
+ bool boot_bypass;
+ Notifier machine_done;
+ bool granule_frozen;
+ GranuleMode granule_mode;
+ uint8_t aw_bits;
+};
#endif
diff --git a/include/hw/virtio/virtio-md-pci.h b/include/hw/virtio/virtio-md-pci.h
new file mode 100644
index 0000000000..5912e16674
--- /dev/null
+++ b/include/hw/virtio/virtio-md-pci.h
@@ -0,0 +1,44 @@
+/*
+ * Abstract virtio based memory device
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_MD_PCI_H
+#define HW_VIRTIO_MD_PCI_H
+
+#include "hw/virtio/virtio-pci.h"
+#include "qom/object.h"
+
+/*
+ * virtio-md-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_MD_PCI "virtio-md-pci"
+
+OBJECT_DECLARE_TYPE(VirtIOMDPCI, VirtIOMDPCIClass, VIRTIO_MD_PCI)
+
+struct VirtIOMDPCIClass {
+ /* private */
+ VirtioPCIClass parent;
+
+ /* public */
+ void (*unplug_request_check)(VirtIOMDPCI *vmd, Error **errp);
+};
+
+struct VirtIOMDPCI {
+ VirtIOPCIProxy parent_obj;
+};
+
+void virtio_md_pci_pre_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+void virtio_md_pci_plug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+void virtio_md_pci_unplug_request(VirtIOMDPCI *vmd, MachineState *ms,
+ Error **errp);
+void virtio_md_pci_unplug(VirtIOMDPCI *vmd, MachineState *ms, Error **errp);
+
+#endif
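
Boards that support virtio based memory devices forward their hotplug callbacks to the helpers above. A sketch of the pre-plug side (illustrative; the handler name is a placeholder, and it assumes the machine itself is the hotplug handler, as on the Arm "virt" board):

    static void example_machine_pre_plug(HotplugHandler *hotplug_dev,
                                         DeviceState *dev, Error **errp)
    {
        if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) {
            virtio_md_pci_pre_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp);
        }
    }
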
diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h
index 0778224964..5f5b02b8f9 100644
--- a/include/hw/virtio/virtio-mem.h
+++ b/include/hw/virtio/virtio-mem.h
@@ -17,15 +17,12 @@
#include "hw/virtio/virtio.h"
#include "qapi/qapi-types-misc.h"
#include "sysemu/hostmem.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_MEM "virtio-mem"
-#define VIRTIO_MEM(obj) \
- OBJECT_CHECK(VirtIOMEM, (obj), TYPE_VIRTIO_MEM)
-#define VIRTIO_MEM_CLASS(oc) \
- OBJECT_CLASS_CHECK(VirtIOMEMClass, (oc), TYPE_VIRTIO_MEM)
-#define VIRTIO_MEM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOMEMClass, (obj), TYPE_VIRTIO_MEM)
+OBJECT_DECLARE_TYPE(VirtIOMEM, VirtIOMEMClass,
+ VIRTIO_MEM)
#define VIRTIO_MEM_MEMDEV_PROP "memdev"
#define VIRTIO_MEM_NODE_PROP "node"
@@ -33,8 +30,12 @@
#define VIRTIO_MEM_REQUESTED_SIZE_PROP "requested-size"
#define VIRTIO_MEM_BLOCK_SIZE_PROP "block-size"
#define VIRTIO_MEM_ADDR_PROP "memaddr"
+#define VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP "unplugged-inaccessible"
+#define VIRTIO_MEM_EARLY_MIGRATION_PROP "x-early-migration"
+#define VIRTIO_MEM_PREALLOC_PROP "prealloc"
+#define VIRTIO_MEM_DYNAMIC_MEMSLOTS_PROP "dynamic-memslots"
-typedef struct VirtIOMEM {
+struct VirtIOMEM {
VirtIODevice parent_obj;
/* guest -> host request queue */
@@ -44,7 +45,28 @@ typedef struct VirtIOMEM {
int32_t bitmap_size;
unsigned long *bitmap;
- /* assigned memory backend and memory region */
+ /*
+ * With "dynamic-memslots=on": Device memory region in which we dynamically
+ * map the memslots.
+ */
+ MemoryRegion *mr;
+
+ /*
+ * With "dynamic-memslots=on": The individual memslots (aliases into the
+ * memory backend).
+ */
+ MemoryRegion *memslots;
+
+ /* With "dynamic-memslots=on": The total number of memslots. */
+ uint16_t nb_memslots;
+
+ /*
+ * With "dynamic-memslots=on": Size of one memslot (the size of the
+ * last one can differ).
+ */
+ uint64_t memslot_size;
+
+ /* Assigned memory backend with the RAM memory region. */
HostMemoryBackend *memdev;
/* NUMA node */
@@ -65,22 +87,48 @@ typedef struct VirtIOMEM {
/* block size and alignment */
uint64_t block_size;
+ /*
+ * Whether we indicate VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE to the guest.
+ * For !x86 targets this will always be "on" and consequently indicate
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE.
+ */
+ OnOffAuto unplugged_inaccessible;
+
+ /* whether to prealloc memory when plugging new blocks */
+ bool prealloc;
+
+ /*
+     * Whether we migrate properties that are immutable while migration is
+     * active early, before the state of other devices and, especially,
+     * before migrating any RAM content.
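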
+ */
+ bool early_migration;
+
+ /*
+ * Whether we dynamically map (multiple, if possible) memslots instead of
+ * statically mapping the whole RAM memory region.
+ */
+ bool dynamic_memslots;
+
/* notifiers to notify when "size" changes */
NotifierList size_change_notifiers;
- /* don't migrate unplugged memory */
- NotifierWithReturn precopy_notifier;
-} VirtIOMEM;
+ /* listeners to notify on plug/unplug activity. */
+ QLIST_HEAD(, RamDiscardListener) rdl_list;
+};
-typedef struct VirtIOMEMClass {
+struct VirtIOMEMClass {
/* private */
VirtIODevice parent;
/* public */
void (*fill_device_info)(const VirtIOMEM *vmen, VirtioMEMDeviceInfo *vi);
MemoryRegion *(*get_memory_region)(VirtIOMEM *vmem, Error **errp);
+ void (*decide_memslots)(VirtIOMEM *vmem, unsigned int limit);
+ unsigned int (*get_memslots)(VirtIOMEM *vmem);
void (*add_size_change_notifier)(VirtIOMEM *vmem, Notifier *notifier);
void (*remove_size_change_notifier)(VirtIOMEM *vmem, Notifier *notifier);
-} VirtIOMEMClass;
+ void (*unplug_request_check)(VirtIOMEM *vmem, Error **errp);
+};
#endif
diff --git a/include/hw/virtio/virtio-mmio.h b/include/hw/virtio/virtio-mmio.h
index 7dbfd03dcf..aa49262022 100644
--- a/include/hw/virtio/virtio-mmio.h
+++ b/include/hw/virtio/virtio-mmio.h
@@ -22,22 +22,19 @@
#ifndef HW_VIRTIO_MMIO_H
#define HW_VIRTIO_MMIO_H
+#include "hw/sysbus.h"
#include "hw/virtio/virtio-bus.h"
/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
-#define VIRTIO_MMIO_BUS(obj) \
- OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
-#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
-#define VIRTIO_MMIO_BUS_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)
+/* This is reusing the VirtioBusState typedef from TYPE_VIRTIO_BUS */
+DECLARE_OBJ_CHECKERS(VirtioBusState, VirtioBusClass,
+ VIRTIO_MMIO_BUS, TYPE_VIRTIO_MMIO_BUS)
/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
-#define VIRTIO_MMIO(obj) \
- OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOMMIOProxy, VIRTIO_MMIO)
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 2
@@ -52,12 +49,17 @@ typedef struct VirtIOMMIOQueue {
uint32_t used[2];
} VirtIOMMIOQueue;
-typedef struct {
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT 1
+#define VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD \
+ (1 << VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT)
+
+struct VirtIOMMIOProxy {
/* Generic */
SysBusDevice parent_obj;
MemoryRegion iomem;
qemu_irq irq;
bool legacy;
+ uint32_t flags;
/* Guest accessible state needing migration and reset */
uint32_t host_features_sel;
uint32_t guest_features_sel;
@@ -68,6 +70,6 @@ typedef struct {
/* Fields only used for non-legacy (v2) devices */
uint32_t guest_features[2];
VirtIOMMIOQueue vqs[VIRTIO_QUEUE_MAX];
-} VirtIOMMIOProxy;
+};
#endif
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index a45ef8278e..060c23c04d 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -19,10 +19,12 @@
#include "hw/virtio/virtio.h"
#include "net/announce.h"
#include "qemu/option_int.h"
+#include "qom/object.h"
+
+#include "ebpf/ebpf_rss.h"
#define TYPE_VIRTIO_NET "virtio-net-device"
-#define VIRTIO_NET(obj) \
- OBJECT_CHECK(VirtIONet, (obj), TYPE_VIRTIO_NET)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIONet, VIRTIO_NET)
#define TX_TIMER_INTERVAL 150000 /* 150 us */
@@ -33,6 +35,15 @@
* and latency. */
#define TX_BURST 256
+/* Maximum VIRTIO_NET_CTRL_MAC_TABLE_SET unicast + multicast entries. */
+#define MAC_TABLE_ENTRIES 64
+
+/*
+ * The maximum number of VLANs in the VLAN filter table
+ * added by VIRTIO_NET_CTRL_VLAN_ADD
+ */
+#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
+
typedef struct virtio_net_conf
{
uint32_t txtimer;
@@ -104,12 +115,11 @@ typedef struct VirtioNetRscSeg {
size_t size;
uint16_t packets;
uint16_t dup_ack;
- bool is_coalesced; /* need recal ipv4 header checksum, mark here */
+    bool is_coalesced; /* need to recalculate ipv4 header checksum, mark here */
VirtioNetRscUnit unit;
NetClientState *nc;
} VirtioNetRscSeg;
-typedef struct VirtIONet VirtIONet;
/* Chain is divided by protocol(ipv4/v6) and NetClientInfo */
typedef struct VirtioNetRscChain {
@@ -131,6 +141,7 @@ typedef struct VirtioNetRscChain {
typedef struct VirtioNetRssData {
bool enabled;
+ bool enabled_software_rss;
bool redirect;
bool populate_hash;
uint32_t hash_types;
@@ -192,8 +203,9 @@ struct VirtIONet {
NICConf nic_conf;
DeviceState *qdev;
int multiqueue;
- uint16_t max_queues;
- uint16_t curr_queues;
+ uint16_t max_queue_pairs;
+ uint16_t curr_queue_pairs;
+ uint16_t max_ncs;
size_t config_size;
char *netclient_name;
char *netclient_type;
@@ -203,21 +215,26 @@ struct VirtIONet {
AnnounceTimer announce_timer;
bool needs_vnet_hdr_swap;
bool mtu_bypass_backend;
- QemuOpts *primary_device_opts;
- QDict *primary_device_dict;
- DeviceState *primary_dev;
- BusState *primary_bus;
- char *primary_device_id;
- char *standby_id;
- bool primary_should_be_hidden;
+    /* primary failover device is hidden */
+ bool failover_primary_hidden;
bool failover;
DeviceListener primary_listener;
- Notifier migration_state;
+ QDict *primary_opts;
+ bool primary_opts_from_json;
+ NotifierWithReturn migration_state;
VirtioNetRssData rss_data;
struct NetRxPkt *rx_pkt;
+ struct EBPFRSSContext ebpf_rss;
+ uint32_t nr_ebpf_rss_fds;
+ char **ebpf_rss_fds;
};
+size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
+ const struct iovec *in_sg, unsigned in_num,
+ const struct iovec *out_sg,
+ unsigned out_num);
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
const char *type);
+uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n);
#endif
diff --git a/include/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h
new file mode 100644
index 0000000000..59d88018c1
--- /dev/null
+++ b/include/hw/virtio/virtio-pci.h
@@ -0,0 +1,272 @@
+/*
+ * Virtio PCI Bindings
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2009 CodeSourcery
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Paul Brook <paul@codesourcery.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_VIRTIO_PCI_H
+#define QEMU_VIRTIO_PCI_H
+
+#include "hw/pci/msi.h"
+#include "hw/virtio/virtio-bus.h"
+#include "qom/object.h"
+
+
+/* virtio-pci-bus */
+
+typedef struct VirtioBusState VirtioPCIBusState;
+typedef struct VirtioBusClass VirtioPCIBusClass;
+
+#define TYPE_VIRTIO_PCI_BUS "virtio-pci-bus"
+DECLARE_OBJ_CHECKERS(VirtioPCIBusState, VirtioPCIBusClass,
+ VIRTIO_PCI_BUS, TYPE_VIRTIO_PCI_BUS)
+
+enum {
+ VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
+ VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
+ VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
+ VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT,
+ VIRTIO_PCI_FLAG_ATS_BIT,
+ VIRTIO_PCI_FLAG_INIT_DEVERR_BIT,
+ VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT,
+ VIRTIO_PCI_FLAG_INIT_PM_BIT,
+ VIRTIO_PCI_FLAG_INIT_FLR_BIT,
+ VIRTIO_PCI_FLAG_AER_BIT,
+ VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT,
+};
+
+/* Need to activate work-arounds for buggy guests at vmstate load. */
+#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \
+ (1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT)
+
+/* Performance improves when virtqueue kick processing is decoupled from the
+ * vcpu thread using ioeventfd for some devices. */
+#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
+
+/* virtio version flags */
+#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
+
+/* migrate extra state */
+#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
+
+/* have PIO notification for modern device? */
+#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
+ (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
+
+/* page per vq flag to be used by split drivers within guests */
+#define VIRTIO_PCI_FLAG_PAGE_PER_VQ \
+ (1 << VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT)
+
+/* address space translation service */
+#define VIRTIO_PCI_FLAG_ATS (1 << VIRTIO_PCI_FLAG_ATS_BIT)
+
+/* Init error enabling flags */
+#define VIRTIO_PCI_FLAG_INIT_DEVERR (1 << VIRTIO_PCI_FLAG_INIT_DEVERR_BIT)
+
+/* Init Link Control register */
+#define VIRTIO_PCI_FLAG_INIT_LNKCTL (1 << VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT)
+
+/* Init Power Management */
+#define VIRTIO_PCI_FLAG_INIT_PM (1 << VIRTIO_PCI_FLAG_INIT_PM_BIT)
+
+/* Init Function Level Reset capability */
+#define VIRTIO_PCI_FLAG_INIT_FLR (1 << VIRTIO_PCI_FLAG_INIT_FLR_BIT)
+
+/* Advanced Error Reporting capability */
+#define VIRTIO_PCI_FLAG_AER (1 << VIRTIO_PCI_FLAG_AER_BIT)
+
+/* Page Aligned Address space Translation Service */
+#define VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED \
+ (1 << VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT)
+
+typedef struct {
+ MSIMessage msg;
+ int virq;
+ unsigned int users;
+} VirtIOIRQFD;
+
+/*
+ * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
+ */
+#define TYPE_VIRTIO_PCI "virtio-pci"
+OBJECT_DECLARE_TYPE(VirtIOPCIProxy, VirtioPCIClass, VIRTIO_PCI)
+
+struct VirtioPCIClass {
+ PCIDeviceClass parent_class;
+ DeviceRealize parent_dc_realize;
+ void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp);
+};
+
+typedef struct VirtIOPCIRegion {
+ MemoryRegion mr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t type;
+} VirtIOPCIRegion;
+
+typedef struct VirtIOPCIQueue {
+ uint16_t num;
+ bool enabled;
+ /*
+ * No need to migrate the reset status, because it is always 0
+ * when the migration starts.
+ */
+ bool reset;
+ uint32_t desc[2];
+ uint32_t avail[2];
+ uint32_t used[2];
+} VirtIOPCIQueue;
+
+struct VirtIOPCIProxy {
+ PCIDevice pci_dev;
+ MemoryRegion bar;
+ union {
+ struct {
+ VirtIOPCIRegion common;
+ VirtIOPCIRegion isr;
+ VirtIOPCIRegion device;
+ VirtIOPCIRegion notify;
+ VirtIOPCIRegion notify_pio;
+ };
+ VirtIOPCIRegion regs[5];
+ };
+ MemoryRegion modern_bar;
+ MemoryRegion io_bar;
+ uint32_t legacy_io_bar_idx;
+ uint32_t msix_bar_idx;
+ uint32_t modern_io_bar_idx;
+ uint32_t modern_mem_bar_idx;
+ int config_cap;
+ uint32_t flags;
+ bool disable_modern;
+ bool ignore_backend_features;
+ OnOffAuto disable_legacy;
+ /* Transitional device id */
+ uint16_t trans_devid;
+ uint32_t class_code;
+ uint32_t nvectors;
+ uint32_t dfselect;
+ uint32_t gfselect;
+ uint32_t guest_features[2];
+ VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
+
+ VirtIOIRQFD *vector_irqfd;
+ int nvqs_with_notifiers;
+ VirtioBusState bus;
+};
+
+static inline bool virtio_pci_modern(VirtIOPCIProxy *proxy)
+{
+ return !proxy->disable_modern;
+}
+
+static inline bool virtio_pci_legacy(VirtIOPCIProxy *proxy)
+{
+ return proxy->disable_legacy == ON_OFF_AUTO_OFF;
+}
+
+static inline void virtio_pci_force_virtio_1(VirtIOPCIProxy *proxy)
+{
+ proxy->disable_modern = false;
+ proxy->disable_legacy = ON_OFF_AUTO_ON;
+}
+
+static inline void virtio_pci_disable_modern(VirtIOPCIProxy *proxy)
+{
+ proxy->disable_modern = true;
+}
+
+uint16_t virtio_pci_get_trans_devid(uint16_t device_id);
+uint16_t virtio_pci_get_class_id(uint16_t device_id);
+
+/*
+ * virtio-input-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_INPUT_PCI "virtio-input-pci"
+
+/* Virtio ABI version, if we increment this, we break the guest driver. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/* Input for virtio_pci_types_register() */
+typedef struct VirtioPCIDeviceTypeInfo {
+ /*
+ * Common base class for the subclasses below.
+ *
+ * Required only if transitional_name or non_transitional_name is set.
+ *
+ * We need a separate base type instead of making all types
+ * inherit from generic_name for two reasons:
+ * 1) generic_name implements INTERFACE_PCIE_DEVICE, but
+ * transitional_name does not.
+ * 2) generic_name has the "disable-legacy" and "disable-modern"
+ * properties, transitional_name and non_transitional name don't.
+ */
+ const char *base_name;
+ /*
+ * Generic device type. Optional.
+ *
+ * Supports both transitional and non-transitional modes,
+ * using the disable-legacy and disable-modern properties.
+ * If disable-legacy=auto, (non-)transitional mode is selected
+ * depending on the bus where the device is plugged.
+ *
+ * Implements both INTERFACE_PCIE_DEVICE and INTERFACE_CONVENTIONAL_PCI_DEVICE,
+ * but PCI Express is supported only in non-transitional mode.
+ *
+ * The only type implemented by QEMU 3.1 and older.
+ */
+ const char *generic_name;
+ /*
+ * The transitional device type. Optional.
+ *
+ * Implements both INTERFACE_PCIE_DEVICE and INTERFACE_CONVENTIONAL_PCI_DEVICE.
+ */
+ const char *transitional_name;
+ /*
+ * The non-transitional device type. Optional.
+ *
+ * Implements INTERFACE_CONVENTIONAL_PCI_DEVICE only.
+ */
+ const char *non_transitional_name;
+
+ /* Parent type. If NULL, TYPE_VIRTIO_PCI is used */
+ const char *parent;
+
+ /* Same as TypeInfo fields: */
+ size_t instance_size;
+ size_t class_size;
+ void (*instance_init)(Object *obj);
+ void (*instance_finalize)(Object *obj);
+ void (*class_init)(ObjectClass *klass, void *data);
+ InterfaceInfo *interfaces;
+} VirtioPCIDeviceTypeInfo;
+
+/* Register virtio-pci type(s). @t must be static. */
+void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t);
+
+/**
+ * virtio_pci_optimal_num_queues:
+ * @fixed_queues: number of queues that are always present
+ *
+ * Returns: The optimal number of queues for a multi-queue device, excluding
+ * @fixed_queues.
+ */
+unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues);
+void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
+ int n, bool assign,
+ bool with_irqfd);
+
+int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy, uint8_t bar, uint64_t offset,
+ uint64_t length, uint8_t id);
+
+#endif
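
Device front-ends register their PCI variants through virtio_pci_types_register(); a hedged sketch using the VirtioPCIDeviceTypeInfo fields documented above (all names and the instance size are placeholders for an imaginary "virtio-example" device):

    static void example_pci_instance_init(Object *obj)
    {
        /* virtio_instance_init_common() would set up the embedded vdev here. */
    }

    static const VirtioPCIDeviceTypeInfo example_pci_info = {
        .base_name              = "virtio-example-pci-base",
        .generic_name           = "virtio-example-pci",
        .transitional_name      = "virtio-example-pci-transitional",
        .non_transitional_name  = "virtio-example-pci-non-transitional",
        .instance_size          = sizeof(VirtIOPCIProxy),
        .instance_init          = example_pci_instance_init,
    };

    static void example_pci_register_types(void)
    {
        virtio_pci_types_register(&example_pci_info);
    }
    type_init(example_pci_register_types)
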
diff --git a/include/hw/virtio/virtio-pmem.h b/include/hw/virtio/virtio-pmem.h
index 33f1999320..fc4fd1f7fe 100644
--- a/include/hw/virtio/virtio-pmem.h
+++ b/include/hw/virtio/virtio-pmem.h
@@ -15,35 +15,32 @@
#define HW_VIRTIO_PMEM_H
#include "hw/virtio/virtio.h"
-#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-machine.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_PMEM "virtio-pmem"
-#define VIRTIO_PMEM(obj) \
- OBJECT_CHECK(VirtIOPMEM, (obj), TYPE_VIRTIO_PMEM)
-#define VIRTIO_PMEM_CLASS(oc) \
- OBJECT_CLASS_CHECK(VirtIOPMEMClass, (oc), TYPE_VIRTIO_PMEM)
-#define VIRTIO_PMEM_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOPMEMClass, (obj), TYPE_VIRTIO_PMEM)
+OBJECT_DECLARE_TYPE(VirtIOPMEM, VirtIOPMEMClass,
+ VIRTIO_PMEM)
#define VIRTIO_PMEM_ADDR_PROP "memaddr"
#define VIRTIO_PMEM_MEMDEV_PROP "memdev"
-typedef struct VirtIOPMEM {
+struct VirtIOPMEM {
VirtIODevice parent_obj;
VirtQueue *rq_vq;
uint64_t start;
HostMemoryBackend *memdev;
-} VirtIOPMEM;
+};
-typedef struct VirtIOPMEMClass {
+struct VirtIOPMEMClass {
/* private */
VirtIODevice parent;
/* public */
void (*fill_device_info)(const VirtIOPMEM *pmem, VirtioPMEMDeviceInfo *vi);
MemoryRegion *(*get_memory_region)(VirtIOPMEM *pmem, Error **errp);
-} VirtIOPMEMClass;
+};
#endif
diff --git a/include/hw/virtio/virtio-rng.h b/include/hw/virtio/virtio-rng.h
index bd05d734b8..82734255d9 100644
--- a/include/hw/virtio/virtio-rng.h
+++ b/include/hw/virtio/virtio-rng.h
@@ -15,10 +15,10 @@
#include "hw/virtio/virtio.h"
#include "sysemu/rng.h"
#include "standard-headers/linux/virtio_rng.h"
+#include "qom/object.h"
#define TYPE_VIRTIO_RNG "virtio-rng-device"
-#define VIRTIO_RNG(obj) \
- OBJECT_CHECK(VirtIORNG, (obj), TYPE_VIRTIO_RNG)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIORNG, VIRTIO_RNG)
#define VIRTIO_RNG_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_RNG)
@@ -28,7 +28,7 @@ struct VirtIORNGConf {
uint32_t period_ms;
};
-typedef struct VirtIORNG {
+struct VirtIORNG {
VirtIODevice parent_obj;
/* Only one vq - guest puts buffer(s) on it when it needs entropy */
@@ -46,6 +46,6 @@ typedef struct VirtIORNG {
bool activate_timer;
VMChangeStateEntry *vmstate;
-} VirtIORNG;
+};
#endif
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 24e768909d..7be0105918 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -13,29 +13,32 @@
#ifndef QEMU_VIRTIO_SCSI_H
#define QEMU_VIRTIO_SCSI_H
+#include "qom/object.h"
/* Override CDB/sense data size: they are dynamic (guest controlled) in QEMU */
#define VIRTIO_SCSI_CDB_SIZE 0
#define VIRTIO_SCSI_SENSE_SIZE 0
#include "standard-headers/linux/virtio_scsi.h"
#include "hw/virtio/virtio.h"
-#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "chardev/char-fe.h"
#include "sysemu/iothread.h"
#define TYPE_VIRTIO_SCSI_COMMON "virtio-scsi-common"
-#define VIRTIO_SCSI_COMMON(obj) \
- OBJECT_CHECK(VirtIOSCSICommon, (obj), TYPE_VIRTIO_SCSI_COMMON)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICommon, VIRTIO_SCSI_COMMON)
#define TYPE_VIRTIO_SCSI "virtio-scsi-device"
-#define VIRTIO_SCSI(obj) \
- OBJECT_CHECK(VirtIOSCSI, (obj), TYPE_VIRTIO_SCSI)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSI, VIRTIO_SCSI)
#define VIRTIO_SCSI_MAX_CHANNEL 0
#define VIRTIO_SCSI_MAX_TARGET 255
#define VIRTIO_SCSI_MAX_LUN 16383
+/* Number of virtqueues that are always present */
+#define VIRTIO_SCSI_VQ_NUM_FIXED 2
+
+#define VIRTIO_SCSI_AUTO_NUM_QUEUES UINT32_MAX
+
typedef struct virtio_scsi_cmd_req VirtIOSCSICmdReq;
typedef struct virtio_scsi_cmd_resp VirtIOSCSICmdResp;
typedef struct virtio_scsi_ctrl_tmf_req VirtIOSCSICtrlTMFReq;
@@ -48,13 +51,12 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
struct VirtIOSCSIConf {
uint32_t num_queues;
uint32_t virtqueue_size;
+ bool worker_per_virtqueue;
bool seg_max_adjust;
uint32_t max_sectors;
uint32_t cmd_per_lun;
-#ifdef CONFIG_VHOST_SCSI
char *vhostfd;
char *wwpn;
-#endif
CharBackend chardev;
uint32_t boot_tpgt;
IOThread *iothread;
@@ -62,7 +64,7 @@ struct VirtIOSCSIConf {
struct VirtIOSCSI;
-typedef struct VirtIOSCSICommon {
+struct VirtIOSCSICommon {
VirtIODevice parent_obj;
VirtIOSCSIConf conf;
@@ -71,15 +73,25 @@ typedef struct VirtIOSCSICommon {
VirtQueue *ctrl_vq;
VirtQueue *event_vq;
VirtQueue **cmd_vqs;
-} VirtIOSCSICommon;
+};
-typedef struct VirtIOSCSI {
+struct VirtIOSCSIReq;
+
+struct VirtIOSCSI {
VirtIOSCSICommon parent_obj;
SCSIBus bus;
- int resetting;
+ int resetting; /* written from main loop thread, read from any thread */
bool events_dropped;
+ /*
+ * TMFs deferred to main loop BH. These fields are protected by
+ * tmf_bh_lock.
+ */
+ QemuMutex tmf_bh_lock;
+ QEMUBH *tmf_bh;
+ QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;
+
/* Fields for dataplane below */
AioContext *ctx; /* one iothread per virtio-scsi-pci for now */
@@ -88,57 +100,7 @@ typedef struct VirtIOSCSI {
bool dataplane_stopping;
bool dataplane_fenced;
uint32_t host_features;
-} VirtIOSCSI;
-
-typedef struct VirtIOSCSIReq {
- /* Note:
- * - fields up to resp_iov are initialized by virtio_scsi_init_req;
- * - fields starting at vring are zeroed by virtio_scsi_init_req.
- * */
- VirtQueueElement elem;
-
- VirtIOSCSI *dev;
- VirtQueue *vq;
- QEMUSGList qsgl;
- QEMUIOVector resp_iov;
-
- union {
- /* Used for two-stage request submission */
- QTAILQ_ENTRY(VirtIOSCSIReq) next;
-
- /* Used for cancellation of request during TMFs */
- int remaining;
- };
-
- SCSIRequest *sreq;
- size_t resp_size;
- enum SCSIXferMode mode;
- union {
- VirtIOSCSICmdResp cmd;
- VirtIOSCSICtrlTMFResp tmf;
- VirtIOSCSICtrlANResp an;
- VirtIOSCSIEvent event;
- } resp;
- union {
- VirtIOSCSICmdReq cmd;
- VirtIOSCSICtrlTMFReq tmf;
- VirtIOSCSICtrlANReq an;
- } req;
-} VirtIOSCSIReq;
-
-static inline void virtio_scsi_acquire(VirtIOSCSI *s)
-{
- if (s->ctx) {
- aio_context_acquire(s->ctx);
- }
-}
-
-static inline void virtio_scsi_release(VirtIOSCSI *s)
-{
- if (s->ctx) {
- aio_context_release(s->ctx);
- }
-}
+};
void virtio_scsi_common_realize(DeviceState *dev,
VirtIOHandleOutput ctrl,
@@ -147,13 +109,6 @@ void virtio_scsi_common_realize(DeviceState *dev,
Error **errp);
void virtio_scsi_common_unrealize(DeviceState *dev);
-bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq);
-bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq);
-bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq);
-void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req);
-void virtio_scsi_free_req(VirtIOSCSIReq *req);
-void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
- uint32_t event, uint32_t reason);
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp);
int virtio_scsi_dataplane_start(VirtIODevice *s);
diff --git a/include/hw/virtio/virtio-serial.h b/include/hw/virtio/virtio-serial.h
index ed3e916b68..d87c62eab7 100644
--- a/include/hw/virtio/virtio-serial.h
+++ b/include/hw/virtio/virtio-serial.h
@@ -18,6 +18,7 @@
#include "standard-headers/linux/virtio_console.h"
#include "hw/virtio/virtio.h"
+#include "qom/object.h"
struct virtio_serial_conf {
/* Max. number of ports we can have for a virtio-serial device */
@@ -25,18 +26,16 @@ struct virtio_serial_conf {
};
#define TYPE_VIRTIO_SERIAL_PORT "virtio-serial-port"
-#define VIRTIO_SERIAL_PORT(obj) \
- OBJECT_CHECK(VirtIOSerialPort, (obj), TYPE_VIRTIO_SERIAL_PORT)
-#define VIRTIO_SERIAL_PORT_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOSerialPortClass, (klass), TYPE_VIRTIO_SERIAL_PORT)
-#define VIRTIO_SERIAL_PORT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOSerialPortClass, (obj), TYPE_VIRTIO_SERIAL_PORT)
+OBJECT_DECLARE_TYPE(VirtIOSerialPort, VirtIOSerialPortClass,
+ VIRTIO_SERIAL_PORT)
typedef struct VirtIOSerial VirtIOSerial;
-typedef struct VirtIOSerialBus VirtIOSerialBus;
-typedef struct VirtIOSerialPort VirtIOSerialPort;
-typedef struct VirtIOSerialPortClass {
+#define TYPE_VIRTIO_SERIAL_BUS "virtio-serial-bus"
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSerialBus, VIRTIO_SERIAL_BUS)
+
+
+struct VirtIOSerialPortClass {
DeviceClass parent_class;
/* Is this a device that binds with hvc in the guest? */
@@ -81,7 +80,7 @@ typedef struct VirtIOSerialPortClass {
*/
ssize_t (*have_data)(VirtIOSerialPort *port, const uint8_t *buf,
ssize_t len);
-} VirtIOSerialPortClass;
+};
/*
* This is the state that's shared between all the ports. Some of the
@@ -223,7 +222,6 @@ size_t virtio_serial_guest_ready(VirtIOSerialPort *port);
void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle);
#define TYPE_VIRTIO_SERIAL "virtio-serial-device"
-#define VIRTIO_SERIAL(obj) \
- OBJECT_CHECK(VirtIOSerial, (obj), TYPE_VIRTIO_SERIAL)
+OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSerial, VIRTIO_SERIAL)
#endif
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index e424df12cf..7d5ffdc145 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -21,9 +21,16 @@
#include "qemu/event_notifier.h"
#include "standard-headers/linux/virtio_config.h"
#include "standard-headers/linux/virtio_ring.h"
+#include "qom/object.h"
+#include "block/aio.h"
-/* A guest should never accept this. It implies negotiation is broken. */
-#define VIRTIO_F_BAD_FEATURE 30
+/*
+ * A guest should never accept this. It implies negotiation is broken
+ * between the driver frontend and the device. This bit is re-used for
+ * vhost-user to advertise VHOST_USER_F_PROTOCOL_FEATURES between QEMU
+ * and a vhost-user backend.
+ */
+#define VIRTIO_F_BAD_FEATURE 30
#define VIRTIO_LEGACY_FEATURES ((0x1ULL << VIRTIO_F_BAD_FEATURE) | \
(0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
@@ -42,8 +49,14 @@ typedef struct VirtIOFeature {
size_t end;
} VirtIOFeature;
-size_t virtio_feature_get_config_size(VirtIOFeature *features,
- uint64_t host_features);
+typedef struct VirtIOConfigSizeParams {
+ size_t min_size;
+ size_t max_size;
+ const VirtIOFeature *feature_sizes;
+} VirtIOConfigSizeParams;
+
+size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
+ uint64_t host_features);
typedef struct VirtQueue VirtQueue;
@@ -66,13 +79,16 @@ typedef struct VirtQueueElement
#define VIRTIO_NO_VECTOR 0xffff
+/* special index value used internally for config irqs */
+#define VIRTIO_CONFIG_IRQ_IDX -1
+
#define TYPE_VIRTIO_DEVICE "virtio-device"
-#define VIRTIO_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtioDeviceClass, obj, TYPE_VIRTIO_DEVICE)
-#define VIRTIO_DEVICE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtioDeviceClass, klass, TYPE_VIRTIO_DEVICE)
-#define VIRTIO_DEVICE(obj) \
- OBJECT_CHECK(VirtIODevice, (obj), TYPE_VIRTIO_DEVICE)
+OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE)
+
+typedef struct {
+ int virtio_bit;
+ const char *feature_desc;
+} qmp_virtio_feature_map_t;
enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_UNKNOWN,
@@ -80,6 +96,12 @@ enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_BIG,
};
+/**
+ * struct VirtIODevice - common VirtIO structure
+ * @name: name of the device
+ * @status: VirtIO Device Status field
+ *
+ */
struct VirtIODevice
{
DeviceState parent_obj;
@@ -87,9 +109,20 @@ struct VirtIODevice
uint8_t status;
uint8_t isr;
uint16_t queue_sel;
- uint64_t guest_features;
+ /**
+ * These fields represent a set of VirtIO features at various
+ * levels of the stack. @host_features indicates the complete
+ * feature set the VirtIO device can offer to the driver.
+ * @guest_features indicates which features the VirtIO driver has
+ * selected by writing to the feature register. Finally
+ * @backend_features represents everything supported by the
+ * backend (e.g. vhost) and could potentially be a subset of the
+ * total feature set offered by QEMU.
+ */
uint64_t host_features;
+ uint64_t guest_features;
uint64_t backend_features;
+
size_t config_len;
void *config;
uint16_t config_vector;
@@ -98,22 +131,43 @@ struct VirtIODevice
VirtQueue *vq;
MemoryListener listener;
uint16_t device_id;
+ /* @vm_running: current VM running state via virtio_vmstate_change() */
bool vm_running;
bool broken; /* device in invalid state, needs reset */
bool use_disabled_flag; /* allow use of 'disable' flag when needed */
bool disabled; /* device in temporarily disabled state */
+ /**
+ * @use_started: true if the @started flag should be used to check the
+ * current state of the VirtIO device. Otherwise status bits
+ * should be checked for a current status of the device.
+ * @use_started is only set via QMP and defaults to true for all
+ * modern machines (since 4.1).
+ */
bool use_started;
bool started;
bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
+ bool disable_legacy_check;
+ bool vhost_started;
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
+ /**
+     * @use_guest_notifier_mask: gate usage of ->guest_notifier_mask() callback.
+ * This is used to suppress the masking of guest updates for
+ * vhost-user devices which are asynchronous by design.
+ */
bool use_guest_notifier_mask;
AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
+ QTAILQ_ENTRY(VirtIODevice) next;
+ /**
+ * @config_notifier: the event notifier that handles config events
+ */
+ EventNotifier config_notifier;
+ bool device_iotlb_enabled;
};
-typedef struct VirtioDeviceClass {
+struct VirtioDeviceClass {
/*< private >*/
DeviceClass parent;
/*< public >*/
@@ -131,6 +185,10 @@ typedef struct VirtioDeviceClass {
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
void (*reset)(VirtIODevice *vdev);
void (*set_status)(VirtIODevice *vdev, uint8_t val);
+ /* Device must validate queue_index. */
+ void (*queue_reset)(VirtIODevice *vdev, uint32_t queue_index);
+ /* Device must validate queue_index. */
+ void (*queue_enable)(VirtIODevice *vdev, uint32_t queue_index);
/* For transitional devices, this is a bitmap of features
* that are only exposed on the legacy interface but not
* the modern one.
@@ -163,22 +221,29 @@ typedef struct VirtioDeviceClass {
int (*post_load)(VirtIODevice *vdev);
const VMStateDescription *vmsd;
bool (*primary_unplug_pending)(void *opaque);
-} VirtioDeviceClass;
+ struct vhost_dev *(*get_vhost)(VirtIODevice *vdev);
+ void (*toggle_device_iotlb)(VirtIODevice *vdev);
+};
void virtio_instance_init_common(Object *proxy_obj, void *data,
size_t vdev_size, const char *vdev_name);
-void virtio_init(VirtIODevice *vdev, const char *name,
- uint16_t device_id, size_t config_size);
+/**
+ * virtio_init() - initialise the common VirtIODevice structure
+ * @vdev: pointer to VirtIODevice
+ * @device_id: the VirtIO device ID (see virtio_ids.h)
+ * @config_size: size of the config space
+ */
+void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size);
+
void virtio_cleanup(VirtIODevice *vdev);
-void virtio_error(VirtIODevice *vdev, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+void virtio_error(VirtIODevice *vdev, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
/* Set the child bus name. */
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name);
typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
-typedef bool (*VirtIOHandleAIOOutput)(VirtIODevice *, VirtQueue *);
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
VirtIOHandleOutput handle_output);
@@ -226,6 +291,13 @@ extern const VMStateInfo virtio_vmstate_info;
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id);
+/**
+ * virtio_notify_config() - signal a change to device config
+ * @vdev: the virtio device
+ *
+ * Assuming the virtio device is up (VIRTIO_CONFIG_S_DRIVER_OK) this
+ * will trigger a guest interrupt and update the config version.
+ */
void virtio_notify_config(VirtIODevice *vdev);
bool virtio_queue_get_notification(VirtQueue *vq);
@@ -261,6 +333,7 @@ int virtio_get_num_queues(VirtIODevice *vdev);
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used);
void virtio_queue_update_rings(VirtIODevice *vdev, int n);
+void virtio_init_region_cache(VirtIODevice *vdev, int n);
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align);
void virtio_queue_notify(VirtIODevice *vdev, int n);
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
@@ -269,6 +342,8 @@ int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
MemoryRegion *mr, bool assign);
int virtio_set_status(VirtIODevice *vdev, uint8_t val);
void virtio_reset(void *opaque);
+void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index);
+void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index);
void virtio_update_irq(VirtIODevice *vdev);
int virtio_set_features(VirtIODevice *vdev, uint64_t val);
@@ -292,7 +367,9 @@ typedef struct VirtIORNGConf VirtIORNGConf;
DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
VIRTIO_F_IOMMU_PLATFORM, false), \
DEFINE_PROP_BIT64("packed", _state, _field, \
- VIRTIO_F_RING_PACKED, false)
+ VIRTIO_F_RING_PACKED, false), \
+ DEFINE_PROP_BIT64("queue_reset", _state, _field, \
+ VIRTIO_F_RING_RESET, true)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n);
@@ -320,10 +397,14 @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
void virtio_queue_host_notifier_read(EventNotifier *n);
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtIOHandleAIOOutput handle_output);
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
+EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev);
+void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
+ bool assign, bool with_irqfd);
static inline void virtio_add_feature(uint64_t *features, unsigned int fbit)
{
@@ -343,7 +424,7 @@ static inline bool virtio_has_feature(uint64_t features, unsigned int fbit)
return !!(features & (1ULL << fbit));
}
-static inline bool virtio_vdev_has_feature(VirtIODevice *vdev,
+static inline bool virtio_vdev_has_feature(const VirtIODevice *vdev,
unsigned int fbit)
{
return virtio_has_feature(vdev->guest_features, fbit);
@@ -365,6 +446,16 @@ static inline bool virtio_is_big_endian(VirtIODevice *vdev)
return false;
}
+/**
+ * virtio_device_started() - check if device started
+ * @vdev: the VirtIO device
+ * @status: the device's status bits
+ *
+ * Check if the device is started. For most modern machines this is
+ * tracked via the @vdev->started field (to support migration),
+ * otherwise we check for the final negotiated status bit that
+ * indicates everything is ready.
+ */
static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
{
if (vdev->use_started) {
@@ -374,6 +465,24 @@ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
return status & VIRTIO_CONFIG_S_DRIVER_OK;
}
+/**
+ * virtio_device_should_start() - check if device startable
+ * @vdev: the VirtIO device
+ * @status: the device's status bits
+ *
+ * This is similar to virtio_device_started() but also encapsulates a
+ * check on the VM status which would prevent a device starting
+ * anyway.
+ */
+static inline bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
+{
+ if (!vdev->vm_running) {
+ return false;
+ }
+
+ return virtio_device_started(vdev, status);
+}
+
static inline void virtio_set_started(VirtIODevice *vdev, bool started)
{
if (started) {
@@ -398,5 +507,12 @@ static inline bool virtio_device_disabled(VirtIODevice *vdev)
}
bool virtio_legacy_allowed(VirtIODevice *vdev);
+bool virtio_legacy_check_disabled(VirtIODevice *vdev);
+
+QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev,
+ QEMUBHFunc *cb, void *opaque,
+ const char *name);
+#define virtio_bh_new_guarded(dev, cb, opaque) \
+ virtio_bh_new_guarded_full((dev), (cb), (opaque), (stringify(cb)))
#endif
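
The started/should-start helpers are aimed at .set_status implementations of vhost-backed devices. A hedged sketch follows (the start/stop calls are placeholders for whatever the device actually does with its backend):

    static void example_set_status(VirtIODevice *vdev, uint8_t status)
    {
        bool should_start = virtio_device_should_start(vdev, status);

        if (vdev->vhost_started == should_start) {
            return;
        }
        if (should_start) {
            /* example_start_vhost_backend(vdev); */
            vdev->vhost_started = true;
        } else {
            /* example_stop_vhost_backend(vdev); */
            vdev->vhost_started = false;
        }
    }
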
diff --git a/include/hw/vmstate-if.h b/include/hw/vmstate-if.h
index 8ff7f0f292..52df571d17 100644
--- a/include/hw/vmstate-if.h
+++ b/include/hw/vmstate-if.h
@@ -13,20 +13,19 @@
#define TYPE_VMSTATE_IF "vmstate-if"
-#define VMSTATE_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(VMStateIfClass, (klass), TYPE_VMSTATE_IF)
-#define VMSTATE_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VMStateIfClass, (obj), TYPE_VMSTATE_IF)
+typedef struct VMStateIfClass VMStateIfClass;
+DECLARE_CLASS_CHECKERS(VMStateIfClass, VMSTATE_IF,
+ TYPE_VMSTATE_IF)
#define VMSTATE_IF(obj) \
INTERFACE_CHECK(VMStateIf, (obj), TYPE_VMSTATE_IF)
typedef struct VMStateIf VMStateIf;
-typedef struct VMStateIfClass {
+struct VMStateIfClass {
InterfaceClass parent_class;
char * (*get_id)(VMStateIf *obj);
-} VMStateIfClass;
+};
static inline char *vmstate_if_get_id(VMStateIf *vmif)
{
diff --git a/include/hw/watchdog/allwinner-wdt.h b/include/hw/watchdog/allwinner-wdt.h
new file mode 100644
index 0000000000..7fe41e20f2
--- /dev/null
+++ b/include/hw/watchdog/allwinner-wdt.h
@@ -0,0 +1,123 @@
+/*
+ * Allwinner Watchdog emulation
+ *
+ * Copyright (C) 2023 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
+ *
+ * This file is derived from Allwinner RTC,
+ * by Niek Linnenbank.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_WATCHDOG_ALLWINNER_WDT_H
+#define HW_WATCHDOG_ALLWINNER_WDT_H
+
+#include "qom/object.h"
+#include "hw/ptimer.h"
+#include "hw/sysbus.h"
+
+/*
+ * This is a model of the Allwinner watchdog.
+ * Since watchdog registers belong to the timer module (and are shared with the
+ * RTC module), the interrupt line from the watchdog is not handled right now.
+ * In QEMU, we just wire up the watchdog reset to watchdog_perform_action(),
+ * at least for the moment.
+ */
+
+#define TYPE_AW_WDT "allwinner-wdt"
+
+/** Allwinner WDT sun4i family (A10, A12), also sun7i (A20) */
+#define TYPE_AW_WDT_SUN4I TYPE_AW_WDT "-sun4i"
+
+/** Allwinner WDT sun6i family and newer (A31, H2+, H3, etc) */
+#define TYPE_AW_WDT_SUN6I TYPE_AW_WDT "-sun6i"
+
+/** Number of WDT registers */
+#define AW_WDT_REGS_NUM (5)
+
+OBJECT_DECLARE_TYPE(AwWdtState, AwWdtClass, AW_WDT)
+
+/**
+ * Allwinner WDT object instance state.
+ */
+struct AwWdtState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+ struct ptimer_state *timer;
+
+ uint32_t regs[AW_WDT_REGS_NUM];
+};
+
+/**
+ * Allwinner WDT class-level struct.
+ *
+ * This struct is filled in by the device-specific code of each sunxi
+ * family so that the generic code can use it to support all devices.
+ */
+struct AwWdtClass {
+ /*< private >*/
+ SysBusDeviceClass parent_class;
+ /*< public >*/
+
+ /** Defines device specific register map */
+ const uint8_t *regmap;
+
+ /** Size of the regmap in bytes */
+ size_t regmap_size;
+
+ /**
+ * Read device specific register
+ *
+ * @offset: register offset to read
+ * @return true if register read successful, false otherwise
+ */
+ bool (*read)(AwWdtState *s, uint32_t offset);
+
+ /**
+ * Write device specific register
+ *
+ * @offset: register offset to write
+ * @data: value to set in register
+ * @return true if register write successful, false otherwise
+ */
+ bool (*write)(AwWdtState *s, uint32_t offset, uint32_t data);
+
+ /**
+ * Check if watchdog can generate system reset
+ *
+ * @return true if watchdog can generate system reset
+ */
+ bool (*can_reset_system)(AwWdtState *s);
+
+ /**
+ * Check if provided key is valid
+ *
+ * @value: value written to register
+ * @return true if key is valid, false otherwise
+ */
+ bool (*is_key_valid)(AwWdtState *s, uint32_t val);
+
+ /**
+ * Get current INTV_VALUE setting
+ *
+ * @return current INTV_VALUE (0-15)
+ */
+ uint8_t (*get_intv_value)(AwWdtState *s);
+};
+
+#endif /* HW_WATCHDOG_ALLWINNER_WDT_H */
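
The class hooks are consumed by the generic watchdog model; a hedged sketch of the timer-expiry path (illustrative; the callback name is an assumption, and watchdog_perform_action() comes from sysemu/watchdog.h):

    static void example_wdt_expired(void *opaque)
    {
        AwWdtState *s = AW_WDT(opaque);
        AwWdtClass *c = AW_WDT_GET_CLASS(s);

        if (c->can_reset_system(s)) {
            /* Configured to reset the machine: take the -watchdog action. */
            watchdog_perform_action();
        }
        /*
         * Otherwise an interrupt would be raised here; as noted at the top of
         * this header, the interrupt line is not modelled yet.
         */
    }
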
diff --git a/include/hw/watchdog/cmsdk-apb-watchdog.h b/include/hw/watchdog/cmsdk-apb-watchdog.h
index 6ae9531370..c6b3e78731 100644
--- a/include/hw/watchdog/cmsdk-apb-watchdog.h
+++ b/include/hw/watchdog/cmsdk-apb-watchdog.h
@@ -16,7 +16,7 @@
* https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit
*
* QEMU interface:
- * + QOM property "wdogclk-frq": frequency at which the watchdog is clocked
+ * + Clock input "WDOGCLK": clock for the watchdog's timer
* + sysbus MMIO region 0: the register bank
* + sysbus IRQ 0: watchdog interrupt
*
@@ -33,10 +33,11 @@
#include "hw/sysbus.h"
#include "hw/ptimer.h"
+#include "hw/clock.h"
+#include "qom/object.h"
#define TYPE_CMSDK_APB_WATCHDOG "cmsdk-apb-watchdog"
-#define CMSDK_APB_WATCHDOG(obj) OBJECT_CHECK(CMSDKAPBWatchdog, (obj), \
- TYPE_CMSDK_APB_WATCHDOG)
+OBJECT_DECLARE_SIMPLE_TYPE(CMSDKAPBWatchdog, CMSDK_APB_WATCHDOG)
/*
* This shares the same struct (and cast macro) as the base
@@ -44,16 +45,16 @@
*/
#define TYPE_LUMINARY_WATCHDOG "luminary-watchdog"
-typedef struct CMSDKAPBWatchdog {
+struct CMSDKAPBWatchdog {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
qemu_irq wdogint;
- uint32_t wdogclk_frq;
bool is_luminary;
struct ptimer_state *timer;
+ Clock *wdogclk;
uint32_t control;
uint32_t intstatus;
@@ -62,6 +63,6 @@ typedef struct CMSDKAPBWatchdog {
uint32_t itop;
uint32_t resetstatus;
const uint32_t *id;
-} CMSDKAPBWatchdog;
+};
#endif
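
For context, a hedged sketch of how board code might feed the new "WDOGCLK" clock
input that replaces the removed "wdogclk-frq" property; the wrapper function name is
illustrative, not taken from this patch.

#include "qapi/error.h"
#include "hw/clock.h"
#include "hw/qdev-clock.h"
#include "hw/sysbus.h"

/* Illustrative board wiring: connect a Clock to the watchdog's "WDOGCLK"
 * input before realizing the device (clocks must be wired pre-realize). */
static void example_wire_wdog(DeviceState *wdog, Clock *sysclk)
{
    qdev_connect_clock_in(wdog, "WDOGCLK", sysclk);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(wdog), &error_fatal);
}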
diff --git a/include/hw/watchdog/sbsa_gwdt.h b/include/hw/watchdog/sbsa_gwdt.h
new file mode 100644
index 0000000000..70b137de30
--- /dev/null
+++ b/include/hw/watchdog/sbsa_gwdt.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2020 Linaro Limited
+ *
+ * Authors:
+ * Shashi Mallela <shashi.mallela@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef WDT_SBSA_GWDT_H
+#define WDT_SBSA_GWDT_H
+
+#include "qemu/bitops.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+
+#define TYPE_WDT_SBSA "sbsa_gwdt"
+#define SBSA_GWDT(obj) \
+ OBJECT_CHECK(SBSA_GWDTState, (obj), TYPE_WDT_SBSA)
+#define SBSA_GWDT_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SBSA_GWDTClass, (klass), TYPE_WDT_SBSA)
+#define SBSA_GWDT_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SBSA_GWDTClass, (obj), TYPE_WDT_SBSA)
+
+/* SBSA Generic Watchdog register definitions */
+/* refresh frame */
+#define SBSA_GWDT_WRR 0x000
+
+/* control frame */
+#define SBSA_GWDT_WCS 0x000
+#define SBSA_GWDT_WOR 0x008
+#define SBSA_GWDT_WORU 0x00C
+#define SBSA_GWDT_WCV 0x010
+#define SBSA_GWDT_WCVU 0x014
+
+/* Watchdog Interface Identification Register */
+#define SBSA_GWDT_W_IIDR 0xFCC
+
+/* Watchdog Control and Status Register Bits */
+#define SBSA_GWDT_WCS_EN BIT(0)
+#define SBSA_GWDT_WCS_WS0 BIT(1)
+#define SBSA_GWDT_WCS_WS1 BIT(2)
+
+#define SBSA_GWDT_WOR_MASK 0x0000FFFF
+
+/*
+ * Watchdog Interface Identification Register definition
+ * considering JEP106 code for ARM in Bits [11:0]
+ */
+#define SBSA_GWDT_ID 0x1043B
+
+/* 2 Separate memory regions for each of refresh & control register frames */
+#define SBSA_GWDT_RMMIO_SIZE 0x1000
+#define SBSA_GWDT_CMMIO_SIZE 0x1000
+
+#define SBSA_TIMER_FREQ 62500000 /* Hz */
+
+typedef struct SBSA_GWDTState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion rmmio;
+ MemoryRegion cmmio;
+ qemu_irq irq;
+
+ QEMUTimer *timer;
+
+ uint32_t id;
+ uint32_t wcs;
+ uint32_t worl;
+ uint32_t woru;
+ uint32_t wcvl;
+ uint32_t wcvu;
+} SBSA_GWDTState;
+
+#endif /* WDT_SBSA_GWDT_H */
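
A hedged sketch of the timeout arithmetic these registers imply, assuming WORU:WORL
holds a tick count at SBSA_TIMER_FREQ; the helper name is illustrative and not part
of this patch.

#include "qemu/timer.h"
#include "qemu/host-utils.h"
#include "hw/watchdog/sbsa_gwdt.h"

/* Illustrative only: convert the programmed watchdog offset (WORU:WORL,
 * counted in SBSA_TIMER_FREQ ticks) into a nanosecond timeout. */
static uint64_t example_sbsa_gwdt_timeout_ns(const SBSA_GWDTState *s)
{
    uint64_t ticks = ((uint64_t)s->woru << 32) | s->worl;

    return muldiv64(ticks, NANOSECONDS_PER_SECOND, SBSA_TIMER_FREQ);
}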
diff --git a/include/hw/watchdog/wdt_aspeed.h b/include/hw/watchdog/wdt_aspeed.h
index 819c22993a..e90ef86651 100644
--- a/include/hw/watchdog/wdt_aspeed.h
+++ b/include/hw/watchdog/wdt_aspeed.h
@@ -12,17 +12,18 @@
#include "hw/misc/aspeed_scu.h"
#include "hw/sysbus.h"
+#include "qom/object.h"
#define TYPE_ASPEED_WDT "aspeed.wdt"
-#define ASPEED_WDT(obj) \
- OBJECT_CHECK(AspeedWDTState, (obj), TYPE_ASPEED_WDT)
+OBJECT_DECLARE_TYPE(AspeedWDTState, AspeedWDTClass, ASPEED_WDT)
#define TYPE_ASPEED_2400_WDT TYPE_ASPEED_WDT "-ast2400"
#define TYPE_ASPEED_2500_WDT TYPE_ASPEED_WDT "-ast2500"
#define TYPE_ASPEED_2600_WDT TYPE_ASPEED_WDT "-ast2600"
+#define TYPE_ASPEED_1030_WDT TYPE_ASPEED_WDT "-ast1030"
-#define ASPEED_WDT_REGS_MAX (0x20 / 4)
+#define ASPEED_WDT_REGS_MAX (0x30 / 4)
-typedef struct AspeedWDTState {
+struct AspeedWDTState {
/*< private >*/
SysBusDevice parent_obj;
QEMUTimer *timer;
@@ -33,21 +34,20 @@ typedef struct AspeedWDTState {
AspeedSCUState *scu;
uint32_t pclk_freq;
-} AspeedWDTState;
+};
-#define ASPEED_WDT_CLASS(klass) \
- OBJECT_CLASS_CHECK(AspeedWDTClass, (klass), TYPE_ASPEED_WDT)
-#define ASPEED_WDT_GET_CLASS(obj) \
- OBJECT_GET_CLASS(AspeedWDTClass, (obj), TYPE_ASPEED_WDT)
-typedef struct AspeedWDTClass {
+struct AspeedWDTClass {
SysBusDeviceClass parent_class;
- uint32_t offset;
+ uint32_t iosize;
uint32_t ext_pulse_width_mask;
uint32_t reset_ctrl_reg;
void (*reset_pulse)(AspeedWDTState *s, uint32_t property);
void (*wdt_reload)(AspeedWDTState *s);
-} AspeedWDTClass;
+ uint64_t (*sanitize_ctrl)(uint64_t data);
+ uint32_t default_status;
+ uint32_t default_reload_value;
+};
#endif /* WDT_ASPEED_H */
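
A hedged sketch of how a SoC-specific subclass might fill the new class fields
(iosize, sanitize_ctrl, default_status, default_reload_value); the values and names
below are illustrative, not the real AST2600 settings.

#include "hw/watchdog/wdt_aspeed.h"

/* Illustrative only: clamp the control register to bits this SoC implements. */
static uint64_t example_wdt_sanitize_ctrl(uint64_t data)
{
    return data & 0xffff;
}

static void example_wdt_class_init(ObjectClass *klass, void *data)
{
    AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass);

    awc->iosize               = 0x20;
    awc->ext_pulse_width_mask = 0xff;
    awc->sanitize_ctrl        = example_wdt_sanitize_ctrl;
    awc->default_status       = 0x014FB180;      /* illustrative reset value */
    awc->default_reload_value = 0x014FB180;
}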
diff --git a/include/hw/watchdog/wdt_diag288.h b/include/hw/watchdog/wdt_diag288.h
index 19d83a0937..f72c1d3318 100644
--- a/include/hw/watchdog/wdt_diag288.h
+++ b/include/hw/watchdog/wdt_diag288.h
@@ -2,35 +2,34 @@
#define WDT_DIAG288_H
#include "hw/qdev-core.h"
+#include "qom/object.h"
#define TYPE_WDT_DIAG288 "diag288"
-#define DIAG288(obj) \
- OBJECT_CHECK(DIAG288State, (obj), TYPE_WDT_DIAG288)
-#define DIAG288_CLASS(klass) \
- OBJECT_CLASS_CHECK(DIAG288Class, (klass), TYPE_WDT_DIAG288)
-#define DIAG288_GET_CLASS(obj) \
- OBJECT_GET_CLASS(DIAG288Class, (obj), TYPE_WDT_DIAG288)
+typedef struct DIAG288Class DIAG288Class;
+typedef struct DIAG288State DIAG288State;
+DECLARE_OBJ_CHECKERS(DIAG288State, DIAG288Class,
+ DIAG288, TYPE_WDT_DIAG288)
#define WDT_DIAG288_INIT 0
#define WDT_DIAG288_CHANGE 1
#define WDT_DIAG288_CANCEL 2
-typedef struct DIAG288State {
+struct DIAG288State {
/*< private >*/
DeviceState parent_obj;
QEMUTimer *timer;
bool enabled;
/*< public >*/
-} DIAG288State;
+};
-typedef struct DIAG288Class {
+struct DIAG288Class {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
int (*handle_timer)(DIAG288State *dev,
uint64_t func, uint64_t timeout);
-} DIAG288Class;
+};
#endif /* WDT_DIAG288_H */
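
For illustration only, a minimal sketch of dispatching a DIAG 288 request through the
class hook declared above; the wrapper name is hypothetical.

#include "hw/watchdog/wdt_diag288.h"

/* Illustrative dispatcher: forward INIT/CHANGE/CANCEL to the device-specific
 * handler registered in DIAG288Class. */
static int example_diag288_call(DIAG288State *dev, uint64_t func,
                                uint64_t timeout)
{
    DIAG288Class *k = DIAG288_GET_CLASS(dev);

    return k->handle_timer(dev, func, timeout);
}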
diff --git a/include/hw/watchdog/wdt_imx2.h b/include/hw/watchdog/wdt_imx2.h
index f9af6be4b6..600a552d2e 100644
--- a/include/hw/watchdog/wdt_imx2.h
+++ b/include/hw/watchdog/wdt_imx2.h
@@ -9,16 +9,17 @@
* See the COPYING file in the top-level directory.
*/
-#ifndef IMX2_WDT_H
-#define IMX2_WDT_H
+#ifndef WDT_IMX2_H
+#define WDT_IMX2_H
#include "qemu/bitops.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
#include "hw/ptimer.h"
+#include "qom/object.h"
#define TYPE_IMX2_WDT "imx2.wdt"
-#define IMX2_WDT(obj) OBJECT_CHECK(IMX2WdtState, (obj), TYPE_IMX2_WDT)
+OBJECT_DECLARE_SIMPLE_TYPE(IMX2WdtState, IMX2_WDT)
enum IMX2WdtRegisters {
IMX2_WDT_WCR = 0x0000, /* Control Register */
@@ -62,7 +63,7 @@ enum IMX2WdtRegisters {
/* Misc Control Register definitions */
#define IMX2_WDT_WMCR_PDE BIT(0) /* Power-Down Enable */
-typedef struct IMX2WdtState {
+struct IMX2WdtState {
/* <private> */
SysBusDevice parent_obj;
@@ -85,6 +86,6 @@ typedef struct IMX2WdtState {
bool wcr_locked; /* affects WDZST, WDBG, and WDW */
bool wcr_wde_locked; /* affects WDE */
bool wcr_wdt_locked; /* affects WDT (never cleared) */
-} IMX2WdtState;
+};
-#endif /* IMX2_WDT_H */
+#endif /* WDT_IMX2_H */
diff --git a/include/hw/xen/arch_hvm.h b/include/hw/xen/arch_hvm.h
new file mode 100644
index 0000000000..c7c515220d
--- /dev/null
+++ b/include/hw/xen/arch_hvm.h
@@ -0,0 +1,5 @@
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
+#include "hw/i386/xen_arch_hvm.h"
+#elif defined(TARGET_ARM) || defined(TARGET_ARM_64)
+#include "hw/arm/xen_arch_hvm.h"
+#endif
diff --git a/include/hw/xen/interface/arch-arm.h b/include/hw/xen/interface/arch-arm.h
new file mode 100644
index 0000000000..1528ced509
--- /dev/null
+++ b/include/hw/xen/interface/arch-arm.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * arch-arm.h
+ *
+ * Guest OS interface to ARM Xen.
+ *
+ * Copyright 2011 (C) Citrix Systems
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_ARM_H__
+#define __XEN_PUBLIC_ARCH_ARM_H__
+
+/*
+ * `incontents 50 arm_abi Hypercall Calling Convention
+ *
+ * A hypercall is issued using the ARM HVC instruction.
+ *
+ * A hypercall can take up to 5 arguments. These are passed in
+ * registers, the first argument in x0/r0 (for arm64/arm32 guests
+ * respectively irrespective of whether the underlying hypervisor is
+ * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2,
+ * the fourth in x3/r3 and the fifth in x4/r4.
+ *
+ * The hypercall number is passed in r12 (arm) or x16 (arm64). In both
+ * cases the relevant ARM procedure calling convention specifies this
+ * is an inter-procedure-call scratch register (e.g. for use in linker
+ * stubs). This use does not conflict with use during a hypercall.
+ *
+ * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG.
+ *
+ * The return value is in x0/r0.
+ *
+ * The hypercall will clobber x16/r12 and the argument registers used
+ * by that hypercall (except r0 which is the return value) i.e. in
+ * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a
+ * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3.
+ *
+ * Parameter structs passed to hypercalls are laid out according to
+ * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA
+ * EABI) and Procedure Call Standard for the ARM 64-bit Architecture
+ * (AAPCS64). Where there is a conflict the 64-bit standard should be
+ * used regardless of guest type. Structures which are passed as
+ * hypercall arguments are always little endian.
+ *
+ * All memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner Write-Back Outer Write-Back Inner-Shareable.
+ * This applies to:
+ * - hypercall arguments passed via a pointer to guest memory.
+ * - memory shared via the grant table mechanism (including PV I/O
+ * rings etc).
+ * - memory shared with the hypervisor (struct shared_info, struct
+ * vcpu_info, the grant table, etc).
+ *
+ * Any cache allocation hints are acceptable.
+ */
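
To make the convention above concrete, a hedged sketch of a two-argument hypercall
from an AArch64 guest; this wrapper is purely illustrative and is not defined by this
header.

#include <stdint.h>

/* Illustrative AArch64 hypercall wrapper: number in x16, arguments in x0/x1,
 * HVC immediate carries XEN_HYPERCALL_TAG (0xEA1), result returned in x0. */
static inline uint64_t example_xen_hypercall2(uint64_t nr,
                                              uint64_t arg0, uint64_t arg1)
{
    register uint64_t x0 asm("x0") = arg0;
    register uint64_t x1 asm("x1") = arg1;
    register uint64_t x16 asm("x16") = nr;

    asm volatile("hvc #0xea1"
                 : "+r" (x0), "+r" (x1), "+r" (x16)
                 :
                 : "memory");
    return x0;
}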
+
+/*
+ * `incontents 55 arm_hcall Supported Hypercalls
+ *
+ * Xen on ARM makes extensive use of hardware facilities and therefore
+ * only a subset of the potential hypercalls are required.
+ *
+ * Since ARM uses second stage paging any machine/physical addresses
+ * passed to hypercalls are Guest Physical Addresses (Intermediate
+ * Physical Addresses) unless otherwise noted.
+ *
+ * The following hypercalls (and sub operations) are supported on the
+ * ARM platform. Other hypercalls should be considered
+ * unavailable/unsupported.
+ *
+ * HYPERVISOR_memory_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_domctl
+ * All generic sub-operations, with the exception of:
+ * * XEN_DOMCTL_irq_permission (not yet implemented)
+ *
+ * HYPERVISOR_sched_op
+ * All generic sub-operations, with the exception of:
+ * * SCHEDOP_block -- prefer wfi hardware instruction
+ *
+ * HYPERVISOR_console_io
+ * All generic sub-operations
+ *
+ * HYPERVISOR_xen_version
+ * All generic sub-operations
+ *
+ * HYPERVISOR_event_channel_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_physdev_op
+ * Exactly these sub-operations are supported:
+ * PHYSDEVOP_pci_device_add
+ * PHYSDEVOP_pci_device_remove
+ *
+ * HYPERVISOR_sysctl
+ * All generic sub-operations, with the exception of:
+ * * XEN_SYSCTL_page_offline_op
+ * * XEN_SYSCTL_get_pmstat
+ * * XEN_SYSCTL_pm_op
+ *
+ * HYPERVISOR_hvm_op
+ * Exactly these sub-operations are supported:
+ * * HVMOP_set_param
+ * * HVMOP_get_param
+ *
+ * HYPERVISOR_grant_table_op
+ * All generic sub-operations
+ *
+ * HYPERVISOR_vcpu_op
+ * Exactly these sub-operations are supported:
+ * * VCPUOP_register_vcpu_info
+ * * VCPUOP_register_runstate_memory_area
+ *
+ * HYPERVISOR_argo_op
+ * All generic sub-operations
+ *
+ * Other notes on the ARM ABI:
+ *
+ * - struct start_info is not exported to ARM guests.
+ *
+ * - struct shared_info is mapped by ARM guests using the
+ * HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing
+ * XENMAPSPACE_shared_info as space parameter.
+ *
+ * - All the per-cpu struct vcpu_info are mapped by ARM guests using the
+ * HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0
+ * struct vcpu_info.
+ *
+ * - The grant table is mapped using the HYPERVISOR_memory_op sub-op
+ * XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space
+ * parameter. The memory range specified under the Xen compatible
+ * hypervisor node on device tree can be used as target gpfn for the
+ * mapping.
+ *
+ * - Xenstore is initialized by using the two hvm_params
+ * HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read
+ * with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
+ *
+ * - The paravirtualized console is initialized by using the two
+ * hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They
+ * can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
+ *
+ * - Event channel notifications are delivered using the percpu GIC
+ * interrupt specified under the Xen compatible hypervisor node on
+ * device tree.
+ *
+ * - The device tree Xen compatible node is fully described under Linux
+ * at Documentation/devicetree/bindings/arm/xen.txt.
+ */
+
+#define XEN_HYPERCALL_TAG 0XEA1
+
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+
+#ifndef __ASSEMBLY__
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef union { type *p; unsigned long q; } \
+ __guest_handle_ ## name; \
+ typedef union { type *p; uint64_aligned_t q; } \
+ __guest_handle_64_ ## name
+
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer when passed as a field
+ * in a struct in memory. On ARM it is always 8 bytes in size and 8-byte
+ * aligned.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer when passed as a
+ * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
+ */
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
+#define set_xen_guest_handle_raw(hnd, val) \
+ do { \
+ __typeof__(&(hnd)) _sxghr_tmp = &(hnd); \
+ _sxghr_tmp->q = 0; \
+ _sxghr_tmp->p = val; \
+ } while ( 0 )
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
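
A hedged usage sketch of the handle machinery above; the handle type, struct, and
helper are hypothetical and defined here only for illustration.

#include <stdint.h>

/* Illustrative only: declare a handle type for uint8_t buffers. */
DEFINE_XEN_GUEST_HANDLE(uint8_t);

struct example_buffer_op {
    XEN_GUEST_HANDLE(uint8_t) data;   /* 8 bytes, 8-byte aligned in memory */
    uint64_t len;
};

static void example_set_buffer(struct example_buffer_op *op,
                               uint8_t *buf, uint64_t len)
{
    /* set_xen_guest_handle() zeroes the 64-bit slot, then stores the pointer. */
    set_xen_guest_handle(op->data, buf);
    op->len = len;
}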
+
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn PRIx64
+#define PRIu_xen_pfn PRIu64
+
+/*
+ * Maximum number of virtual CPUs in legacy multi-processor guests.
+ * Only one. All other VCPUs must use VCPUOP_register_vcpu_info.
+ */
+#define XEN_LEGACY_MAX_VCPUS 1
+
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong PRIx64
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
+# define __DECL_REG(n64, n32) union { \
+ uint64_t n64; \
+ uint32_t n32; \
+ }
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
+#define __DECL_REG(n64, n32) uint64_t n64
+#endif
+
+struct vcpu_guest_core_regs
+{
+ /* Aarch64 Aarch32 */
+ __DECL_REG(x0, r0_usr);
+ __DECL_REG(x1, r1_usr);
+ __DECL_REG(x2, r2_usr);
+ __DECL_REG(x3, r3_usr);
+ __DECL_REG(x4, r4_usr);
+ __DECL_REG(x5, r5_usr);
+ __DECL_REG(x6, r6_usr);
+ __DECL_REG(x7, r7_usr);
+ __DECL_REG(x8, r8_usr);
+ __DECL_REG(x9, r9_usr);
+ __DECL_REG(x10, r10_usr);
+ __DECL_REG(x11, r11_usr);
+ __DECL_REG(x12, r12_usr);
+
+ __DECL_REG(x13, sp_usr);
+ __DECL_REG(x14, lr_usr);
+
+ __DECL_REG(x15, __unused_sp_hyp);
+
+ __DECL_REG(x16, lr_irq);
+ __DECL_REG(x17, sp_irq);
+
+ __DECL_REG(x18, lr_svc);
+ __DECL_REG(x19, sp_svc);
+
+ __DECL_REG(x20, lr_abt);
+ __DECL_REG(x21, sp_abt);
+
+ __DECL_REG(x22, lr_und);
+ __DECL_REG(x23, sp_und);
+
+ __DECL_REG(x24, r8_fiq);
+ __DECL_REG(x25, r9_fiq);
+ __DECL_REG(x26, r10_fiq);
+ __DECL_REG(x27, r11_fiq);
+ __DECL_REG(x28, r12_fiq);
+
+ __DECL_REG(x29, sp_fiq);
+ __DECL_REG(x30, lr_fiq);
+
+ /* Return address and mode */
+ __DECL_REG(pc64, pc32); /* ELR_EL2 */
+ uint64_t cpsr; /* SPSR_EL2 */
+
+ union {
+ uint64_t spsr_el1; /* AArch64 */
+ uint32_t spsr_svc; /* AArch32 */
+ };
+
+ /* AArch32 guests only */
+ uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
+
+ /* AArch64 guests only */
+ uint64_t sp_el0;
+ uint64_t sp_el1, elr_el1;
+};
+typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
+
+#undef __DECL_REG
+
+struct vcpu_guest_context {
+#define _VGCF_online 0
+#define VGCF_online (1<<_VGCF_online)
+ uint32_t flags; /* VGCF_* */
+
+ struct vcpu_guest_core_regs user_regs; /* Core CPU registers */
+
+ uint64_t sctlr;
+ uint64_t ttbcr, ttbr0, ttbr1;
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0
+#define XEN_DOMCTL_CONFIG_GIC_V2 1
+#define XEN_DOMCTL_CONFIG_GIC_V3 2
+
+#define XEN_DOMCTL_CONFIG_TEE_NONE 0
+#define XEN_DOMCTL_CONFIG_TEE_OPTEE 1
+
+struct xen_arch_domainconfig {
+ /* IN/OUT */
+ uint8_t gic_version;
+ /* IN */
+ uint16_t tee_type;
+ /* IN */
+ uint32_t nr_spis;
+ /*
+ * OUT
+ * Based on the property clock-frequency in the DT timer node.
+ * The property may be present when the bootloader/firmware doesn't
+ * correctly set CNTFRQ, which holds the timer frequency.
+ *
+ * As it's not possible to trap this register, we have to replicate
+ * the value in the guest DT.
+ *
+ * = 0 => property not present
+ * > 0 => Value of the property
+ *
+ */
+ uint32_t clock_frequency;
+};
+#endif /* __XEN__ || __XEN_TOOLS__ */
+
+struct arch_vcpu_info {
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct arch_shared_info {
+};
+typedef struct arch_shared_info arch_shared_info_t;
+typedef uint64_t xen_callback_t;
+
+#endif
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* PSR bits (CPSR, SPSR) */
+
+#define PSR_THUMB (1<<5) /* Thumb Mode enable */
+#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
+#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
+#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
+#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */
+#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */
+#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */
+#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
+#define PSR_Z (1<<30) /* Zero condition flag */
+
+/* 32 bit modes */
+#define PSR_MODE_USR 0x10
+#define PSR_MODE_FIQ 0x11
+#define PSR_MODE_IRQ 0x12
+#define PSR_MODE_SVC 0x13
+#define PSR_MODE_MON 0x16
+#define PSR_MODE_ABT 0x17
+#define PSR_MODE_HYP 0x1a
+#define PSR_MODE_UND 0x1b
+#define PSR_MODE_SYS 0x1f
+
+/* 64 bit modes */
+#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d
+#define PSR_MODE_EL3t 0x0c
+#define PSR_MODE_EL2h 0x09
+#define PSR_MODE_EL2t 0x08
+#define PSR_MODE_EL1h 0x05
+#define PSR_MODE_EL1t 0x04
+#define PSR_MODE_EL0t 0x00
+
+/*
+ * We set PSR_Z to be able to boot Linux kernel versions with an invalid
+ * encoding of the first 8 NOP instructions. See commit a92882a4d270 in
+ * Linux.
+ *
+ * Note that PSR_Z is also set by U-Boot and QEMU -kernel when loading
+ * zImage kernels on aarch32.
+ */
+#define PSR_GUEST32_INIT (PSR_Z|PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
+#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)
+
+#define SCTLR_GUEST_INIT xen_mk_ullong(0x00c50078)
+
+/*
+ * Virtual machine platform (memory layout, interrupts)
+ *
+ * These are defined for consistency between the tools and the
+ * hypervisor. Guests must not rely on these hardcoded values but
+ * should instead use the FDT.
+ */
+
+/* Physical Address Space */
+
+/* Virtio MMIO mappings */
+#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000)
+#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000)
+
+/*
+ * vGIC mappings: Only one set of mapping is used by the guest.
+ * Therefore they can overlap.
+ */
+
+/* vGIC v2 mappings */
+#define GUEST_GICD_BASE xen_mk_ullong(0x03001000)
+#define GUEST_GICD_SIZE xen_mk_ullong(0x00001000)
+#define GUEST_GICC_BASE xen_mk_ullong(0x03002000)
+#define GUEST_GICC_SIZE xen_mk_ullong(0x00002000)
+
+/* vGIC v3 mappings */
+#define GUEST_GICV3_GICD_BASE xen_mk_ullong(0x03001000)
+#define GUEST_GICV3_GICD_SIZE xen_mk_ullong(0x00010000)
+
+#define GUEST_GICV3_RDIST_REGIONS 1
+
+#define GUEST_GICV3_GICR0_BASE xen_mk_ullong(0x03020000) /* vCPU0..127 */
+#define GUEST_GICV3_GICR0_SIZE xen_mk_ullong(0x01000000)
+
+/*
+ * 256 MB is reserved for VPCI configuration space based on calculation
+ * 256 buses x 32 devices x 8 functions x 4 KB = 256 MB
+ */
+#define GUEST_VPCI_ECAM_BASE xen_mk_ullong(0x10000000)
+#define GUEST_VPCI_ECAM_SIZE xen_mk_ullong(0x10000000)
+
+/* ACPI tables physical address */
+#define GUEST_ACPI_BASE xen_mk_ullong(0x20000000)
+#define GUEST_ACPI_SIZE xen_mk_ullong(0x02000000)
+
+/* PL011 mappings */
+#define GUEST_PL011_BASE xen_mk_ullong(0x22000000)
+#define GUEST_PL011_SIZE xen_mk_ullong(0x00001000)
+
+/* Guest PCI-PCIe memory space where config space and BAR will be available.*/
+#define GUEST_VPCI_ADDR_TYPE_MEM xen_mk_ullong(0x02000000)
+#define GUEST_VPCI_MEM_ADDR xen_mk_ullong(0x23000000)
+#define GUEST_VPCI_MEM_SIZE xen_mk_ullong(0x10000000)
+
+/*
+ * 16MB == 4096 pages reserved for guest to use as a region to map its
+ * grant table in.
+ */
+#define GUEST_GNTTAB_BASE xen_mk_ullong(0x38000000)
+#define GUEST_GNTTAB_SIZE xen_mk_ullong(0x01000000)
+
+#define GUEST_MAGIC_BASE xen_mk_ullong(0x39000000)
+#define GUEST_MAGIC_SIZE xen_mk_ullong(0x01000000)
+
+#define GUEST_RAM_BANKS 2
+
+/*
+ * The way to find the extended regions (to be exposed to the guest as unused
+ * address space) relies on the fact that the regions reserved for the RAM
+ * below are big enough to also accommodate such regions.
+ */
+#define GUEST_RAM0_BASE xen_mk_ullong(0x40000000) /* 3GB of low RAM @ 1GB */
+#define GUEST_RAM0_SIZE xen_mk_ullong(0xc0000000)
+
+/* 4GB @ 4GB Prefetch Memory for VPCI */
+#define GUEST_VPCI_ADDR_TYPE_PREFETCH_MEM xen_mk_ullong(0x42000000)
+#define GUEST_VPCI_PREFETCH_MEM_ADDR xen_mk_ullong(0x100000000)
+#define GUEST_VPCI_PREFETCH_MEM_SIZE xen_mk_ullong(0x100000000)
+
+#define GUEST_RAM1_BASE xen_mk_ullong(0x0200000000) /* 1016GB of RAM @ 8GB */
+#define GUEST_RAM1_SIZE xen_mk_ullong(0xfe00000000)
+
+#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */
+/* Largest amount of actual RAM, not including holes */
+#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)
+/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */
+#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
+#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
+
+/* Current supported guest VCPUs */
+#define GUEST_MAX_VCPUS 128
+
+/* Interrupts */
+#define GUEST_TIMER_VIRT_PPI 27
+#define GUEST_TIMER_PHYS_S_PPI 29
+#define GUEST_TIMER_PHYS_NS_PPI 30
+#define GUEST_EVTCHN_PPI 31
+
+#define GUEST_VPL011_SPI 32
+
+#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
+#define GUEST_VIRTIO_MMIO_SPI_LAST 43
+
+/* PSCI functions */
+#define PSCI_cpu_suspend 0
+#define PSCI_cpu_off 1
+#define PSCI_cpu_on 2
+#define PSCI_migrate 3
+
+#endif
+
+#ifndef __ASSEMBLY__
+/* Stub definition of PMU structure */
+typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
+#endif
+
+#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/arch-x86/cpuid.h b/include/hw/xen/interface/arch-x86/cpuid.h
new file mode 100644
index 0000000000..7ecd16ae05
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/cpuid.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * arch-x86/cpuid.h
+ *
+ * CPUID interface to Xen.
+ *
+ * Copyright (c) 2007 Citrix Systems, Inc.
+ *
+ * Authors:
+ * Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
+
+/*
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g. if viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100.
+ */
+
+#define XEN_CPUID_FIRST_LEAF 0x40000000
+#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
+
+/*
+ * Leaf 1 (0x40000x00)
+ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
+ * are supported by the Xen host.
+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
+ * of a Xen host.
+ */
+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
+
+/*
+ * Leaf 2 (0x40000x01)
+ * EAX[31:16]: Xen major version.
+ * EAX[15: 0]: Xen minor version.
+ * EBX-EDX: Reserved (currently all zeroes).
+ */
+
+/*
+ * Leaf 3 (0x40000x02)
+ * EAX: Number of hypercall transfer pages. This register is always guaranteed
+ * to specify one hypercall page.
+ * EBX: Base address of Xen-specific MSRs.
+ * ECX: Features 1. Unused bits are set to zero.
+ * EDX: Features 2. Unused bits are set to zero.
+ */
+
+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
+
+/*
+ * Leaf 4 (0x40000x03)
+ * Sub-leaf 0: EAX: bit 0: emulated tsc
+ * bit 1: host tsc is known to be reliable
+ * bit 2: RDTSCP instruction available
+ * EBX: tsc_mode: 0=default (emulate if necessary), 1=emulate,
+ * 2=no emulation, 3=no emulation + TSC_AUX support
+ * ECX: guest tsc frequency in kHz
+ * EDX: guest tsc incarnation (migration count)
+ * Sub-leaf 1: EAX: tsc offset low part
+ * EBX: tsc offset high part
+ * ECX: multiplier for tsc->ns conversion
+ * EDX: shift amount for tsc->ns conversion
+ * Sub-leaf 2: EAX: host tsc frequency in kHz
+ */
+
+/*
+ * Leaf 5 (0x40000x04)
+ * HVM-specific features
+ * Sub-leaf 0: EAX: Features
+ * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
+ * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
+ */
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
+#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
+/* Memory mapped from other domains has valid IOMMU entries */
+#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
+/*
+ * With interrupt format set to 0 (non-remappable) bits 55:49 from the
+ * IO-APIC RTE and bits 11:5 from the MSI address can be used to store
+ * high bits for the Destination ID. This expands the Destination ID
+ * field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted.
+ */
+#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
+/*
+ * Per-vCPU event channel upcalls work correctly with physical IRQs
+ * bound to event channels.
+ */
+#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6)
+
+/*
+ * Leaf 6 (0x40000x05)
+ * PV-specific parameters
+ * Sub-leaf 0: EAX: max available sub-leaf
+ * Sub-leaf 0: EBX: bits 0-7: max machine address width
+ */
+
+/* Max. address width in bits taking memory hotplug into account. */
+#define XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK (0xffu << 0)
+
+#define XEN_CPUID_MAX_NUM_LEAVES 5
+
+#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
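
A hedged sketch of probing for these leaves from a guest, scanning 0x100-aligned
bases as described above; the helper is illustrative and relies on the compiler's
<cpuid.h> header.

#include <stdint.h>
#include <cpuid.h>

/* Illustrative only: return the Xen base leaf if the signature matches,
 * or 0 if no Xen leaves were found in the scanned window. */
static uint32_t example_find_xen_base_leaf(void)
{
    unsigned int base, eax, ebx, ecx, edx;

    for (base = XEN_CPUID_FIRST_LEAF; base < 0x40010000; base += 0x100) {
        __cpuid(base, eax, ebx, ecx, edx);
        if (ebx == XEN_CPUID_SIGNATURE_EBX &&
            ecx == XEN_CPUID_SIGNATURE_ECX &&
            edx == XEN_CPUID_SIGNATURE_EDX &&
            (eax - base) >= 2) {          /* at least leaves 1..2 present */
            return base;
        }
    }
    return 0;
}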
diff --git a/include/hw/xen/interface/arch-x86/xen-x86_32.h b/include/hw/xen/interface/arch-x86/xen-x86_32.h
new file mode 100644
index 0000000000..139438e835
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/xen-x86_32.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Copyright (c) 2004-2007, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
+
+/*
+ * Hypercall interface:
+ * Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6)
+ * Output: %eax
+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
+ * call hypercall_page + hypercall-number * 32
+ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
+ */
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_SS FLAT_RING3_SS
+
+#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
+#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
+#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
+#define HYPERVISOR_VIRT_START_PAE xen_mk_ulong(__HYPERVISOR_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_START_PAE xen_mk_ulong(__MACH2PHYS_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_END_PAE xen_mk_ulong(__MACH2PHYS_VIRT_END_PAE)
+
+/* Non-PAE bounds are obsolete. */
+#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
+#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
+#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
+#define HYPERVISOR_VIRT_START_NONPAE \
+ xen_mk_ulong(__HYPERVISOR_VIRT_START_NONPAE)
+#define MACH2PHYS_VIRT_START_NONPAE \
+ xen_mk_ulong(__MACH2PHYS_VIRT_START_NONPAE)
+#define MACH2PHYS_VIRT_END_NONPAE \
+ xen_mk_ulong(__MACH2PHYS_VIRT_END_NONPAE)
+
+#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
+#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
+#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
+#endif
+
+#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
+#endif
+
+/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#undef ___DEFINE_XEN_GUEST_HANDLE
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } \
+ __guest_handle_ ## name; \
+ typedef struct { union { type *p; uint64_aligned_t q; }; } \
+ __guest_handle_64_ ## name
+#undef set_xen_guest_handle_raw
+#define set_xen_guest_handle_raw(hnd, val) \
+ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
+ (hnd).p = val; \
+ } while ( 0 )
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
+#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#if defined(XEN_GENERATING_COMPAT_HEADERS)
+/* nothing */
+#elif defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax). */
+#define __DECL_REG_LO8(which) union { \
+ uint32_t e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint32_t e ## name, _e ## name; \
+ uint16_t name; \
+}
+#else
+/* Other sources must always use the proper 32-bit name (e.g., eax). */
+#define __DECL_REG_LO8(which) uint32_t e ## which ## x
+#define __DECL_REG_LO16(name) uint32_t e ## name
+#endif
+
+struct cpu_user_regs {
+ __DECL_REG_LO8(b);
+ __DECL_REG_LO8(c);
+ __DECL_REG_LO8(d);
+ __DECL_REG_LO16(si);
+ __DECL_REG_LO16(di);
+ __DECL_REG_LO16(bp);
+ __DECL_REG_LO8(a);
+ uint16_t error_code; /* private */
+ uint16_t entry_vector; /* private */
+ __DECL_REG_LO16(ip);
+ uint16_t cs;
+ uint8_t saved_upcall_mask;
+ uint8_t _pad0;
+ __DECL_REG_LO16(flags); /* eflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO16(sp);
+ uint16_t ss, _pad1;
+ uint16_t es, _pad2;
+ uint16_t ds, _pad3;
+ uint16_t fs, _pad4;
+ uint16_t gs, _pad5;
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
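
A small editorial check of the pack/unpack arithmetic above; the function is
illustrative only.

#include <assert.h>

/* Illustrative only: a PFN above the 4GB boundary survives a round trip
 * through the folded 32-bit cr3 encoding. */
static void example_cr3_roundtrip(void)
{
    unsigned long pfn = 0x123456;               /* frame just above 4GB */
    unsigned long cr3 = xen_pfn_to_cr3(pfn);    /* low bits up, high bits down */

    assert(xen_cr3_to_pfn(cr3) == pfn);
}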
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct xen_callback {
+ unsigned long cs;
+ unsigned long eip;
+};
+typedef struct xen_callback xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/arch-x86/xen-x86_64.h b/include/hw/xen/interface/arch-x86/xen-x86_64.h
new file mode 100644
index 0000000000..5d9035ed22
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/xen-x86_64.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen-x86_64.h
+ *
+ * Guest OS interface to x86 64-bit Xen.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
+
+/*
+ * Hypercall interface:
+ * Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
+ * Output: %rax
+ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
+ * call hypercall_page + hypercall-number * 32
+ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
+ */
+
+/*
+ * 64-bit segment selectors
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+
+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
+#define FLAT_RING3_CS64 0xe033 /* GDT index 262 */
+#define FLAT_RING3_DS32 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS64 0x0000 /* NULL selector */
+#define FLAT_RING3_SS32 0xe02b /* GDT index 261 */
+#define FLAT_RING3_SS64 0xe02b /* GDT index 261 */
+
+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
+
+#define FLAT_USER_DS64 FLAT_RING3_DS64
+#define FLAT_USER_DS32 FLAT_RING3_DS32
+#define FLAT_USER_DS FLAT_USER_DS64
+#define FLAT_USER_CS64 FLAT_RING3_CS64
+#define FLAT_USER_CS32 FLAT_RING3_CS32
+#define FLAT_USER_CS FLAT_USER_CS64
+#define FLAT_USER_SS64 FLAT_RING3_SS64
+#define FLAT_USER_SS32 FLAT_RING3_SS32
+#define FLAT_USER_SS FLAT_USER_SS64
+
+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_END xen_mk_ulong(__HYPERVISOR_VIRT_END)
+#endif
+
+#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/*
+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
+ * @which == SEGBASE_* ; @base == 64-bit base address
+ * Returns 0 on success.
+ */
+#define SEGBASE_FS 0
+#define SEGBASE_GS_USER 1
+#define SEGBASE_GS_KERNEL 2
+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
+
+/*
+ * int HYPERVISOR_iret(void)
+ * All arguments are on the kernel stack, in the following format.
+ * Never returns if successful. Current kernel context is lost.
+ * The saved CS is mapped as follows:
+ * RING0 -> RING3 kernel mode.
+ * RING1 -> RING3 kernel mode.
+ * RING2 -> RING3 kernel mode.
+ * RING3 -> RING3 user mode.
+ * However, RING0 indicates that the guest kernel should return to itself
+ * directly with
+ * orb $3,1*8(%rsp)
+ * iretq
+ * If flags contains VGCF_in_syscall:
+ * Restore RAX, RIP, RFLAGS, RSP.
+ * Discard R11, RCX, CS, SS.
+ * Otherwise:
+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
+ * All other registers are saved on hypercall entry and restored to user.
+ */
+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL VGCF_in_syscall
+
+#ifndef __ASSEMBLY__
+
+struct iret_context {
+ /* Top of stack (%rsp at point of hypercall). */
+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+ /* Bottom of iret stack frame. */
+};
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax). */
+#define __DECL_REG_LOHI(which) union { \
+ uint64_t r ## which ## x; \
+ uint32_t e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO8(name) union { \
+ uint64_t r ## name; \
+ uint32_t e ## name; \
+ uint16_t name; \
+ uint8_t name ## l; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint64_t r ## name; \
+ uint32_t e ## name; \
+ uint16_t name; \
+}
+#define __DECL_REG_HI(num) union { \
+ uint64_t r ## num; \
+ uint32_t r ## num ## d; \
+ uint16_t r ## num ## w; \
+ uint8_t r ## num ## b; \
+}
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
+#define __DECL_REG(name) union { \
+ uint64_t r ## name, e ## name; \
+ uint32_t _e ## name; \
+}
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
+#define __DECL_REG(name) uint64_t r ## name
+#endif
+
+#ifndef __DECL_REG_LOHI
+#define __DECL_REG_LOHI(name) __DECL_REG(name ## x)
+#define __DECL_REG_LO8 __DECL_REG
+#define __DECL_REG_LO16 __DECL_REG
+#define __DECL_REG_HI(num) uint64_t r ## num
+#endif
+
+struct cpu_user_regs {
+ __DECL_REG_HI(15);
+ __DECL_REG_HI(14);
+ __DECL_REG_HI(13);
+ __DECL_REG_HI(12);
+ __DECL_REG_LO8(bp);
+ __DECL_REG_LOHI(b);
+ __DECL_REG_HI(11);
+ __DECL_REG_HI(10);
+ __DECL_REG_HI(9);
+ __DECL_REG_HI(8);
+ __DECL_REG_LOHI(a);
+ __DECL_REG_LOHI(c);
+ __DECL_REG_LOHI(d);
+ __DECL_REG_LO8(si);
+ __DECL_REG_LO8(di);
+ uint32_t error_code; /* private */
+ uint32_t entry_vector; /* private */
+ __DECL_REG_LO16(ip);
+ uint16_t cs, _pad0[1];
+ uint8_t saved_upcall_mask;
+ uint8_t _pad1[3];
+ __DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO8(sp);
+ uint16_t ss, _pad2[3];
+ uint16_t es, _pad3[3];
+ uint16_t ds, _pad4[3];
+ uint16_t fs, _pad5[3];
+ uint16_t gs, _pad6[3];
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+#undef __DECL_REG
+#undef __DECL_REG_LOHI
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+#undef __DECL_REG_HI
+
+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
+};
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+typedef unsigned long xen_callback_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/arch-x86/xen.h b/include/hw/xen/interface/arch-x86/xen.h
new file mode 100644
index 0000000000..c0f4551247
--- /dev/null
+++ b/include/hw/xen/interface/arch-x86/xen.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * arch-x86/xen.h
+ *
+ * Guest OS interface to x86 Xen.
+ *
+ * Copyright (c) 2004-2006, K A Fraser
+ */
+
+#include "../xen.h"
+
+#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
+#define __XEN_PUBLIC_ARCH_X86_XEN_H__
+
+/* Structural guest handles introduced in 0x00030201. */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030201
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer when passed as a field
+ * in a struct in memory.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer when passed as a
+ * hypercall argument.
+ * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
+ * they might not be on other architectures.
+ */
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
+#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
+
+#if defined(__i386__)
+# ifdef __XEN__
+__DeFiNe__ __DECL_REG_LO8(which) uint32_t e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) union { uint32_t e ## name; }
+# endif
+#include "xen-x86_32.h"
+# ifdef __XEN__
+__UnDeF__ __DECL_REG_LO8
+__UnDeF__ __DECL_REG_LO16
+__DeFiNe__ __DECL_REG_LO8(which) e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) e ## name
+# endif
+#elif defined(__x86_64__)
+#include "xen-x86_64.h"
+#endif
+
+#ifndef __ASSEMBLY__
+typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn "lx"
+#define PRIu_xen_pfn "lu"
+#endif
+
+#define XEN_HAVE_PV_GUEST_ENTRY 1
+
+#define XEN_HAVE_PV_UPCALL_MASK 1
+
+/*
+ * `incontents 200 segdesc Segment Descriptor Tables
+ */
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_gdt(const xen_pfn_t frames[], unsigned int entries);
+ * `
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ *
+ * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
+ */
+#define FIRST_RESERVED_GDT_PAGE 14
+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
+ * `
+ * ` @pa The machine physical address of the descriptor to
+ * ` update. Must be either a descriptor page or writable.
+ * ` @desc The descriptor value to update, in the same format as a
+ * ` native descriptor table entry.
+ */
+
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+#define XEN_LEGACY_MAX_VCPUS 32
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp);
+ * `
+ * Sets the stack segment and pointer for the current vcpu.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_trap_table(const struct trap_info traps[]);
+ * `
+ */
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table().
+ * Terminate the array with a sentinel entry, with traps[].address==0.
+ * The privilege level specifies which modes may enter a trap via a software
+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
+ * privilege levels as follows:
+ * Level == 0: No one may enter
+ * Level == 1: Kernel may enter
+ * Level == 2: Kernel may enter
+ * Level == 3: Everyone may enter
+ *
+ * Note: For compatibility with kernels not setting up exception handlers
+ * early enough, Xen will avoid trying to inject #GP (and hence crash
+ * the domain) when an RDMSR would require this, but no handler was
+ * set yet. The precise conditions are implementation specific, and
+ * new code may not rely on such behavior anyway.
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
+struct trap_info {
+ uint8_t vector; /* exception vector */
+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
+ uint16_t cs; /* code selector */
+ unsigned long address; /* code offset */
+};
+typedef struct trap_info trap_info_t;
+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ *
+ * Also note that when calling DOMCTL_setvcpucontext for HVM guests, not all
+ * information in this structure is updated, the fields read include: fpu_ctxt
+ * (if VGCT_I387_VALID is set), flags, user_regs and debugreg[*].
+ *
+ * Note: VCPUOP_initialise for HVM guests is non-symmetric with
+ * DOMCTL_setvcpucontext, and uses struct vcpu_hvm_context from hvm/hvm_vcpu.h
+ */
+struct vcpu_guest_context {
+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_IN_KERNEL (1<<2)
+#define _VGCF_i387_valid 0
+#define VGCF_i387_valid (1<<_VGCF_i387_valid)
+#define _VGCF_in_kernel 2
+#define VGCF_in_kernel (1<<_VGCF_in_kernel)
+#define _VGCF_failsafe_disables_events 3
+#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
+#define _VGCF_syscall_disables_events 4
+#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
+#define _VGCF_online 5
+#define VGCF_online (1<<_VGCF_online)
+ unsigned long flags; /* VGCF_* flags */
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+#ifdef __i386__
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+#else
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_eip;
+#ifdef __XEN__
+ union {
+ unsigned long syscall_callback_eip;
+ struct {
+ unsigned int event_callback_cs; /* compat CS of event cb */
+ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
+ };
+ };
+#else
+ unsigned long syscall_callback_eip;
+#endif
+#endif
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+#ifdef __x86_64__
+ /* Segment base addresses. */
+ uint64_t fs_base;
+ uint64_t gs_base_kernel;
+ uint64_t gs_base_user;
+#endif
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct arch_shared_info {
+ /*
+ * Number of valid entries in the p2m table(s) anchored at
+ * pfn_to_mfn_frame_list_list and/or p2m_vaddr.
+ */
+ unsigned long max_pfn;
+ /*
+ * Frame containing list of mfns containing list of mfns containing p2m.
+ * A value of 0 indicates it has not yet been set up, ~0 indicates it has
+ * been set to invalid e.g. due to the p2m being too large for the 3-level
+ * p2m tree. In this case the linear mapper p2m list anchored at p2m_vaddr
+ * is to be used.
+ */
+ xen_pfn_t pfn_to_mfn_frame_list_list;
+ unsigned long nmi_reason;
+ /*
+ * Following three fields are valid if p2m_cr3 contains a value different
+ * from 0.
+ * p2m_cr3 is the root of the address space where p2m_vaddr is valid.
+ * p2m_cr3 is in the same format as a cr3 value in the vcpu register state
+ * and holds the folded machine frame number (via xen_pfn_to_cr3) of a
+ * L3 or L4 page table.
+ * p2m_vaddr holds the virtual address of the linear p2m list. All entries
+ * in the range [0...max_pfn[ are accessible via this pointer.
+ * p2m_generation will be incremented by the guest before and after each
+ * change of the mappings of the p2m list. p2m_generation starts at 0 and
+ * a value with the least significant bit set indicates that a mapping
+ * update is in progress. This allows guest external software (e.g. in Dom0)
+ * to verify that read mappings are consistent and whether they have changed
+ * since the last check.
+ * Modifying a p2m element in the linear p2m list is allowed via an atomic
+ * write only.
+ */
+ unsigned long p2m_cr3; /* cr3 value of the p2m address space */
+ unsigned long p2m_vaddr; /* virtual address of the p2m list */
+ unsigned long p2m_generation; /* generation count of p2m mapping */
+#ifdef __i386__
+ /* There's no room for this field in the generic structure. */
+ uint32_t wc_sec_hi;
+#endif
+};
+typedef struct arch_shared_info arch_shared_info_t;
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+struct xen_arch_domainconfig {
+#define _XEN_X86_EMU_LAPIC 0
+#define XEN_X86_EMU_LAPIC (1U<<_XEN_X86_EMU_LAPIC)
+#define _XEN_X86_EMU_HPET 1
+#define XEN_X86_EMU_HPET (1U<<_XEN_X86_EMU_HPET)
+#define _XEN_X86_EMU_PM 2
+#define XEN_X86_EMU_PM (1U<<_XEN_X86_EMU_PM)
+#define _XEN_X86_EMU_RTC 3
+#define XEN_X86_EMU_RTC (1U<<_XEN_X86_EMU_RTC)
+#define _XEN_X86_EMU_IOAPIC 4
+#define XEN_X86_EMU_IOAPIC (1U<<_XEN_X86_EMU_IOAPIC)
+#define _XEN_X86_EMU_PIC 5
+#define XEN_X86_EMU_PIC (1U<<_XEN_X86_EMU_PIC)
+#define _XEN_X86_EMU_VGA 6
+#define XEN_X86_EMU_VGA (1U<<_XEN_X86_EMU_VGA)
+#define _XEN_X86_EMU_IOMMU 7
+#define XEN_X86_EMU_IOMMU (1U<<_XEN_X86_EMU_IOMMU)
+#define _XEN_X86_EMU_PIT 8
+#define XEN_X86_EMU_PIT (1U<<_XEN_X86_EMU_PIT)
+#define _XEN_X86_EMU_USE_PIRQ 9
+#define XEN_X86_EMU_USE_PIRQ (1U<<_XEN_X86_EMU_USE_PIRQ)
+#define _XEN_X86_EMU_VPCI 10
+#define XEN_X86_EMU_VPCI (1U<<_XEN_X86_EMU_VPCI)
+
+#define XEN_X86_EMU_ALL (XEN_X86_EMU_LAPIC | XEN_X86_EMU_HPET | \
+ XEN_X86_EMU_PM | XEN_X86_EMU_RTC | \
+ XEN_X86_EMU_IOAPIC | XEN_X86_EMU_PIC | \
+ XEN_X86_EMU_VGA | XEN_X86_EMU_IOMMU | \
+ XEN_X86_EMU_PIT | XEN_X86_EMU_USE_PIRQ |\
+ XEN_X86_EMU_VPCI)
+ uint32_t emulation_flags;
+
+/*
+ * Select whether to use a relaxed behavior for accesses to MSRs not explicitly
+ * handled by Xen instead of injecting a #GP to the guest. Note this option
+ * doesn't allow the guest to read or write to the underlying MSR.
+ */
+#define XEN_X86_MSR_RELAXED (1u << 0)
+ uint32_t misc_flags;
+};
+
+/* Max XEN_X86_* constant. Used for ABI checking. */
+#define XEN_X86_MISC_FLAGS_MAX XEN_X86_MSR_RELAXED
+
+#endif
+
+/*
+ * Representations of architectural CPUID and MSR information. Used as the
+ * serialised version of Xen's internal representation.
+ */
+typedef struct xen_cpuid_leaf {
+#define XEN_CPUID_NO_SUBLEAF 0xffffffffu
+ uint32_t leaf, subleaf;
+ uint32_t a, b, c, d;
+} xen_cpuid_leaf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_cpuid_leaf_t);
+
+typedef struct xen_msr_entry {
+ uint32_t idx;
+ uint32_t flags; /* Reserved MBZ. */
+ uint64_t val;
+} xen_msr_entry_t;
+DEFINE_XEN_GUEST_HANDLE(xen_msr_entry_t);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_fpu_taskswitch(int set);
+ * `
+ * Sets (if set!=0) or clears (if set==0) CR0.TS.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_debugreg(int regno, unsigned long value);
+ *
+ * ` unsigned long
+ * ` HYPERVISOR_get_debugreg(int regno);
+ * For 0<=reg<=7, returns the debug register value.
+ * For other values of reg, returns ((unsigned long)-EINVAL).
+ * (Unfortunately, this interface is defective.)
+ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+#endif
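
A hedged sketch of using the emulation prefix from C: the cpuid below is forcibly
intercepted by Xen rather than executed natively. The wrapper name is illustrative.

#include <stdint.h>

/* Illustrative only: CPUID preceded by XEN_EMULATE_PREFIX is emulated by
 * Xen even where the instruction would not normally trap. */
static inline void example_xen_cpuid(uint32_t leaf, uint32_t subleaf,
                                     uint32_t *eax, uint32_t *ebx,
                                     uint32_t *ecx, uint32_t *edx)
{
    asm volatile(XEN_CPUID
                 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                 : "0" (leaf), "2" (subleaf));
}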
+
+/*
+ * Debug console IO port, also called "port E9 hack". Each character written
+ * to this IO port will be printed on the hypervisor console, subject to log
+ * level restrictions.
+ */
+#define XEN_HVM_DEBUGCONS_IOPORT 0xe9
+
+#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/event_channel.h b/include/hw/xen/interface/event_channel.h
new file mode 100644
index 0000000000..0d91a1c4af
--- /dev/null
+++ b/include/hw/xen/interface/event_channel.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * event_channel.h
+ *
+ * Event channels between domains.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+
+#include "xen.h"
+
+/*
+ * `incontents 150 evtchn Event Channels
+ *
+ * Event channels are the basic primitive provided by Xen for event
+ * notifications. An event is the Xen equivalent of a hardware
+ * interrupt. They essentially store one bit of information; the event
+ * of interest is signalled by transitioning this bit from 0 to 1.
+ *
+ * Notifications are received by a guest via an upcall from Xen,
+ * indicating when an event arrives (setting the bit). Further
+ * notifications are masked until the bit is cleared again (therefore,
+ * guests must check the value of the bit after re-enabling event
+ * delivery to ensure no missed notifications).
+ *
+ * Event notifications can be masked by setting a flag; this is
+ * equivalent to disabling interrupts and can be used to ensure
+ * atomicity of certain operations in the guest kernel.
+ *
+ * Event channels are represented by the evtchn_* fields in
+ * struct shared_info and struct vcpu_info.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args)
+ * `
+ * @cmd == EVTCHNOP_* (event-channel operation).
+ * @args == struct evtchn_* Operation-specific extra arguments (NULL if none).
+ */
+
+/* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */
+#define EVTCHNOP_bind_interdomain 0
+#define EVTCHNOP_bind_virq 1
+#define EVTCHNOP_bind_pirq 2
+#define EVTCHNOP_close 3
+#define EVTCHNOP_send 4
+#define EVTCHNOP_status 5
+#define EVTCHNOP_alloc_unbound 6
+#define EVTCHNOP_bind_ipi 7
+#define EVTCHNOP_bind_vcpu 8
+#define EVTCHNOP_unmask 9
+#define EVTCHNOP_reset 10
+#define EVTCHNOP_init_control 11
+#define EVTCHNOP_expand_array 12
+#define EVTCHNOP_set_priority 13
+#ifdef __XEN__
+#define EVTCHNOP_reset_cont 14
+#endif
+/* ` } */
+
+typedef uint32_t evtchn_port_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_alloc_unbound {
+ /* IN parameters */
+ domid_t dom, remote_dom;
+ /* OUT parameters */
+ evtchn_port_t port;
+};
+typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
+ * a port that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
+ *
+ * In case the peer domain has already tried to set our event channel
+ * pending, before it was bound, EVTCHNOP_bind_interdomain always sets
+ * the local event channel pending.
+ *
+ * The usual pattern of use, in the guest's upcall (or subsequent
+ * handler) is as follows: (Re-enable the event channel for subsequent
+ * signalling and then) check for the existence of whatever condition
+ * is being waited for by other means, and take whatever action is
+ * needed (if any).
+ *
+ * NOTES:
+ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+struct evtchn_bind_interdomain {
+ /* IN parameters. */
+ domid_t remote_dom;
+ evtchn_port_t remote_port;
+ /* OUT parameters. */
+ evtchn_port_t local_port;
+};
+typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
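
To make the alloc_unbound/bind_interdomain handshake concrete, the sketch below shows the two hypercalls a pair of cooperating domains would issue through the HYPERVISOR_event_channel_op entry point described above. The wrapper declaration, helper names, and the way the port is advertised (e.g. via xenstore) are assumptions for the example, not part of this header.

extern int HYPERVISOR_event_channel_op(int cmd, void *arg); /* assumed guest wrapper */

/* Domain A: offer a port that domain 'remote' may later bind to. */
static int offer_unbound_port(domid_t remote, evtchn_port_t *port)
{
    struct evtchn_alloc_unbound alloc = {
        .dom        = DOMID_SELF,
        .remote_dom = remote,
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
    if (rc == 0)
        *port = alloc.port;   /* advertise this port to the peer, e.g. via xenstore */
    return rc;
}

/* Domain B: bind to the advertised <remote domid, remote port> pair. */
static int bind_remote_port(domid_t remote, evtchn_port_t remote_port,
                            evtchn_port_t *local_port)
{
    struct evtchn_bind_interdomain bind = {
        .remote_dom  = remote,
        .remote_port = remote_port,
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind);
    if (rc == 0)
        *local_port = bind.local_port;
    return rc;
}

Once both calls have succeeded, either side can notify the other with EVTCHNOP_send on its local port.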
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
+ * vcpu.
+ * NOTES:
+ * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
+ * in xen.h for the classification of each VIRQ.
+ * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
+ * re-bound via EVTCHNOP_bind_vcpu.
+ * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
+ * The allocated event channel is bound to the specified vcpu and the
+ * binding cannot be changed.
+ */
+struct evtchn_bind_virq {
+ /* IN parameters. */
+ uint32_t virq; /* enum virq */
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_virq evtchn_bind_virq_t;
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ <irq>).
+ * NOTES:
+ * 1. A physical IRQ may be bound to at most one event channel per domain.
+ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+struct evtchn_bind_pirq {
+ /* IN parameters. */
+ uint32_t pirq;
+#define BIND_PIRQ__WILL_SHARE 1
+ uint32_t flags; /* BIND_PIRQ__* */
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
+
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ * NOTES:
+ * 1. The allocated event channel is bound to the specified vcpu. The binding
+ * may not be changed.
+ */
+struct evtchn_bind_ipi {
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+struct evtchn_close {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_close evtchn_close_t;
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+struct evtchn_send {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_send evtchn_send_t;
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may obtain the status of an event
+ * channel for which <dom> is not DOMID_SELF.
+ */
+struct evtchn_status {
+ /* IN parameters */
+ domid_t dom;
+ evtchn_port_t port;
+ /* OUT parameters */
+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
+#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
+#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
+ uint32_t status;
+ uint32_t vcpu; /* VCPU to which this channel is bound. */
+ union {
+ struct {
+ domid_t dom;
+ } unbound; /* EVTCHNSTAT_unbound */
+ struct {
+ domid_t dom;
+ evtchn_port_t port;
+ } interdomain; /* EVTCHNSTAT_interdomain */
+ uint32_t pirq; /* EVTCHNSTAT_pirq */
+ uint32_t virq; /* EVTCHNSTAT_virq */
+ } u;
+};
+typedef struct evtchn_status evtchn_status_t;
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ * 1. IPI-bound channels always notify the vcpu specified at bind time.
+ * This binding cannot be changed.
+ * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
+ * This binding cannot be changed.
+ * 3. All other channels notify vcpu0 by default. This default is set when
+ * the channel is allocated (a port that is freed and subsequently reused
+ * has its binding reset to vcpu0).
+ */
+struct evtchn_bind_vcpu {
+ /* IN parameters. */
+ evtchn_port_t port;
+ uint32_t vcpu;
+};
+typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+struct evtchn_unmask {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+typedef struct evtchn_unmask evtchn_unmask_t;
+
+/*
+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
+ * 3. Destroys all control blocks and event array, resets event channel
+ * operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO
+ * ABI was used. Guests should not bind events during an EVTCHNOP_reset call
+ * as these events are likely to be lost.
+ */
+struct evtchn_reset {
+ /* IN parameters. */
+ domid_t dom;
+};
+typedef struct evtchn_reset evtchn_reset_t;
+
+/*
+ * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
+ *
+ * Note: any events that are currently pending will not be resent and
+ * will be lost. Guests should call this before binding any event to
+ * avoid losing any events.
+ */
+struct evtchn_init_control {
+ /* IN parameters. */
+ uint64_t control_gfn;
+ uint32_t offset;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ uint8_t link_bits;
+ uint8_t _pad[7];
+};
+typedef struct evtchn_init_control evtchn_init_control_t;
+
+/*
+ * EVTCHNOP_expand_array: add an additional page to the event array.
+ */
+struct evtchn_expand_array {
+ /* IN parameters. */
+ uint64_t array_gfn;
+};
+typedef struct evtchn_expand_array evtchn_expand_array_t;
+
+/*
+ * EVTCHNOP_set_priority: set the priority for an event channel.
+ */
+struct evtchn_set_priority {
+ /* IN parameters. */
+ evtchn_port_t port;
+ uint32_t priority;
+};
+typedef struct evtchn_set_priority evtchn_set_priority_t;
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
+ * `
+ * Superseded by the new event_channel_op() hypercall since 0x00030202.
+ */
+struct evtchn_op {
+ uint32_t cmd; /* enum event_channel_op */
+ union {
+ evtchn_alloc_unbound_t alloc_unbound;
+ evtchn_bind_interdomain_t bind_interdomain;
+ evtchn_bind_virq_t bind_virq;
+ evtchn_bind_pirq_t bind_pirq;
+ evtchn_bind_ipi_t bind_ipi;
+ evtchn_close_t close;
+ evtchn_send_t send;
+ evtchn_status_t status;
+ evtchn_bind_vcpu_t bind_vcpu;
+ evtchn_unmask_t unmask;
+ } u;
+};
+typedef struct evtchn_op evtchn_op_t;
+DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
+
+/*
+ * 2-level ABI
+ */
+
+#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
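
For example, with a 64-bit xen_ulong_t this evaluates to 8 * 8 * 64 = 4096 channels; with a 32-bit xen_ulong_t it is 4 * 4 * 64 = 1024.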
+
+/*
+ * FIFO ABI
+ */
+
+/* Events may have priorities from 0 (highest) to 15 (lowest). */
+#define EVTCHN_FIFO_PRIORITY_MAX 0
+#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
+#define EVTCHN_FIFO_PRIORITY_MIN 15
+
+#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
+
+typedef uint32_t event_word_t;
+
+#define EVTCHN_FIFO_PENDING 31
+#define EVTCHN_FIFO_MASKED 30
+#define EVTCHN_FIFO_LINKED 29
+#define EVTCHN_FIFO_BUSY 28
+
+#define EVTCHN_FIFO_LINK_BITS 17
+#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
+
+#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
+
+struct evtchn_fifo_control_block {
+ uint32_t ready;
+ uint32_t _rsvd;
+ uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t;
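
The EVTCHN_FIFO_* values above are bit positions within an event_word_t, and the low EVTCHN_FIFO_LINK_BITS bits link an event into its priority queue. The helpers below are only a sketch of the bit layout (names are illustrative); a real guest would access the shared event array with atomic operations.

/* Sketch: decode a FIFO ABI event word using the bit definitions above. */
static inline int evtchn_fifo_is_pending(event_word_t w)
{
    return (w >> EVTCHN_FIFO_PENDING) & 1;
}

static inline int evtchn_fifo_is_masked(event_word_t w)
{
    return (w >> EVTCHN_FIFO_MASKED) & 1;
}

static inline uint32_t evtchn_fifo_link(event_word_t w)
{
    return w & EVTCHN_FIFO_LINK_MASK;   /* index of the next event in the queue */
}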
+
+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/features.h b/include/hw/xen/interface/features.h
new file mode 100644
index 0000000000..d2a9175aae
--- /dev/null
+++ b/include/hw/xen/interface/features.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * features.h
+ *
+ * Feature flags, reported by XENVER_get_features.
+ *
+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_FEATURES_H__
+#define __XEN_PUBLIC_FEATURES_H__
+
+/*
+ * `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES
+ *
+ * The list of all the features the guest supports. They are set by
+ * parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES
+ * string. The format is the feature names (as given here without the
+ * "XENFEAT_" prefix) separated by '|' characters.
+ * If a feature is required for the kernel to function then the feature name
+ * must be preceded by a '!' character.
+ *
+ * Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then
+ * XENFEAT_dom0 MUST be set if the guest is to be booted as dom0.
+ */
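
For illustration only: a kernel that requires writable page tables and optionally supports auto-translated physmap could carry a note whose payload is the string below. How the ELF note is actually emitted is toolchain and kernel specific and not shown here.

/* Sketch: example XEN_ELFNOTE_FEATURES payload ('!' marks a required feature). */
static const char xen_features_note[] =
    "!writable_page_tables|auto_translated_physmap";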
+
+/*
+ * If set, the guest does not need to write-protect its pagetables, and can
+ * update them via direct writes.
+ */
+#define XENFEAT_writable_page_tables 0
+
+/*
+ * If set, the guest does not need to write-protect its segment descriptor
+ * tables, and can update them via direct writes.
+ */
+#define XENFEAT_writable_descriptor_tables 1
+
+/*
+ * If set, translation between the guest's 'pseudo-physical' address space
+ * and the host's machine address space is handled by the hypervisor. In this
+ * mode the guest does not need to perform phys-to/from-machine translations
+ * when performing page table operations.
+ */
+#define XENFEAT_auto_translated_physmap 2
+
+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
+#define XENFEAT_supervisor_mode_kernel 3
+
+/*
+ * If set, the guest does not need to allocate x86 PAE page directories
+ * below 4GB. This flag is usually implied by auto_translated_physmap.
+ */
+#define XENFEAT_pae_pgdir_above_4gb 4
+
+/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
+#define XENFEAT_mmu_pt_update_preserve_ad 5
+
+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist 6
+
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
+/* x86: Does this Xen host support the HVM callback vector type? */
+#define XENFEAT_hvm_callback_vector 8
+
+/* x86: pvclock algorithm is safe to use on HVM */
+#define XENFEAT_hvm_safe_pvclock 9
+
+/* x86: pirq can be used by HVM guests */
+#define XENFEAT_hvm_pirqs 10
+
+/* operation as Dom0 is supported */
+#define XENFEAT_dom0 11
+
+/* Xen also maps grant references at pfn = mfn.
+ * This feature flag is deprecated and should not be used.
+#define XENFEAT_grant_map_identity 12
+ */
+
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
+/* arm: Hypervisor supports ARM SMC calling convention. */
+#define XENFEAT_ARM_SMCCC_supported 14
+
+/*
+ * x86/PVH: If set, ACPI RSDP can be placed at any address. Otherwise RSDP
+ * must be located in lower 1MB, as required by ACPI Specification for IA-PC
+ * systems.
+ * This feature flag is only consulted if XEN_ELFNOTE_GUEST_OS contains
+ * the "linux" string.
+ */
+#define XENFEAT_linux_rsdp_unrestricted 15
+
+/*
+ * A direct-mapped (or 1:1 mapped) domain is a domain for which its
+ * local pages have gfn == mfn. If a domain is direct-mapped,
+ * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped
+ * is set.
+ *
+ * If neither flag is set (e.g. older Xen releases) the assumptions are:
+ * - non-auto_translated domains (x86 only) are always direct-mapped
+ * - on x86, auto_translated domains are not direct-mapped
+ * - on ARM, Dom0 is direct-mapped, DomUs are not
+ */
+#define XENFEAT_not_direct_mapped 16
+#define XENFEAT_direct_mapped 17
+
+#define XENFEAT_NR_SUBMAPS 1
+
+#endif /* __XEN_PUBLIC_FEATURES_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/grant_table.h b/include/hw/xen/interface/grant_table.h
index 2af0cbdde3..1dfa17a6d0 100644
--- a/include/hw/xen/interface/grant_table.h
+++ b/include/hw/xen/interface/grant_table.h
@@ -1,36 +1,669 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* grant_table.h
*
* Interface for granting foreign access to page frames, and receiving
* page-ownership transfers.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2004, K A Fraser
*/
#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
#define __XEN_PUBLIC_GRANT_TABLE_H__
+#include "xen.h"
+
+/*
+ * `incontents 150 gnttab Grant Tables
+ *
+ * Xen's grant tables provide a generic mechanism for memory sharing
+ * between domains. This shared memory interface underpins the split
+ * device drivers for block and network IO.
+ *
+ * Each domain has its own grant table. This is a data structure that
+ * is shared with Xen; it allows the domain to tell Xen what kind of
+ * permissions other domains have on its pages. Entries in the grant
+ * table are identified by grant references. A grant reference is an
+ * integer, which indexes into the grant table. It acts as a
+ * capability which the grantee can use to perform operations on the
+ * granter's memory.
+ *
+ * This capability-based system allows shared-memory communications
+ * between unprivileged domains. A grant reference also encapsulates
+ * the details of a shared page, removing the need for a domain to
+ * know the real machine address of a page it is sharing. This makes
+ * it possible to share memory correctly with domains running in
+ * fully virtualised memory.
+ */
+
+/***********************************
+ * GRANT TABLE REPRESENTATION
+ */
+
+/* Some rough guidelines on accessing and updating grant-table entries
+ * in a concurrency-safe manner. For more information, Linux contains a
+ * reference implementation for guest OSes (drivers/xen/grant-table.c, see
+ * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD).
+ *
+ * NB. WMB is a no-op on current-generation x86 processors. However, a
+ * compiler barrier will still be required.
+ *
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ *
+ * Invalidating an unused GTF_permit_access entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ *
+ * Invalidating an in-use GTF_permit_access entry:
+ * This cannot be done directly. Request assistance from the domain controller
+ * which can set a timeout on the use of a grant entry and take necessary
+ * action. (NB. This is not yet implemented!).
+ *
+ * Invalidating an unused GTF_accept_transfer entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & GTF_transfer_committed). [*]
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
+ * The guest must /not/ modify the grant entry until the address of the
+ * transferred frame is written. It is safe for the guest to spin waiting
+ * for this to occur (detect by observing GTF_transfer_completed in
+ * ent->flags).
+ *
+ * Invalidating a committed GTF_accept_transfer entry:
+ * 1. Wait for (ent->flags & GTF_transfer_completed).
+ *
+ * Changing a GTF_permit_access from writable to read-only:
+ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
+ *
+ * Changing a GTF_permit_access from read-only to writable:
+ * Use SMP-safe bit-setting instruction.
+ */
+
/*
* Reference to a grant entry in a specified domain's grant table.
*/
typedef uint32_t grant_ref_t;
+/*
+ * A grant table comprises a packed array of grant entries in one or more
+ * page frames shared between Xen and a guest.
+ * [XEN]: This field is written by Xen and read by the sharing guest.
+ * [GST]: This field is written by the guest and read by Xen.
+ */
+
+/*
+ * Version 1 of the grant table entry structure is maintained largely for
+ * backwards compatibility. New guests are recommended to support using
+ * version 2 to overcome version 1 limitations, but to default to version 1.
+ */
+#if __XEN_INTERFACE_VERSION__ < 0x0003020a
+#define grant_entry_v1 grant_entry
+#define grant_entry_v1_t grant_entry_t
+#endif
+struct grant_entry_v1 {
+ /* GTF_xxx: various type and flag information. [XEN,GST] */
+ uint16_t flags;
+ /* The domain being granted foreign privileges. [GST] */
+ domid_t domid;
+ /*
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
+ */
+ uint32_t frame;
+};
+typedef struct grant_entry_v1 grant_entry_v1_t;
+
+/* The first few grant table entries will be preserved across grant table
+ * version changes and may be pre-populated at domain creation by tools.
+ */
+#define GNTTAB_NR_RESERVED_ENTRIES 8
+#define GNTTAB_RESERVED_CONSOLE 0
+#define GNTTAB_RESERVED_XENSTORE 1
+
+/*
+ * Type of grant entry.
+ * GTF_invalid: This grant entry grants no privileges.
+ * GTF_permit_access: Allow @domid to map/access @frame.
+ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
+ * to this guest. Xen writes the page number to @frame.
+ * GTF_transitive: Allow @domid to transitively access a subrange of
+ * @trans_grant in @trans_domid. No mappings are allowed.
+ */
+#define GTF_invalid (0U<<0)
+#define GTF_permit_access (1U<<0)
+#define GTF_accept_transfer (2U<<0)
+#define GTF_transitive (3U<<0)
+#define GTF_type_mask (3U<<0)
+
+/*
+ * Subflags for GTF_permit_access and GTF_transitive.
+ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
+ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
+ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * Further subflags for GTF_permit_access only.
+ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for
+ * mappings of the grant [GST]
+ * GTF_sub_page: Grant access to only a subrange of the page. @domid
+ * will only be allowed to copy from the grant, and not
+ * map it. [GST]
+ */
+#define _GTF_readonly (2)
+#define GTF_readonly (1U<<_GTF_readonly)
+#define _GTF_reading (3)
+#define GTF_reading (1U<<_GTF_reading)
+#define _GTF_writing (4)
+#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_PWT (5)
+#define GTF_PWT (1U<<_GTF_PWT)
+#define _GTF_PCD (6)
+#define GTF_PCD (1U<<_GTF_PCD)
+#define _GTF_PAT (7)
+#define GTF_PAT (1U<<_GTF_PAT)
+#define _GTF_sub_page (8)
+#define GTF_sub_page (1U<<_GTF_sub_page)
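
Putting the v1 entry layout and the flag bits above together with the ordering rules described earlier, introducing a valid GTF_permit_access entry looks roughly as follows. This is only a sketch: the write barrier, the way the entry pointer is obtained, and the helper name are assumptions, not part of this header.

/* Sketch: publish a GTF_permit_access entry per the ordering rules above. */
#define wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)   /* assumed barrier */

static void grant_v1_permit_access(struct grant_entry_v1 *ent,
                                   domid_t domid, uint32_t gfn, int readonly)
{
    ent->domid = domid;                 /* 1. domain being granted access      */
    ent->frame = gfn;                   /* 2. frame to which access is granted */
    wmb();                              /* 3. order the writes above...        */
    ent->flags = GTF_permit_access |    /* 4. ...before the entry goes valid   */
                 (readonly ? GTF_readonly : 0);
}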
+
+/*
+ * Subflags for GTF_accept_transfer:
+ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
+ * to transferring ownership of a page frame. When a guest sees this flag
+ * it must /not/ modify the grant entry until GTF_transfer_completed is
+ * set by Xen.
+ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
+ * after reading GTF_transfer_committed. Xen will always write the frame
+ * address, followed by ORing this flag, in a timely manner.
+ */
+#define _GTF_transfer_committed (2)
+#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
+#define _GTF_transfer_completed (3)
+#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
+
+/*
+ * Version 2 grant table entries. These fulfil the same role as
+ * version 1 entries, but can represent more complicated operations.
+ * Any given domain will have either a version 1 or a version 2 table,
+ * and every entry in the table will be the same version.
+ *
+ * The interface by which domains use grant references does not depend
+ * on the grant table version in use by the other domain.
+ */
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * Version 1 and version 2 grant entries share a common prefix. The
+ * fields of the prefix are documented as part of struct
+ * grant_entry_v1.
+ */
+struct grant_entry_header {
+ uint16_t flags;
+ domid_t domid;
+};
+typedef struct grant_entry_header grant_entry_header_t;
+
+/*
+ * Version 2 of the grant entry structure.
+ */
+union grant_entry_v2 {
+ grant_entry_header_t hdr;
+
+ /*
+ * This member is used for V1-style full page grants, where either:
+ *
+ * -- hdr.type is GTF_accept_transfer, or
+ * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
+ *
+ * In that case, the frame field has the same semantics as the
+ * field of the same name in the V1 entry structure.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ uint32_t pad0;
+ uint64_t frame;
+ } full_page;
+
+ /*
+ * If the grant type is GTF_permit_access and GTF_sub_page is set,
+ * @domid is allowed to access bytes [@page_off,@page_off+@length)
+ * in frame @frame.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ uint16_t page_off;
+ uint16_t length;
+ uint64_t frame;
+ } sub_page;
+
+ /*
+ * If the grant is GTF_transitive, @domid is allowed to use the
+ * grant @gref in domain @trans_domid, as if it was the local
+ * domain. Obviously, the transitive access must be compatible
+ * with the original grant.
+ *
+ * The current version of Xen does not allow transitive grants
+ * to be mapped.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ domid_t trans_domid;
+ uint16_t pad0;
+ grant_ref_t gref;
+ } transitive;
+
+ uint32_t __spacer[4]; /* Pad to a power of two */
+};
+typedef union grant_entry_v2 grant_entry_v2_t;
+
+typedef uint16_t grant_status_t;
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
+ */
+
+/* ` enum neg_errnoval
+ * ` HYPERVISOR_grant_table_op(enum grant_table_op cmd,
+ * ` void *args,
+ * ` unsigned int count)
+ * `
+ *
+ * @args points to an array of per-command data structures. The array
+ * has @count members.
+ */
+
+/* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */
+#define GNTTABOP_map_grant_ref 0
+#define GNTTABOP_unmap_grant_ref 1
+#define GNTTABOP_setup_table 2
+#define GNTTABOP_dump_table 3
+#define GNTTABOP_transfer 4
+#define GNTTABOP_copy 5
+#define GNTTABOP_query_size 6
+#define GNTTABOP_unmap_and_replace 7
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+#define GNTTABOP_set_version 8
+#define GNTTABOP_get_status_frames 9
+#define GNTTABOP_get_version 10
+#define GNTTABOP_swap_grant_ref 11
+#define GNTTABOP_cache_flush 12
+#endif /* __XEN_INTERFACE_VERSION__ */
+/* ` } */
+
+/*
+ * Handle to track a mapping created via a grant reference.
+ */
+typedef uint32_t grant_handle_t;
+
+/*
+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
+ * that must be presented later to destroy the mapping(s). On error, <status>
+ * is a negative status code.
+ * NOTES:
+ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
+ * via which I/O devices may access the granted frame.
+ * 2. If GNTMAP_host_map is specified then a mapping will be added at
+ * either a host virtual address in the current address space, or at
+ * a PTE at the specified machine address. The type of mapping to
+ * perform is selected through the GNTMAP_contains_pte flag, and the
+ * address is specified in <host_addr>.
+ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
+ * host mapping is destroyed by other means then it is *NOT* guaranteed
+ * to be accounted to the correct grant reference!
+ */
+struct gnttab_map_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint32_t flags; /* GNTMAP_* */
+ grant_ref_t ref;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+};
+typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
+
+/*
+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
+ * field is ignored. If non-zero, they must refer to a device/host mapping
+ * that is tracked by <handle>.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+struct gnttab_unmap_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t dev_bus_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
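
As a usage sketch (guest-kernel context; the hypercall wrapper, the choice of virtual address, and the error handling are assumptions, and the GNTMAP_* and GNTST_* values referenced are defined further down in this header), mapping a foreign grant and tearing it down again looks roughly like this:

extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
                                     unsigned int count);   /* assumed wrapper */

/* Sketch: map grant <dom,ref> read/write at host virtual address 'vaddr'. */
static int map_foreign_grant(domid_t dom, grant_ref_t ref, uint64_t vaddr,
                             grant_handle_t *handle)
{
    struct gnttab_map_grant_ref op = {
        .host_addr = vaddr,
        .flags     = GNTMAP_host_map,
        .ref       = ref,
        .dom       = dom,
    };
    int rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
    if (rc == 0 && op.status == GNTST_okay)
        *handle = op.handle;
    return rc ? rc : op.status;
}

/* Sketch: tear the mapping down again, as note 3 of GNTTABOP_map_grant_ref requires. */
static int unmap_foreign_grant(uint64_t vaddr, grant_handle_t handle)
{
    struct gnttab_unmap_grant_ref op = {
        .host_addr = vaddr,
        .handle    = handle,
    };
    int rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
    return rc ? rc : op.status;
}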
+
+/*
+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
+ * Only <nr_frames> addresses are written, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ * 3. Xen may not support more than a single grant-table page per domain.
+ */
+struct gnttab_setup_table {
+ /* IN parameters. */
+ domid_t dom;
+ uint32_t nr_frames;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
+ XEN_GUEST_HANDLE(ulong) frame_list;
+#else
+ XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+#endif
+};
+typedef struct gnttab_setup_table gnttab_setup_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
+
+/*
+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
+ * xen console. Debugging use only.
+ */
+struct gnttab_dump_table {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_dump_table gnttab_dump_table_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
+
+/*
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
+ * has previously registered its interest in the transfer via <domid, ref>.
+ *
+ * Note that, even if the transfer fails, the specified page no longer belongs
+ * to the calling domain *unless* the error is GNTST_bad_page.
+ *
+ * Note further that only PV guests can use this operation.
+ */
+struct gnttab_transfer {
+ /* IN parameters. */
+ xen_pfn_t mfn;
+ domid_t domid;
+ grant_ref_t ref;
+ /* OUT parameters. */
+ int16_t status;
+};
+typedef struct gnttab_transfer gnttab_transfer_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
+
+
+/*
+ * GNTTABOP_copy: Hypervisor-based copy.
+ * The source and destination can each be either an MFN or, for foreign
+ * domains, a grant reference. The foreign domain has to grant read/write
+ * access in its grant table.
+ *
+ * The flags specify what type source and destinations are (either MFN
+ * or grant reference).
+ *
+ * Note that this can also be used to copy data between two domains
+ * via a third party if the source and destination domains have previously
+ * granted appropriate access to their pages to the third party.
+ *
+ * source_offset specifies an offset in the source frame, dest_offset
+ * the offset in the target frame and len specifies the number of
+ * bytes to be copied.
+ */
+
+#define _GNTCOPY_source_gref (0)
+#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
+#define _GNTCOPY_dest_gref (1)
+#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
+
+struct gnttab_copy {
+ /* IN parameters. */
+ struct gnttab_copy_ptr {
+ union {
+ grant_ref_t ref;
+ xen_pfn_t gmfn;
+ } u;
+ domid_t domid;
+ uint16_t offset;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags; /* GNTCOPY_* */
+ /* OUT parameters. */
+ int16_t status;
+};
+typedef struct gnttab_copy gnttab_copy_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
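
For example (a hedged sketch; the hypercall wrapper is the same assumed one as above and DOMID_SELF comes from xen.h), copying bytes out of a foreign grant into a local frame fills the structure like this:

extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
                                     unsigned int count);   /* assumed wrapper */

/* Sketch: copy 'len' bytes from foreign grant <src_dom,src_ref> at 'src_off'
 * into the local frame 'dst_gmfn' at 'dst_off'. */
static int copy_from_foreign_grant(domid_t src_dom, grant_ref_t src_ref,
                                   uint16_t src_off, xen_pfn_t dst_gmfn,
                                   uint16_t dst_off, uint16_t len)
{
    struct gnttab_copy op = {
        .source.u.ref  = src_ref,
        .source.domid  = src_dom,
        .source.offset = src_off,
        .dest.u.gmfn   = dst_gmfn,
        .dest.domid    = DOMID_SELF,
        .dest.offset   = dst_off,
        .len           = len,
        .flags         = GNTCOPY_source_gref,   /* only the source is a grant ref */
    };
    int rc = HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1);
    return rc ? rc : op.status;
}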
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_query_size {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ uint32_t nr_frames;
+ uint32_t max_nr_frames;
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_query_size gnttab_query_size_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
+
+/*
+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>. <new_addr> will be
+ * redirected to the null entry.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+struct gnttab_unmap_and_replace {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t new_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
+
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * GNTTABOP_set_version: Request a particular version of the grant
+ * table shared table structure. This operation may be used to toggle
+ * between different versions, but must be performed while no grants
+ * are active. The only defined versions are 1 and 2.
+ */
+struct gnttab_set_version {
+ /* IN/OUT parameters */
+ uint32_t version;
+};
+typedef struct gnttab_set_version gnttab_set_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t);
+
+
+/*
+ * GNTTABOP_get_status_frames: Get the list of frames used to store grant
+ * status for <dom>. In grant format version 2, the status is separated
+ * from the other shared grant fields to allow more efficient synchronization
+ * using barriers instead of atomic cmpexch operations.
+ * <nr_frames> specifies the size of vector <frame_list>.
+ * The frame addresses are returned in the <frame_list>.
+ * Only <nr_frames> addresses are returned, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_get_status_frames {
+ /* IN parameters. */
+ uint32_t nr_frames;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+ XEN_GUEST_HANDLE(uint64_t) frame_list;
+};
+typedef struct gnttab_get_status_frames gnttab_get_status_frames_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t);
+
+/*
+ * GNTTABOP_get_version: Get the grant table version which is in
+ * effect for domain <dom>.
+ */
+struct gnttab_get_version {
+ /* IN parameters */
+ domid_t dom;
+ uint16_t pad;
+ /* OUT parameters */
+ uint32_t version;
+};
+typedef struct gnttab_get_version gnttab_get_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t);
+
+/*
+ * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries.
+ */
+struct gnttab_swap_grant_ref {
+ /* IN parameters */
+ grant_ref_t ref_a;
+ grant_ref_t ref_b;
+ /* OUT parameters */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
+
+/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+struct gnttab_cache_flush {
+ union {
+ uint64_t dev_bus_addr;
+ grant_ref_t ref;
+ } a;
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1u<<0)
+#define GNTTAB_CACHE_INVAL (1u<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1u<<31)
+ uint32_t op;
+};
+typedef struct gnttab_cache_flush gnttab_cache_flush_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_cache_flush_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/*
+ * Bitfield values for gnttab_map_grant_ref.flags.
+ */
+ /* Map the grant entry for access by I/O devices. */
+#define _GNTMAP_device_map (0)
+#define GNTMAP_device_map (1<<_GNTMAP_device_map)
+ /* Map the grant entry for access by host CPUs. */
+#define _GNTMAP_host_map (1)
+#define GNTMAP_host_map (1<<_GNTMAP_host_map)
+ /* Accesses to the granted frame will be restricted to read-only access. */
+#define _GNTMAP_readonly (2)
+#define GNTMAP_readonly (1<<_GNTMAP_readonly)
+ /*
+ * GNTMAP_host_map subflag:
+ * 0 => The host mapping is usable only by the guest OS.
+ * 1 => The host mapping is usable by guest OS + current application.
+ */
+#define _GNTMAP_application_map (3)
+#define GNTMAP_application_map (1<<_GNTMAP_application_map)
+
+ /*
+ * GNTMAP_contains_pte subflag:
+ * 0 => This map request contains a host virtual address.
+ * 1 => This map request contains the machine address of the PTE to update.
+ */
+#define _GNTMAP_contains_pte (4)
+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
+
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
+/*
+ * Values for error status returns. All errors are -ve.
+ */
+/* ` enum grant_status { */
+#define GNTST_okay (0) /* Normal return. */
+#define GNTST_general_error (-1) /* General undefined error. */
+#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
+#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
+#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
+#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
+#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
+#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
+#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
+#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
+#define GNTST_address_too_big (-11) /* transfer page address too large. */
+#define GNTST_eagain (-12) /* Operation not done; try again. */
+#define GNTST_no_space (-13) /* Out of space (handles etc). */
+/* ` } */
+
+#define GNTTABOP_error_msgs { \
+ "okay", \
+ "undefined error", \
+ "unrecognised domain id", \
+ "invalid grant reference", \
+ "invalid mapping handle", \
+ "invalid virtual address", \
+ "invalid device address", \
+ "no spare translation slot in the I/O MMU", \
+ "permission denied", \
+ "bad page", \
+ "copy arguments cross page boundary", \
+ "page address size too large", \
+ "operation not done; try again", \
+ "out of space", \
+}
+
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/hvm/hvm_op.h b/include/hw/xen/interface/hvm/hvm_op.h
new file mode 100644
index 0000000000..e22adf0319
--- /dev/null
+++ b/include/hw/xen/interface/hvm/hvm_op.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2007, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
+#define __XEN_PUBLIC_HVM_HVM_OP_H__
+
+#include "../xen.h"
+#include "../trace.h"
+#include "../event_channel.h"
+
+/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
+#define HVMOP_set_param 0
+#define HVMOP_get_param 1
+struct xen_hvm_param {
+ domid_t domid; /* IN */
+ uint16_t pad;
+ uint32_t index; /* IN */
+ uint64_t value; /* IN/OUT */
+};
+typedef struct xen_hvm_param xen_hvm_param_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
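
As a hedged sketch of how this pair is used (the HYPERVISOR_hvm_op wrapper is assumed; its exact form depends on the guest environment), reading one parameter back looks like this; setting one is symmetric, with .value filled in and HVMOP_set_param issued instead:

extern long HYPERVISOR_hvm_op(unsigned long op, void *arg);   /* assumed wrapper */

/* Sketch: read HVM parameter 'index' of domain 'dom'. */
static int get_hvm_param(domid_t dom, uint32_t index, uint64_t *value)
{
    struct xen_hvm_param p = {
        .domid = dom,
        .index = index,
    };
    long rc = HYPERVISOR_hvm_op(HVMOP_get_param, &p);
    if (rc == 0)
        *value = p.value;
    return (int)rc;
}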
+
+struct xen_hvm_altp2m_suppress_ve {
+ uint16_t view;
+ uint8_t suppress_ve; /* Boolean type. */
+ uint8_t pad1;
+ uint32_t pad2;
+ uint64_t gfn;
+};
+
+struct xen_hvm_altp2m_suppress_ve_multi {
+ uint16_t view;
+ uint8_t suppress_ve; /* Boolean type. */
+ uint8_t pad1;
+ int32_t first_error; /* Should be set to 0. */
+ uint64_t first_gfn; /* Value may be updated. */
+ uint64_t last_gfn;
+ uint64_t first_error_gfn; /* Gfn of the first error. */
+};
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
+/* Set the logical level of one of a domain's PCI INTx wires. */
+#define HVMOP_set_pci_intx_level 2
+struct xen_hvm_set_pci_intx_level {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
+ uint8_t domain, bus, device, intx;
+ /* Assertion level (0 = unasserted, 1 = asserted). */
+ uint8_t level;
+};
+typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
+
+/* Set the logical level of one of a domain's ISA IRQ wires. */
+#define HVMOP_set_isa_irq_level 3
+struct xen_hvm_set_isa_irq_level {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* ISA device identification, by ISA IRQ (0-15). */
+ uint8_t isa_irq;
+ /* Assertion level (0 = unasserted, 1 = asserted). */
+ uint8_t level;
+};
+typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
+
+#define HVMOP_set_pci_link_route 4
+struct xen_hvm_set_pci_link_route {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* PCI link identifier (0-3). */
+ uint8_t link;
+ /* ISA IRQ (1-15), or 0 (disable link). */
+ uint8_t isa_irq;
+};
+typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
+/* Flushes all VCPU TLBs: @arg must be NULL. */
+#define HVMOP_flush_tlbs 5
+
+/*
+ * hvmmem_type_t should not be defined when generating the corresponding
+ * compat header. This will ensure that the improperly named HVMMEM_(*)
+ * values are defined only once.
+ */
+#ifndef XEN_GENERATING_COMPAT_HEADERS
+
+typedef enum {
+ HVMMEM_ram_rw, /* Normal read/write guest RAM */
+ HVMMEM_ram_ro, /* Read-only; writes are discarded */
+ HVMMEM_mmio_dm, /* Reads and writes go to the device model */
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */
+#else
+ HVMMEM_unused, /* Placeholder; setting memory to this type
+ will fail for code after 4.7.0 */
+#endif
+ HVMMEM_ioreq_server /* Memory type claimed by an ioreq server; type
+ changes to this value are only allowed after
+ an ioreq server has claimed its ownership.
+ Only pages with HVMMEM_ram_rw are allowed to
+ change to this type; conversely, pages with
+ this type are only allowed to be changed back
+ to HVMMEM_ram_rw. */
+} hvmmem_type_t;
+
+#endif /* XEN_GENERATING_COMPAT_HEADERS */
+
+/* Hint from PV drivers for pagetable destruction. */
+#define HVMOP_pagetable_dying 9
+struct xen_hvm_pagetable_dying {
+ /* Domain with a pagetable about to be destroyed. */
+ domid_t domid;
+ uint16_t pad[3]; /* align next field on 8-byte boundary */
+ /* guest physical address of the toplevel pagetable dying */
+ uint64_t gpa;
+};
+typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);
+
+/* Get the current Xen time, in nanoseconds since system boot. */
+#define HVMOP_get_time 10
+struct xen_hvm_get_time {
+ uint64_t now; /* OUT */
+};
+typedef struct xen_hvm_get_time xen_hvm_get_time_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);
+
+#define HVMOP_xentrace 11
+struct xen_hvm_xentrace {
+ uint16_t event, extra_bytes;
+ uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
+};
+typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* Deprecated by XENMEM_access_op_set_access */
+#define HVMOP_set_mem_access 12
+
+/* Deprecated by XENMEM_access_op_get_access */
+#define HVMOP_get_mem_access 13
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* OUT variable. */
+ uint16_t mem_type;
+ uint16_t pad[2]; /* align next field on 8-byte boundary */
+ /* IN variable. */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/*
+ * Definitions relating to DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
+ */
+
+#define HVM_IOREQSRV_BUFIOREQ_OFF 0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set,
+ * this vector will be used in preference to the
+ * domain-global callback (see
+ * HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#define HVMOP_guest_request_vm_event 24
+
+/* HVMOP_altp2m: perform altp2m state operations */
+#define HVMOP_altp2m 25
+
+#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
+
+struct xen_hvm_altp2m_domain_state {
+ /* IN or OUT variable on/off */
+ uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+struct xen_hvm_altp2m_vcpu_enable_notify {
+ uint32_t vcpu_id;
+ uint32_t pad;
+ /* #VE info area gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+struct xen_hvm_altp2m_vcpu_disable_notify {
+ uint32_t vcpu_id;
+};
+typedef struct xen_hvm_altp2m_vcpu_disable_notify xen_hvm_altp2m_vcpu_disable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_disable_notify_t);
+
+struct xen_hvm_altp2m_view {
+ /* IN/OUT variable */
+ uint16_t view;
+ uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+struct xen_hvm_altp2m_set_mem_access {
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t access; /* xenmem_access_t */
+ uint32_t pad;
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+
+struct xen_hvm_altp2m_mem_access {
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t access; /* xenmem_access_t */
+ uint32_t pad;
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_mem_access xen_hvm_altp2m_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_mem_access_t);
+
+struct xen_hvm_altp2m_set_mem_access_multi {
+ /* view */
+ uint16_t view;
+ uint16_t pad;
+ /* Number of pages */
+ uint32_t nr;
+ /*
+ * Used for continuation purposes.
+ * Must be set to zero upon initial invocation.
+ */
+ uint64_t opaque;
+ /* List of pfns to set access for */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /* Corresponding list of access settings for pfn_list */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+
+struct xen_hvm_altp2m_change_gfn {
+ /* view */
+ uint16_t view;
+ uint16_t pad1;
+ uint32_t pad2;
+ /* old gfn */
+ uint64_t old_gfn;
+ /* new gfn, INVALID_GFN (~0UL) means revert */
+ uint64_t new_gfn;
+};
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
+
+struct xen_hvm_altp2m_get_vcpu_p2m_idx {
+ uint32_t vcpu_id;
+ uint16_t altp2m_idx;
+};
+
+struct xen_hvm_altp2m_set_visibility {
+ uint16_t altp2m_idx;
+ uint8_t visible;
+ uint8_t pad;
+};
+
+struct xen_hvm_altp2m_op {
+ uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
+ uint32_t cmd;
+/* Get/set the altp2m state for a domain */
+#define HVMOP_altp2m_get_domain_state 1
+#define HVMOP_altp2m_set_domain_state 2
+/* Set a given VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify 3
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m 4
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m 5
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m 6
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access 7
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn 8
+/* Set access for an array of pages */
+#define HVMOP_altp2m_set_mem_access_multi 9
+/* Set the "Suppress #VE" bit on a page */
+#define HVMOP_altp2m_set_suppress_ve 10
+/* Get the "Suppress #VE" bit of a page */
+#define HVMOP_altp2m_get_suppress_ve 11
+/* Get the access of a page of memory from a certain view */
+#define HVMOP_altp2m_get_mem_access 12
+/* Disable altp2m event notifications for a given VCPU */
+#define HVMOP_altp2m_vcpu_disable_notify 13
+/* Get the active vcpu p2m index */
+#define HVMOP_altp2m_get_p2m_idx 14
+/* Set the "Supress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
+/* Set visibility for a given altp2m view */
+#define HVMOP_altp2m_set_visibility 16
+ domid_t domain;
+ uint16_t pad1;
+ uint32_t pad2;
+ union {
+ struct xen_hvm_altp2m_domain_state domain_state;
+ struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+ struct xen_hvm_altp2m_view view;
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+ struct xen_hvm_altp2m_set_mem_access set_mem_access;
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+ struct xen_hvm_altp2m_mem_access mem_access;
+ struct xen_hvm_altp2m_change_gfn change_gfn;
+ struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
+ struct xen_hvm_altp2m_suppress_ve suppress_ve;
+ struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
+ struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
+ struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
+ struct xen_hvm_altp2m_set_visibility set_visibility;
+ uint8_t pad[64];
+ } u;
+};
+typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
+
+#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/hvm/params.h b/include/hw/xen/interface/hvm/params.h
new file mode 100644
index 0000000000..a22b4ed45d
--- /dev/null
+++ b/include/hw/xen/interface/hvm/params.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2007, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
+#define __XEN_PUBLIC_HVM_PARAMS_H__
+
+#include "hvm_op.h"
+
+/* These parameters are deprecated and their meaning is undefined. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#define HVM_PARAM_PAE_ENABLED 4
+#define HVM_PARAM_DM_DOMAIN 13
+#define HVM_PARAM_MEMORY_EVENT_CR0 20
+#define HVM_PARAM_MEMORY_EVENT_CR3 21
+#define HVM_PARAM_MEMORY_EVENT_CR4 22
+#define HVM_PARAM_MEMORY_EVENT_INT3 23
+#define HVM_PARAM_NESTEDHVM 24
+#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+#define HVM_PARAM_MEMORY_EVENT_MSR 30
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+/*
+ * Parameter space for HVMOP_{set,get}_param.
+ */
+
+#define HVM_PARAM_CALLBACK_IRQ 0
+#define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000)
+/*
+ * How should CPU0 event-channel notifications be delivered?
+ *
+ * If val == 0 then CPU0 event-channel notifications are not delivered.
+ * If val != 0, val[63:56] encodes the type, as follows:
+ */
+
+#define HVM_PARAM_CALLBACK_TYPE_GSI 0
+/*
+ * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
+ * and disables all notifications.
+ */
+
+#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
+/*
+ * val[55:0] is a delivery PCI INTx line:
+ * Domain = val[47:32], Bus = val[31:16], DevFn = val[15:8], IntX = val[1:0]
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
+/*
+ * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
+ * if this delivery method is available.
+ */
+#elif defined(__arm__) || defined(__aarch64__)
+#define HVM_PARAM_CALLBACK_TYPE_PPI 2
+/*
+ * val[55:16] needs to be zero.
+ * val[15:8] is the interrupt flag of the PPI used by the event channel:
+ * bit 8: the PPI is edge(1) or level(0) triggered
+ * bit 9: the PPI is active low(1) or high(0)
+ * val[7:0] is the PPI number used by the event channel.
+ * This is only used on ARM/ARM64; masking/EOI of the interrupt associated
+ * with the notification is handled by the interrupt controller.
+ */
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2
+#endif
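
To make the encoding concrete for the x86 'vector' delivery type: the type goes in val[63:56] and the vector number in val[7:0]. A hedged sketch, reusing the xen_hvm_param structure from hvm_op.h and the assumed HYPERVISOR_hvm_op wrapper (error handling omitted):

/* Sketch: deliver CPU0 event-channel upcalls via IDT vector 'vec' (x86 only). */
static int set_callback_vector(domid_t dom, uint8_t vec)
{
    extern long HYPERVISOR_hvm_op(unsigned long op, void *arg);   /* assumed */
    struct xen_hvm_param p = {
        .domid = dom,
        .index = HVM_PARAM_CALLBACK_IRQ,
        /* val[63:56] = delivery type, val[7:0] = vector number */
        .value = ((uint64_t)HVM_PARAM_CALLBACK_TYPE_VECTOR << 56) | vec,
    };
    return (int)HYPERVISOR_hvm_op(HVMOP_set_param, &p);
}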
+
+/*
+ * These are not used by Xen. They are here for the convenience of HVM-guest
+ * xenbus implementations.
+ */
+#define HVM_PARAM_STORE_PFN 1
+#define HVM_PARAM_STORE_EVTCHN 2
+
+#define HVM_PARAM_IOREQ_PFN 5
+
+#define HVM_PARAM_BUFIOREQ_PFN 6
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * Viridian enlightenments
+ *
+ * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx)
+ *
+ * To expose viridian enlightenments to the guest set this parameter
+ * to the desired feature mask. The base feature set must be present
+ * in any valid feature mask.
+ */
+#define HVM_PARAM_VIRIDIAN 9
+
+/* Base+Freq viridian feature sets:
+ *
+ * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL)
+ * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
+ * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX)
+ * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY)
+ */
+#define _HVMPV_base_freq 0
+#define HVMPV_base_freq (1 << _HVMPV_base_freq)
+
+/* Feature set modifications */
+
+/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY).
+ * This modification restores the viridian feature set to the
+ * original 'base' set exposed in releases prior to Xen 4.4.
+ */
+#define _HVMPV_no_freq 1
+#define HVMPV_no_freq (1 << _HVMPV_no_freq)
+
+/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
+#define _HVMPV_time_ref_count 2
+#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
+
+/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
+#define _HVMPV_reference_tsc 3
+#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
+
+/* Use Hypercall for remote TLB flush */
+#define _HVMPV_hcall_remote_tlb_flush 4
+#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
+
+/* Use APIC assist */
+#define _HVMPV_apic_assist 5
+#define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
+
+/* Enable crash MSRs */
+#define _HVMPV_crash_ctl 6
+#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
+
+/* Enable SYNIC MSRs */
+#define _HVMPV_synic 7
+#define HVMPV_synic (1 << _HVMPV_synic)
+
+/* Enable STIMER MSRs */
+#define _HVMPV_stimer 8
+#define HVMPV_stimer (1 << _HVMPV_stimer)
+
+/* Use Synthetic Cluster IPI Hypercall */
+#define _HVMPV_hcall_ipi 9
+#define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi)
+
+/* Enable ExProcessorMasks */
+#define _HVMPV_ex_processor_masks 10
+#define HVMPV_ex_processor_masks (1 << _HVMPV_ex_processor_masks)
+
+/* Allow more than 64 VPs */
+#define _HVMPV_no_vp_limit 11
+#define HVMPV_no_vp_limit (1 << _HVMPV_no_vp_limit)
+
+/* Enable vCPU hotplug */
+#define _HVMPV_cpu_hotplug 12
+#define HVMPV_cpu_hotplug (1 << _HVMPV_cpu_hotplug)
+
+#define HVMPV_feature_mask \
+ (HVMPV_base_freq | \
+ HVMPV_no_freq | \
+ HVMPV_time_ref_count | \
+ HVMPV_reference_tsc | \
+ HVMPV_hcall_remote_tlb_flush | \
+ HVMPV_apic_assist | \
+ HVMPV_crash_ctl | \
+ HVMPV_synic | \
+ HVMPV_stimer | \
+ HVMPV_hcall_ipi | \
+ HVMPV_ex_processor_masks | \
+ HVMPV_no_vp_limit | \
+ HVMPV_cpu_hotplug)
+
+#endif
+
+/*
+ * Set mode for virtual timers (currently x86 only):
+ * delay_for_missed_ticks (default):
+ * Do not advance a vcpu's time beyond the correct delivery time for
+ * interrupts that have been missed due to preemption. Deliver missed
+ * interrupts when the vcpu is rescheduled and advance the vcpu's virtual
+ * time stepwise for each one.
+ * no_delay_for_missed_ticks:
+ * As above, missed interrupts are delivered, but guest time always tracks
+ * wallclock (i.e., real) time while doing so.
+ * no_missed_ticks_pending:
+ * No missed interrupts are held pending. Instead, to ensure ticks are
+ * delivered at some non-zero rate, if we detect missed ticks then the
+ * internal tick alarm is not disabled if the VCPU is preempted during the
+ * next tick period.
+ * one_missed_tick_pending:
+ * Missed interrupts are collapsed together and delivered as one 'late tick'.
+ * Guest time always tracks wallclock (i.e., real) time.
+ */
+#define HVM_PARAM_TIMER_MODE 10
+#define HVMPTM_delay_for_missed_ticks 0
+#define HVMPTM_no_delay_for_missed_ticks 1
+#define HVMPTM_no_missed_ticks_pending 2
+#define HVMPTM_one_missed_tick_pending 3
+
+/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
+#define HVM_PARAM_HPET_ENABLED 11
+
+/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
+#define HVM_PARAM_IDENT_PT 12
+
+/* ACPI S state: currently support S0 and S3 on x86. */
+#define HVM_PARAM_ACPI_S_STATE 14
+
+/* TSS used on Intel when CR0.PE=0. */
+#define HVM_PARAM_VM86_TSS 15
+
+/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
+#define HVM_PARAM_VPT_ALIGN 16
+
+/* Console debug shared memory ring and event channel */
+#define HVM_PARAM_CONSOLE_PFN 17
+#define HVM_PARAM_CONSOLE_EVTCHN 18
+
+/*
+ * Select location of ACPI PM1a and TMR control blocks. Currently two locations
+ * are supported, specified by version 0 or 1 in this parameter:
+ * - 0: default, use the old addresses
+ * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48
+ * - 1: use the new default qemu addresses
+ * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
+ * You can find these address definitions in <hvm/ioreq.h>
+ */
+#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
+
+/* Params for the mem event rings */
+#define HVM_PARAM_PAGING_RING_PFN 27
+#define HVM_PARAM_MONITOR_RING_PFN 28
+#define HVM_PARAM_SHARING_RING_PFN 29
+
+/* SHUTDOWN_* action in case of a triple fault */
+#define HVM_PARAM_TRIPLE_FAULT_REASON 31
+
+#define HVM_PARAM_IOREQ_SERVER_PFN 32
+#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
+
+/* Location of the VM Generation ID in guest physical address space. */
+#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
+
+/*
+ * Set mode for altp2m:
+ * disabled: don't activate altp2m (default)
+ * mixed: allow access to all altp2m ops for both in-guest and external tools
+ * external: allow access to external privileged tools only
+ * limited: guest only has limited access (i.e. control VMFUNC and #VE)
+ *
+ * Note that 'mixed' mode has not been evaluated for safety from a
+ * security perspective. Before using this mode in a
+ * security-critical environment, each subop should be evaluated for
+ * safety, with unsafe subops blacklisted in XSM.
+ */
+#define HVM_PARAM_ALTP2M 35
+#define XEN_ALTP2M_disabled 0
+#define XEN_ALTP2M_mixed 1
+#define XEN_ALTP2M_external 2
+#define XEN_ALTP2M_limited 3
+
+/*
+ * Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
+ * save/restore. This is a workaround for a hardware limitation that
+ * does not allow the full FIP/FDP and FCS/FDS to be restored.
+ *
+ * Valid values are:
+ *
+ * 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
+ * has FPCSDS feature).
+ *
+ * 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
+ * FIP/FDP.
+ *
+ * 0: allow hypervisor to choose based on the value of FIP/FDP
+ * (default if CPU does not have FPCSDS).
+ *
+ * If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
+ * never saves FCS/FDS and this parameter should be left at the
+ * default of 8.
+ */
+#define HVM_PARAM_X87_FIP_WIDTH 36
+
+/*
+ * TSS (and its size) used on Intel when CR0.PE=0. The address occupies
+ * the low 32 bits, while the size is in the high 32 ones.
+ */
+#define HVM_PARAM_VM86_TSS_SIZED 37
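+
+/*
+ * Illustrative encoding (derived from the layout described above): pack a
+ * 32-bit TSS address and its size into the single parameter value.
+ *
+ *   uint64_t val = ((uint64_t)tss_size << 32) | (uint32_t)tss_addr;
+ */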
+
+/* Enable MCA capabilities. */
+#define HVM_PARAM_MCA_CAP 38
+#define XEN_HVM_MCA_CAP_LMCE (xen_mk_ullong(1) << 0)
+#define XEN_HVM_MCA_CAP_MASK XEN_HVM_MCA_CAP_LMCE
+
+#define HVM_NR_PARAMS 39
+
+#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff --git a/include/hw/xen/interface/io/blkif.h b/include/hw/xen/interface/io/blkif.h
index d07fa1e078..22f1eef0c0 100644
--- a/include/hw/xen/interface/io/blkif.h
+++ b/include/hw/xen/interface/io/blkif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* blkif.h
*
* Unified block-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2003-2004, Keir Fraser
* Copyright (c) 2012, Spectra Logic Corporation
*/
@@ -118,7 +101,7 @@
*
* The underlying storage is not affected by the direct IO memory
* lifetime bug. See:
- * http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
+ * https://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
*
* Therefore this option gives the backend permission to use
* O_DIRECT, notwithstanding that bug.
@@ -341,7 +324,7 @@
* access (even when it should be read-only). If the frontend hits the
* maximum number of allowed persistently mapped grants, it can fallback
* to non persistent mode. This will cause a performance degradation,
- * since the backend driver will still try to map those grants
+ * since the backend driver will still try to map those grants
* persistently. Since the persistent grants protocol is compatible with
* the previous protocol, a frontend driver can choose to work in
* persistent mode even when the backend doesn't support it.
@@ -363,6 +346,14 @@
* that the frontend requires that the logical block size is 512 as it
* is hardcoded (which is the case in some frontend implementations).
*
+ * trusted
+ * Values: 0/1 (boolean)
+ * Default value: 1
+ *
+ * A value of "0" indicates that the frontend should not trust the
+ * backend, and should deploy whatever measures are available to protect from
+ * a malicious backend on the other end.
+ *
*------------------------- Virtual Device Properties -------------------------
*
* device-type
@@ -710,3 +701,13 @@ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
#define VDISK_READONLY 0x4
#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/console.h b/include/hw/xen/interface/io/console.h
index e2155d1cf5..4509b4b689 100644
--- a/include/hw/xen/interface/io/console.h
+++ b/include/hw/xen/interface/io/console.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* console.h
*
* Console I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2005, Keir Fraser
*/
@@ -44,3 +27,13 @@ DEFINE_XEN_FLEX_RING(xencons);
#endif
#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/fbif.h b/include/hw/xen/interface/io/fbif.h
index ea87ebec0a..93c73195d8 100644
--- a/include/hw/xen/interface/io/fbif.h
+++ b/include/hw/xen/interface/io/fbif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* fbif.h -- Xen virtual frame buffer device
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
@@ -153,4 +136,24 @@ struct xenfb_page
unsigned long pd[256];
};
+/*
+ * Wart: xenkbd needs to know default resolution. Put it here until a
+ * better solution is found, but don't leak it to the backend.
+ */
+#ifdef __KERNEL__
+#define XENFB_WIDTH 800
+#define XENFB_HEIGHT 600
+#define XENFB_DEPTH 32
+#endif
+
#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/kbdif.h b/include/hw/xen/interface/io/kbdif.h
index 1d68cd458e..4bde6b3821 100644
--- a/include/hw/xen/interface/io/kbdif.h
+++ b/include/hw/xen/interface/io/kbdif.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
* kbdif.h -- Xen virtual keyboard/mouse
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
@@ -564,3 +547,13 @@ struct xenkbd_page
};
#endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/netif.h b/include/hw/xen/interface/io/netif.h
index 48fa530950..c13b85061d 100644
--- a/include/hw/xen/interface/io/netif.h
+++ b/include/hw/xen/interface/io/netif.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* netif.h
*
* Unified network-device I/O interface for Xen guest OSes.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2003-2004, Keir Fraser
*/
@@ -161,6 +144,12 @@
*/
/*
+ * The setting of "trusted" node to "0" in the frontend path signals that the
+ * frontend should not trust the backend, and should deploy whatever measures
+ * are available to protect from a malicious backend on the other end.
+ */
+
+/*
* Control ring
* ============
*
@@ -171,7 +160,7 @@
* The ability of the backend to use a control ring is advertised by
* setting:
*
- * /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1"
+ * /local/domain/X/backend/vif/<domid>/<vif>/feature-ctrl-ring = "1"
*
* The frontend provides a control ring to the backend by setting:
*
@@ -191,6 +180,32 @@
*/
/*
+ * Link state
+ * ==========
+ *
+ * The backend can advertise its current link (carrier) state to the
+ * frontend using the /local/domain/X/backend/vif/<domid>/<vif>/carrier
+ * node. If this node is not present, then the frontend should assume that
+ * the link is up (for compatibility with backends that do not implement
+ * this feature). If this node is present, then a value of "0" should be
+ * interpreted by the frontend as the link being down (no carrier) and a
+ * value of "1" should be interpreted as the link being up (carrier
+ * present).
+ */
+
+/*
+ * MTU
+ * ===
+ *
+ * The toolstack may set a value of MTU for the frontend by setting the
+ * /local/domain/<domid>/device/vif/<vif>/mtu node with the MTU value in
+ * octets. If this node is absent the frontend should assume an MTU value
+ * of 1500 octets. A frontend is also at liberty to ignore this value so
+ * it is only suitable for informing the frontend that a packet payload
+ * >1500 octets is permitted.
+ */
+
+/*
* Hash types
* ==========
*
@@ -268,6 +283,62 @@
#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
/*
+ * This algorithm uses a 'key' as well as the data buffer itself.
+ * (Buffer[] and Key[] are treated as shift-registers where the MSB of
+ * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
+ * is the 'right-most').
+ *
+ * Value = 0
+ * For number of bits in Buffer[]
+ * If (left-most bit of Buffer[] is 1)
+ * Value ^= left-most 32 bits of Key[]
+ * Key[] << 1
+ * Buffer[] << 1
+ *
+ * The code below is provided for convenience where an operating system
+ * does not already provide an implementation.
+ */
+#ifdef XEN_NETIF_DEFINE_TOEPLITZ
+static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
+ unsigned int keylen,
+ const uint8_t *buf,
+ unsigned int buflen)
+{
+ unsigned int keyi, bufi;
+ uint64_t prefix = 0;
+ uint64_t hash = 0;
+
+ /* Pre-load prefix with the first 8 bytes of the key */
+ for (keyi = 0; keyi < 8; keyi++) {
+ prefix <<= 8;
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ }
+
+ for (bufi = 0; bufi < buflen; bufi++) {
+ uint8_t byte = buf[bufi];
+ unsigned int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ prefix <<= 1;
+ byte <<= 1;
+ }
+
+ /*
+ * 'prefix' has now been left-shifted by 8, so
+ * OR in the next byte.
+ */
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ keyi++;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return hash >> 32;
+}
+#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
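+
+/*
+ * Usage sketch (illustrative only): define XEN_NETIF_DEFINE_TOEPLITZ before
+ * including this header, then hash a buffer with a caller-supplied key.  The
+ * key contents and buffer layout below are placeholders, not protocol values.
+ *
+ *   static const uint8_t key[40] = { 0 };  // key octets chosen by the backend
+ *   uint8_t buf[8];                        // e.g. IPv4 source + destination
+ *   uint32_t hash = xen_netif_toeplitz_hash(key, sizeof(key),
+ *                                           buf, sizeof(buf));
+ */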
+
+/*
* Control requests (struct xen_netif_ctrl_request)
* ================================================
*
@@ -1008,3 +1079,13 @@ DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
#define NETIF_RSP_NULL 1
#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/protocols.h b/include/hw/xen/interface/io/protocols.h
index 52b4de0f81..7815e1ff0f 100644
--- a/include/hw/xen/interface/io/protocols.h
+++ b/include/hw/xen/interface/io/protocols.h
@@ -1,24 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* protocols.h
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (c) 2008, Keir Fraser
*/
diff --git a/include/hw/xen/interface/io/ring.h b/include/hw/xen/interface/io/ring.h
index 5d048b335c..025939278b 100644
--- a/include/hw/xen/interface/io/ring.h
+++ b/include/hw/xen/interface/io/ring.h
@@ -1,25 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/******************************************************************************
* ring.h
- *
- * Shared producer-consumer ring macros.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Shared producer-consumer ring macros.
*
* Tim Deegan and Andrew Warfield November 2004.
*/
@@ -33,13 +16,6 @@
* - standard integers types (uint8_t, uint16_t, etc)
* They are provided by stdint.h of the standard headers.
*
- * Before using the different macros, you need to provide the following
- * macros:
- * - xen_mb() a memory barrier
- * - xen_rmb() a read memory barrier
- * - xen_wmb() a write memory barrier
- * Example of those can be found in xenctrl.h.
- *
* In addition, if you intend to use the FLEX macros, you also need to
* provide the following, before invoking the FLEX macros:
* - size_t
@@ -49,6 +25,14 @@
* and grant_table.h from the Xen public headers.
*/
+#include "../xen-compat.h"
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030208
+#define xen_mb() mb()
+#define xen_rmb() rmb()
+#define xen_wmb() wmb()
+#endif
+
typedef unsigned int RING_IDX;
/* Round a 32-bit unsigned constant down to the nearest power of two. */
@@ -61,12 +45,12 @@ typedef unsigned int RING_IDX;
/*
* Calculate size of a shared ring, given the total available space for the
* ring and indexes (_sz), and the name tag of the request/response structure.
- * A ring contains as many entries as will fit, rounded down to the nearest
+ * A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
#define __CONST_RING_SIZE(_s, _sz) \
(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
- sizeof_field(struct _s##_sring, ring[0])))
+ sizeof(((struct _s##_sring *)0)->ring[0])))
/*
* The same for passing in an actual pointer instead of a name tag.
*/
@@ -75,7 +59,7 @@ typedef unsigned int RING_IDX;
/*
* Macros to make the correct C datatypes for a new kind of ring.
- *
+ *
* To make a new ring datatype, you need to have two message structures,
* let's say request_t, and response_t already defined.
*
@@ -85,7 +69,7 @@ typedef unsigned int RING_IDX;
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
- *
+ *
* mytag_sring_t - The shared ring.
* mytag_front_ring_t - The 'front' half of the ring.
* mytag_back_ring_t - The 'back' half of the ring.
@@ -94,9 +78,8 @@ typedef unsigned int RING_IDX;
* of the shared memory area (PAGE_SIZE, for instance). To initialise
* the front half:
*
- * mytag_front_ring_t front_ring;
- * SHARED_RING_INIT((mytag_sring_t *)shared_page);
- * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+ * mytag_front_ring_t ring;
+ * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly (note that only the front
* initializes the shared ring):
@@ -153,15 +136,15 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Macros for manipulating rings.
- *
- * FRONT_RING_whatever works on the "front end" of a ring: here
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
* requests are pushed on to the ring and responses taken off it.
- *
- * BACK_RING_whatever works on the "back end" of a ring: here
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
* requests are taken off the ring and responses put on.
- *
- * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
- * This is OK in 1-for-1 request-response situations where the
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
* requestor (front end) never has more than RING_SIZE()-1
* outstanding requests.
*/
@@ -174,20 +157,29 @@ typedef struct __name##_back_ring __name##_back_ring_t
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
-#define FRONT_RING_INIT(_r, _s, __size) do { \
- (_r)->req_prod_pvt = 0; \
- (_r)->rsp_cons = 0; \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->req_prod_pvt = (_i); \
+ (_r)->rsp_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
-#define BACK_RING_INIT(_r, _s, __size) do { \
- (_r)->rsp_prod_pvt = 0; \
- (_r)->req_cons = 0; \
+#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
+
+#define XEN_FRONT_RING_INIT(r, s, size) do { \
+ SHARED_RING_INIT(s); \
+ FRONT_RING_INIT(r, s, size); \
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->rsp_prod_pvt = (_i); \
+ (_r)->req_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
+#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
+
/* How big is this ring? */
#define RING_SIZE(_r) \
((_r)->nr_ents)
@@ -203,11 +195,11 @@ typedef struct __name##_back_ring __name##_back_ring_t
(RING_FREE_REQUESTS(_r) == 0)
/* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)
#ifdef __GNUC__
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
+#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \
unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
unsigned int rsp = RING_SIZE(_r) - \
((_r)->req_cons - (_r)->rsp_prod_pvt); \
@@ -215,33 +207,50 @@ typedef struct __name##_back_ring __name##_back_ring_t
})
#else
/* Same as above, but without the nice GCC ({ ... }) syntax. */
-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) \
((((_r)->sring->req_prod - (_r)->req_cons) < \
(RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
((_r)->sring->req_prod - (_r)->req_cons) : \
(RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
#endif
+#ifdef XEN_RING_HAS_UNCONSUMED_IS_BOOL
+/*
+ * These variants should only be used in case no caller is abusing them for
+ * obtaining the number of unconsumed responses/requests.
+ */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r))
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r))
+#else
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) XEN_RING_NR_UNCONSUMED_RESPONSES(_r)
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) XEN_RING_NR_UNCONSUMED_REQUESTS(_r)
+#endif
+
/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
/*
- * Get a local copy of a request.
+ * Get a local copy of a request/response.
*
- * Use this in preference to RING_GET_REQUEST() so all processing is
+ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
- * to be ineffective where _req is a struct which consists of only bitfields.
+ * to be ineffective where dest is a struct which consists of only bitfields.
*/
-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
- /* Use volatile to force the copy into _req. */ \
- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+#define RING_COPY_(type, r, idx, dest) do { \
+ /* Use volatile to force the copy into dest. */ \
+ *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx); \
} while (0)
-#define RING_GET_RESPONSE(_r, _idx) \
- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
+#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
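+
+/*
+ * Consumption sketch (illustrative, following the documented pattern above):
+ * a backend copies each request before acting on it, so the frontend cannot
+ * modify it after validation.  'mytag' follows the naming example used
+ * earlier in this header.
+ *
+ *   RING_IDX cons = back_ring.req_cons;
+ *   RING_IDX prod = back_ring.sring->req_prod;
+ *   xen_rmb();                             // see new requests before reading
+ *   while (cons != prod) {
+ *       struct mytag_request req;
+ *       RING_COPY_REQUEST(&back_ring, cons, &req);
+ *       // ... validate and process req ...
+ *       back_ring.req_cons = ++cons;
+ *   }
+ */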
/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
@@ -251,6 +260,10 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+/* Ill-behaved backend determination: Can there be this many responses? */
+#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
+
#define RING_PUSH_REQUESTS(_r) do { \
xen_wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
@@ -263,26 +276,26 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Notification hold-off (req_event and rsp_event):
- *
+ *
* When queueing requests or responses on a shared ring, it may not always be
* necessary to notify the remote end. For example, if requests are in flight
* in a backend, the front may be able to queue further requests without
* notifying the back (if the back checks for new requests when it queues
* responses).
- *
+ *
* When enqueuing requests or responses:
- *
+ *
* Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
* is a boolean return value. True indicates that the receiver requires an
* asynchronous notification.
- *
+ *
* After dequeuing requests or responses (before sleeping the connection):
- *
+ *
* Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
* The second argument is a boolean return value. True indicates that there
* are pending messages on the ring (i.e., the connection should not be put
* to sleep).
- *
+ *
* These macros will set the req_event/rsp_event field to trigger a
* notification on the very next message that is enqueued. If you want to
* create batches of work (i.e., only receive a notification after several
diff --git a/include/hw/xen/interface/io/usbif.h b/include/hw/xen/interface/io/usbif.h
index c6a58639d6..875af0dc7c 100644
--- a/include/hw/xen/interface/io/usbif.h
+++ b/include/hw/xen/interface/io/usbif.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* usbif.h
*
@@ -5,24 +6,6 @@
*
* Copyright (C) 2009, FUJITSU LABORATORIES LTD.
* Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef __XEN_PUBLIC_IO_USBIF_H__
@@ -32,6 +15,34 @@
#include "../grant_table.h"
/*
+ * Detailed Interface Description
+ * ==============================
+ * The pvUSB interface uses a split driver design: a frontend driver in
+ * the guest and a backend driver in a driver domain (normally dom0) having
+ * access to the physical USB device(s) being passed to the guest.
+ *
+ * The frontend and backend drivers use XenStore to initiate the connection
+ * between them, the I/O activity is handled via two shared ring pages and an
+ * event channel. As the interface between frontend and backend is at the USB
+ * host connector level, multiple (up to 31) physical USB devices can be
+ * handled by a single connection.
+ *
+ * The Xen pvUSB device name is "qusb", so the frontend's XenStore entries are
+ * to be found under "device/qusb", while the backend's XenStore entries are
+ * under "backend/<guest-dom-id>/qusb".
+ *
+ * When a new pvUSB connection is established, the frontend needs to set up the
+ * two shared ring pages for communication and the event channel. The ring
+ * pages need to be made available to the backend via the grant table
+ * interface.
+ *
+ * One of the shared ring pages is used by the backend to inform the frontend
+ * about USB device plug events (device to be added or removed). This is the
+ * "conn-ring".
+ *
+ * The other ring page is used for USB I/O communication (requests and
+ * responses). This is the "urb-ring".
+ *
* Feature and Parameter Negotiation
* =================================
* The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
@@ -99,130 +110,273 @@
* The machine ABI rules governing the format of all ring request and
* response structures.
*
+ * Protocol Description
+ * ====================
+ *
+ *-------------------------- USB device plug events --------------------------
+ *
+ * USB device plug events are sent via the "conn-ring" shared page. As only
+ * events are being sent, the respective requests from the frontend to the
+ * backend are just dummy ones.
+ * The events sent to the frontend have the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | portnum | speed | 4
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, event id (taken from the actual frontend dummy request)
+ * portnum - uint8_t, port number (1 ... 31)
+ * speed - uint8_t, device USBIF_SPEED_*, USBIF_SPEED_NONE == unplug
+ *
+ * The dummy request:
+ * 0 1 octet
+ * +----------------+----------------+
+ * | id | 2
+ * +----------------+----------------+
+ * id - uint16_t, guest supplied value (need not be unique)
+ *
+ *-------------------------- USB I/O request ---------------------------------
+ *
+ * A single USB I/O request on the "urb-ring" has the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | nr_buffer_segs | 4
+ * +----------------+----------------+----------------+----------------+
+ * | pipe | 8
+ * +----------------+----------------+----------------+----------------+
+ * | transfer_flags | buffer_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | request type specific | 16
+ * | data | 20
+ * +----------------+----------------+----------------+----------------+
+ * | seg[0] | 24
+ * | data | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | seg[USBIF_MAX_SEGMENTS_PER_REQUEST - 1] | 144
+ * | data | 148
+ * +----------------+----------------+----------------+----------------+
+ * Bit field bit number 0 is always least significant bit, undefined bits must
+ * be zero.
+ * id - uint16_t, guest supplied value
+ * nr_buffer_segs - uint16_t, number of segment entries in seg[] array
+ * pipe - uint32_t, bit field with multiple information:
+ * bits 0-4: port request to send to
+ * bit 5: unlink request with specified id (cancel I/O) if set (see below)
+ * bit 7: direction (1 = read from device)
+ * bits 8-14: device number on port
+ * bits 15-18: endpoint of device
+ * bits 30-31: request type: 00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk
+ * transfer_flags - uint16_t, bit field with processing flags:
+ * bit 0: less data than specified allowed
+ * buffer_length - uint16_t, total length of data
+ * request type specific data - 8 bytes, see below
+ * seg[] - array with 8 byte elements, see below
+ *
+ * Request type specific data for isochronous request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | number_of_packets | nr_frame_desc_segs | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time interval in msecs between frames
+ * start_frame - uint16_t, start frame number
+ * number_of_packets - uint16_t, number of packets to transfer
+ * nr_frame_desc_segs - uint16_t, number of seg[] frame descriptor elements
+ *
+ * Request type specific data for interrupt request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time in msecs until interruption
+ *
+ * Request type specific data for control request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | data of setup packet | 4
+ * | | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for bulk request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 4
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for unlink request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | unlink_id | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * unlink_id - uint16_t, request id of request to terminate
+ *
+ * seg[] array element layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref | 4
+ * +----------------+----------------+----------------+----------------+
+ * | offset | length | 8
+ * +----------------+----------------+----------------+----------------+
+ * gref - uint32_t, grant reference of buffer page
+ * offset - uint16_t, offset of buffer start in page
+ * length - uint16_t, length of buffer in page
+ *
+ *-------------------------- USB I/O response --------------------------------
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | actual_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | error_count | 16
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, id of the request this response belongs to
+ * start_frame - uint16_t, start frame of this response (iso requests only)
+ * status - int32_t, USBIF_STATUS_* (non-iso requests)
+ * actual_length - uint32_t, actual size of data transferred
+ * error_count - uint32_t, number of errors (iso requests)
*/
enum usb_spec_version {
- USB_VER_UNKNOWN = 0,
- USB_VER_USB11,
- USB_VER_USB20,
- USB_VER_USB30, /* not supported yet */
+ USB_VER_UNKNOWN = 0,
+ USB_VER_USB11,
+ USB_VER_USB20,
+ USB_VER_USB30, /* not supported yet */
};
/*
* USB pipe in usbif_request
*
- * - port number: bits 0-4
- * (USB_MAXCHILDREN is 31)
+ * - port number: bits 0-4
+ * (USB_MAXCHILDREN is 31)
*
- * - operation flag: bit 5
- * (0 = submit urb,
- * 1 = unlink urb)
+ * - operation flag: bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
*
- * - direction: bit 7
- * (0 = Host-to-Device [Out]
- * 1 = Device-to-Host [In])
+ * - direction: bit 7
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
*
- * - device address: bits 8-14
+ * - device address: bits 8-14
*
- * - endpoint: bits 15-18
+ * - endpoint: bits 15-18
*
- * - pipe type: bits 30-31
- * (00 = isochronous, 01 = interrupt,
- * 10 = control, 11 = bulk)
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
*/
-#define USBIF_PIPE_PORT_MASK 0x0000001f
-#define USBIF_PIPE_UNLINK 0x00000020
-#define USBIF_PIPE_DIR 0x00000080
-#define USBIF_PIPE_DEV_MASK 0x0000007f
-#define USBIF_PIPE_DEV_SHIFT 8
-#define USBIF_PIPE_EP_MASK 0x0000000f
-#define USBIF_PIPE_EP_SHIFT 15
-#define USBIF_PIPE_TYPE_MASK 0x00000003
-#define USBIF_PIPE_TYPE_SHIFT 30
-#define USBIF_PIPE_TYPE_ISOC 0
-#define USBIF_PIPE_TYPE_INT 1
-#define USBIF_PIPE_TYPE_CTRL 2
-#define USBIF_PIPE_TYPE_BULK 3
-
-#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK)
-#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
-
-#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK)
-#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
-#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK)
-
-#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR)
-#define usbif_pipeout(pipe) (!usbif_pipein(pipe))
-
-#define usbif_pipedevice(pipe) \
- (((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK)
-
-#define usbif_pipeendpoint(pipe) \
- (((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK)
-
-#define usbif_pipetype(pipe) \
- (((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK)
-#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC)
-#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT)
-#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL)
-#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK)
+#define USBIF_PIPE_PORT_MASK 0x0000001f
+#define USBIF_PIPE_UNLINK 0x00000020
+#define USBIF_PIPE_DIR 0x00000080
+#define USBIF_PIPE_DEV_MASK 0x0000007f
+#define USBIF_PIPE_DEV_SHIFT 8
+#define USBIF_PIPE_EP_MASK 0x0000000f
+#define USBIF_PIPE_EP_SHIFT 15
+#define USBIF_PIPE_TYPE_MASK 0x00000003
+#define USBIF_PIPE_TYPE_SHIFT 30
+#define USBIF_PIPE_TYPE_ISOC 0
+#define USBIF_PIPE_TYPE_INT 1
+#define USBIF_PIPE_TYPE_CTRL 2
+#define USBIF_PIPE_TYPE_BULK 3
+
+#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK)
+#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
+
+#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK)
+#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
+#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK)
+
+#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR)
+#define usbif_pipeout(pipe) (!usbif_pipein(pipe))
+
+#define usbif_pipedevice(pipe) \
+ (((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK)
+
+#define usbif_pipeendpoint(pipe) \
+ (((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK)
+
+#define usbif_pipetype(pipe) \
+ (((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK)
+#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC)
+#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT)
+#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL)
+#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK)
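+
+/*
+ * Decoding sketch (illustrative): split a request's pipe field into its
+ * documented components using the accessors above.
+ *
+ *   unsigned int port   = usbif_pipeportnum(pipe);
+ *   unsigned int device = usbif_pipedevice(pipe);
+ *   unsigned int ep     = usbif_pipeendpoint(pipe);
+ *   int is_in           = usbif_pipein(pipe) != 0;
+ *   int is_bulk         = usbif_pipebulk(pipe);
+ */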
#define USBIF_MAX_SEGMENTS_PER_REQUEST (16)
-#define USBIF_MAX_PORTNR 31
-#define USBIF_RING_SIZE 4096
+#define USBIF_MAX_PORTNR 31
+#define USBIF_RING_SIZE 4096
/*
* RING for transferring urbs.
*/
struct usbif_request_segment {
- grant_ref_t gref;
- uint16_t offset;
- uint16_t length;
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
};
struct usbif_urb_request {
- uint16_t id; /* request id */
- uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
-
- /* basic urb parameter */
- uint32_t pipe;
- uint16_t transfer_flags;
-#define USBIF_SHORT_NOT_OK 0x0001
- uint16_t buffer_length;
- union {
- uint8_t ctrl[8]; /* setup_packet (Ctrl) */
-
- struct {
- uint16_t interval; /* maximum (1024*8) in usb core */
- uint16_t start_frame; /* start frame */
- uint16_t number_of_packets; /* number of ISO packet */
- uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
- } isoc;
-
- struct {
- uint16_t interval; /* maximum (1024*8) in usb core */
- uint16_t pad[3];
- } intr;
-
- struct {
- uint16_t unlink_id; /* unlink request id */
- uint16_t pad[3];
- } unlink;
-
- } u;
-
- /* urb data segments */
- struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST];
+ uint16_t id; /* request id */
+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+
+ /* basic urb parameter */
+ uint32_t pipe;
+ uint16_t transfer_flags;
+#define USBIF_SHORT_NOT_OK 0x0001
+ uint16_t buffer_length;
+ union {
+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t start_frame; /* start frame */
+ uint16_t number_of_packets; /* number of ISO packet */
+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
+ } isoc;
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t pad[3];
+ } intr;
+
+ struct {
+ uint16_t unlink_id; /* unlink request id */
+ uint16_t pad[3];
+ } unlink;
+
+ } u;
+
+ /* urb data segments */
+ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct usbif_urb_request usbif_urb_request_t;
struct usbif_urb_response {
- uint16_t id; /* request id */
- uint16_t start_frame; /* start frame (ISO) */
- int32_t status; /* status (non-ISO) */
- int32_t actual_length; /* actual transfer length */
- int32_t error_count; /* number of ISO errors */
+ uint16_t id; /* request id */
+ uint16_t start_frame; /* start frame (ISO) */
+ int32_t status; /* status (non-ISO) */
+#define USBIF_STATUS_OK 0
+#define USBIF_STATUS_NODEV (-19)
+#define USBIF_STATUS_INVAL (-22)
+#define USBIF_STATUS_STALL (-32)
+#define USBIF_STATUS_IOERROR (-71)
+#define USBIF_STATUS_BABBLE (-75)
+#define USBIF_STATUS_SHUTDOWN (-108)
+ int32_t actual_length; /* actual transfer length */
+ int32_t error_count; /* number of ISO errors */
};
typedef struct usbif_urb_response usbif_urb_response_t;
@@ -233,18 +387,18 @@ DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response
* RING for notifying connect/disconnect events to frontend
*/
struct usbif_conn_request {
- uint16_t id;
+ uint16_t id;
};
typedef struct usbif_conn_request usbif_conn_request_t;
struct usbif_conn_response {
- uint16_t id; /* request id */
- uint8_t portnum; /* port number */
- uint8_t speed; /* usb_device_speed */
-#define USBIF_SPEED_NONE 0
-#define USBIF_SPEED_LOW 1
-#define USBIF_SPEED_FULL 2
-#define USBIF_SPEED_HIGH 3
+ uint16_t id; /* request id */
+ uint8_t portnum; /* port number */
+ uint8_t speed; /* usb_device_speed */
+#define USBIF_SPEED_NONE 0
+#define USBIF_SPEED_LOW 1
+#define USBIF_SPEED_FULL 2
+#define USBIF_SPEED_HIGH 3
};
typedef struct usbif_conn_response usbif_conn_response_t;
diff --git a/include/hw/xen/interface/io/xenbus.h b/include/hw/xen/interface/io/xenbus.h
index 2fbf2a7fdc..9cd0cd7c67 100644
--- a/include/hw/xen/interface/io/xenbus.h
+++ b/include/hw/xen/interface/io/xenbus.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: MIT */
/*****************************************************************************
* xenbus.h
*
* Xenbus protocol details.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Copyright (C) 2005 XenSource Ltd.
*/
@@ -68,3 +51,13 @@ enum xenbus_state {
typedef enum xenbus_state XenbusState;
#endif /* _XEN_PUBLIC_IO_XENBUS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/io/xs_wire.h b/include/hw/xen/interface/io/xs_wire.h
new file mode 100644
index 0000000000..04e6849feb
--- /dev/null
+++ b/include/hw/xen/interface/io/xs_wire.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Details of the "wire" protocol between Xen Store Daemon and client
+ * library or guest kernel.
+ *
+ * Copyright (C) 2005 Rusty Russell IBM Corporation
+ */
+
+#ifndef _XS_WIRE_H
+#define _XS_WIRE_H
+
+enum xsd_sockmsg_type
+{
+ XS_CONTROL,
+#define XS_DEBUG XS_CONTROL
+ XS_DIRECTORY,
+ XS_READ,
+ XS_GET_PERMS,
+ XS_WATCH,
+ XS_UNWATCH,
+ XS_TRANSACTION_START,
+ XS_TRANSACTION_END,
+ XS_INTRODUCE,
+ XS_RELEASE,
+ XS_GET_DOMAIN_PATH,
+ XS_WRITE,
+ XS_MKDIR,
+ XS_RM,
+ XS_SET_PERMS,
+ XS_WATCH_EVENT,
+ XS_ERROR,
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET,
+ /* XS_RESTRICT has been removed */
+ XS_RESET_WATCHES = XS_SET_TARGET + 2,
+ XS_DIRECTORY_PART,
+
+ XS_TYPE_COUNT, /* Number of valid types. */
+
+ XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
+};
+
+#define XS_WRITE_NONE "NONE"
+#define XS_WRITE_CREATE "CREATE"
+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
+
+/* We hand errors as strings, for portability. */
+struct xsd_errors
+{
+ int errnum;
+ const char *errstring;
+};
+#ifdef EINVAL
+#define XSD_ERROR(x) { x, #x }
+/* LINTED: static unused */
+static const struct xsd_errors xsd_errors[]
+#if defined(__GNUC__)
+__attribute__((unused))
+#endif
+ = {
+ /* /!\ New errors should be added at the end of the array. */
+ XSD_ERROR(EINVAL),
+ XSD_ERROR(EACCES),
+ XSD_ERROR(EEXIST),
+ XSD_ERROR(EISDIR),
+ XSD_ERROR(ENOENT),
+ XSD_ERROR(ENOMEM),
+ XSD_ERROR(ENOSPC),
+ XSD_ERROR(EIO),
+ XSD_ERROR(ENOTEMPTY),
+ XSD_ERROR(ENOSYS),
+ XSD_ERROR(EROFS),
+ XSD_ERROR(EBUSY),
+ XSD_ERROR(EAGAIN),
+ XSD_ERROR(EISCONN),
+ XSD_ERROR(E2BIG),
+ XSD_ERROR(EPERM),
+};
+#endif
+
+struct xsd_sockmsg
+{
+ uint32_t type; /* XS_??? */
+ uint32_t req_id; /* Request identifier, echoed in daemon's response. */
+ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
+ uint32_t len; /* Length of data following this. */
+
+ /* Generally followed by nul-terminated string(s). */
+};
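+
+/*
+ * Framing sketch (illustrative; see docs/misc/xenstore.txt for the normative
+ * payload formats): a request is this fixed header immediately followed by
+ * 'len' bytes of payload, e.g. a nul-terminated path for XS_READ.
+ *
+ *   const char path[] = "domid";
+ *   struct xsd_sockmsg msg = {
+ *       .type   = XS_READ,
+ *       .req_id = 1,              // echoed back in the daemon's response
+ *       .tx_id  = 0,              // not part of a transaction
+ *       .len    = sizeof(path),   // payload length, including the NUL
+ *   };
+ *   // write 'msg' followed by 'path' into the request ring
+ */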
+
+enum xs_watch_type
+{
+ XS_WATCH_PATH = 0,
+ XS_WATCH_TOKEN
+};
+
+/*
+ * `incontents 150 xenstore_struct XenStore wire protocol.
+ *
+ * Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+ XENSTORE_RING_IDX req_cons, req_prod;
+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ uint32_t server_features; /* Bitmap of features supported by the server */
+ uint32_t connection;
+ uint32_t error;
+};
+
+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
+/* Violating these just gets you an error back */
+#define XENSTORE_ABS_PATH_MAX 3072
+#define XENSTORE_REL_PATH_MAX 2048
+
+/* The ability to reconnect a ring */
+#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
+/* The presence of the "error" field in the ring page */
+#define XENSTORE_SERVER_FEATURE_ERROR 2
+
+/* Valid values for the connection field */
+#define XENSTORE_CONNECTED 0 /* the steady-state */
+#define XENSTORE_RECONNECT 1 /* reconnect in progress */
+
+/* Valid values for the error field */
+#define XENSTORE_ERROR_NONE 0 /* No error */
+#define XENSTORE_ERROR_COMM 1 /* Communication problem */
+#define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */
+#define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */
+
+#endif /* _XS_WIRE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/memory.h b/include/hw/xen/interface/memory.h
new file mode 100644
index 0000000000..29cf5c8239
--- /dev/null
+++ b/include/hw/xen/interface/memory.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * memory.h
+ *
+ * Memory reservation and information.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_MEMORY_H__
+#define __XEN_PUBLIC_MEMORY_H__
+
+#include "xen.h"
+#include "physdev.h"
+
+/*
+ * Increase or decrease the specified domain's memory reservation. Returns the
+ * number of extents successfully allocated or freed.
+ * arg == addr of struct xen_memory_reservation.
+ */
+#define XENMEM_increase_reservation 0
+#define XENMEM_decrease_reservation 1
+#define XENMEM_populate_physmap 6
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030209
+/*
+ * Maximum # bits addressable by the user of the allocated region (e.g., I/O
+ * devices often have a 32-bit limitation even in 64-bit systems). If zero
+ * then the user has no addressing restriction. This field is not used by
+ * XENMEM_decrease_reservation.
+ */
+#define XENMEMF_address_bits(x) (x)
+#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
+/* NUMA node to allocate from. */
+#define XENMEMF_node(x) (((x) + 1) << 8)
+#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
+/* Flag to populate physmap with populate-on-demand entries */
+#define XENMEMF_populate_on_demand (1<<16)
+/* Flag to request allocation only from the node specified */
+#define XENMEMF_exact_node_request (1<<17)
+#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
+/* Flag to indicate the node specified is virtual node */
+#define XENMEMF_vnode (1<<18)
+#endif
+
+struct xen_memory_reservation {
+
+ /*
+ * XENMEM_increase_reservation:
+ * OUT: MFN (*not* GMFN) bases of extents that were allocated
+ * XENMEM_decrease_reservation:
+ * IN: GMFN bases of extents to free
+ * XENMEM_populate_physmap:
+ * IN: GPFN bases of extents to populate with memory
+ * OUT: GMFN bases of extents that were allocated
+ * (NB. This command also updates the mach_to_phys translation table)
+ * XENMEM_claim_pages:
+ * IN: must be zero
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
+
+ /* Number of extents, and size/alignment of each (2^extent_order pages). */
+ xen_ulong_t nr_extents;
+ unsigned int extent_order;
+
+#if __XEN_INTERFACE_VERSION__ >= 0x00030209
+ /* XENMEMF flags. */
+ unsigned int mem_flags;
+#else
+ unsigned int address_bits;
+#endif
+
+ /*
+ * Domain whose reservation is being changed.
+ * Unprivileged domains can specify only DOMID_SELF.
+ */
+ domid_t domid;
+};
+typedef struct xen_memory_reservation xen_memory_reservation_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
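+
+/*
+ * Population sketch (illustrative only): fill a reservation for
+ * XENMEM_populate_physmap.  NR_PAGES and the hypercall wrapper used to issue
+ * the memory_op are placeholders for whatever the environment provides.
+ *
+ *   xen_pfn_t gpfns[NR_PAGES];             // GPFN bases to populate
+ *   struct xen_memory_reservation r = {
+ *       .nr_extents   = NR_PAGES,
+ *       .extent_order = 0,                 // order-0 (single page) extents
+ *       .domid        = DOMID_SELF,
+ *   };
+ *   set_xen_guest_handle(r.extent_start, gpfns);
+ */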
+
+/*
+ * An atomic exchange of memory pages. If return code is zero then
+ * @out.extent_list provides GMFNs of the newly-allocated memory.
+ * Returns zero on complete success, otherwise a negative error code.
+ * On complete success then always @nr_exchanged == @in.nr_extents.
+ * On partial success @nr_exchanged indicates how much work was done.
+ *
+ * Note that only PV guests can use this operation.
+ */
+#define XENMEM_exchange 11
+struct xen_memory_exchange {
+ /*
+ * [IN] Details of memory extents to be exchanged (GMFN bases).
+ * Note that @in.address_bits is ignored and unused.
+ */
+ struct xen_memory_reservation in;
+
+ /*
+ * [IN/OUT] Details of new memory extents.
+ * We require that:
+ * 1. @in.domid == @out.domid
+ * 2. @in.nr_extents << @in.extent_order ==
+ * @out.nr_extents << @out.extent_order
+ * 3. @in.extent_start and @out.extent_start lists must not overlap
+ * 4. @out.extent_start lists GPFN bases to be populated
+ * 5. @out.extent_start is overwritten with allocated GMFN bases
+ */
+ struct xen_memory_reservation out;
+
+ /*
+ * [OUT] Number of input extents that were successfully exchanged:
+ * 1. The first @nr_exchanged input extents were successfully
+ * deallocated.
+ * 2. The corresponding first entries in the output extent list correctly
+ * indicate the GMFNs that were successfully exchanged.
+ * 3. All other input and output extents are untouched.
+ * 4. If not all input extents are exchanged then the return code of this
+ * command will be non-zero.
+ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
+ */
+ xen_ulong_t nr_exchanged;
+};
+typedef struct xen_memory_exchange xen_memory_exchange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
+
+/*
+ * Returns the maximum machine frame number of mapped RAM in this system.
+ * This command always succeeds (it never returns an error code).
+ * arg == NULL.
+ */
+#define XENMEM_maximum_ram_page 2
+
+struct xen_memory_domain {
+ /* [IN] Domain information is being queried for. */
+ domid_t domid;
+};
+
+/*
+ * Returns the current or maximum memory reservation, in pages, of the
+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
+ * arg == addr of struct xen_memory_domain.
+ */
+#define XENMEM_current_reservation 3
+#define XENMEM_maximum_reservation 4
+
+/*
+ * Returns the maximum GFN in use by the specified domain (may be DOMID_SELF).
+ * Returns -ve errcode on failure.
+ * arg == addr of struct xen_memory_domain.
+ */
+#define XENMEM_maximum_gpfn 14
+
+/*
+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
+ * mapping table. Architectures which do not have a m2p table do not implement
+ * this command.
+ * arg == addr of xen_machphys_mfn_list_t.
+ */
+#define XENMEM_machphys_mfn_list 5
+struct xen_machphys_mfn_list {
+ /*
+ * Size of the 'extent_start' array. Fewer entries will be filled if the
+ * machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int max_extents;
+
+ /*
+ * Pointer to buffer to fill with list of extent starts. If there are
+ * any large discontiguities in the machine address space, 2MB gaps in
+ * the machphys table will be represented by an MFN base of zero.
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
+
+ /*
+ * Number of extents written to the above array. This will be smaller
+ * than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int nr_extents;
+};
+typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
+
+/*
+ * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
+ *
+ * For a non-compat caller, this functions similarly to
+ * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
+ * m2p table.
+ */
+#define XENMEM_machphys_compat_mfn_list 25
+
+/*
+ * Returns the location in virtual address space of the machine_to_phys
+ * mapping table. Architectures which do not have a m2p table, or which do not
+ * map it by default into guest address space, do not implement this command.
+ * arg == addr of xen_machphys_mapping_t.
+ */
+#define XENMEM_machphys_mapping 12
+struct xen_machphys_mapping {
+ xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
+ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
+};
+typedef struct xen_machphys_mapping xen_machphys_mapping_t;
+DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
+
+/* Source mapping space. */
+/* ` enum phys_map_space { */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_gmfn 2 /* GMFN */
+#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
+ * XENMEM_add_to_physmap_batch only. */
+#define XENMAPSPACE_dev_mmio 5 /* device mmio region
+ ARM only; the region is mapped in
+ Stage-2 using the Normal Memory
+ Inner/Outer Write-Back Cacheable
+ memory attribute. */
+/* ` } */
+
+/*
+ * Sets the GPFN at which a particular page appears in the specified guest's
+ * physical address space (translated guests only).
+ * arg == addr of xen_add_to_physmap_t.
+ */
+#define XENMEM_add_to_physmap 7
+struct xen_add_to_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* Number of pages to go through for gmfn_range */
+ uint16_t size;
+
+ unsigned int space; /* => enum phys_map_space */
+
+#define XENMAPIDX_grant_table_status 0x80000000
+
+ /* Index into space being mapped. */
+ xen_ulong_t idx;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_add_to_physmap xen_add_to_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
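+
+/*
+ * Illustrative usage sketch (not part of the ABI): mapping the shared info
+ * page into the guest physmap, assuming the usual HYPERVISOR_memory_op()
+ * wrapper; shared_info_gpfn is a hypothetical frame number chosen by the
+ * guest.
+ *
+ *     struct xen_add_to_physmap xatp = {
+ *         .domid = DOMID_SELF,
+ *         .space = XENMAPSPACE_shared_info,
+ *         .idx   = 0,
+ *         .gpfn  = shared_info_gpfn,
+ *     };
+ *     long rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
+ */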
+
+/* A batched version of add_to_physmap. */
+#define XENMEM_add_to_physmap_batch 23
+struct xen_add_to_physmap_batch {
+ /* IN */
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+ uint16_t space; /* => enum phys_map_space */
+
+ /* Number of pages to go through */
+ uint16_t size;
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
+#else
+ union xen_add_to_physmap_batch_extra {
+ domid_t foreign_domid; /* gmfn_foreign */
+ uint16_t res0; /* All the other spaces. Should be 0 */
+ } u;
+#endif
+
+ /* Indexes into space being mapped. */
+ XEN_GUEST_HANDLE(xen_ulong_t) idxs;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
+
+ /* OUT */
+
+ /* Per index error code. */
+ XEN_GUEST_HANDLE(int) errs;
+};
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
+#define xen_add_to_physmap_range xen_add_to_physmap_batch
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+#endif
+
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * physical address space (translated guests only).
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap 15
+struct xen_remove_from_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* GPFN of the current mapping of the page. */
+ xen_pfn_t gpfn;
+};
+typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
+
+/*** REMOVED ***/
+/*#define XENMEM_translate_gpfn_list 8*/
+
+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of xen_memory_map_t.
+ */
+#define XENMEM_memory_map 9
+struct xen_memory_map {
+ /*
+ * On call the number of entries which can be stored in buffer. On
+ * return the number of entries which have been stored in
+ * buffer.
+ */
+ unsigned int nr_entries;
+
+ /*
+ * Entries in the buffer are in the same format as returned by the
+ * BIOS INT 0x15 EAX=0xE820 call.
+ */
+ XEN_GUEST_HANDLE(void) buffer;
+};
+typedef struct xen_memory_map xen_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
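+
+/*
+ * Illustrative usage sketch (not part of the ABI): fetching the
+ * pseudo-physical map into an E820-style buffer, assuming the usual
+ * HYPERVISOR_memory_op() wrapper; struct e820_entry is a hypothetical
+ * guest-defined layout matching the BIOS INT 0x15 EAX=0xE820 format.
+ *
+ *     struct e820_entry entries[128];
+ *     struct xen_memory_map memmap = { .nr_entries = 128 };
+ *     set_xen_guest_handle(memmap.buffer, entries);
+ *     long rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+ *     // On success memmap.nr_entries holds the number of entries stored.
+ */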
+
+/*
+ * Returns the real physical memory map. Passes the same structure as
+ * XENMEM_memory_map.
+ * Specifying buffer as NULL will return the number of entries required
+ * to store the complete memory map.
+ * arg == addr of xen_memory_map_t.
+ */
+#define XENMEM_machine_memory_map 10
+
+/*
+ * Set the pseudo-physical memory map of a domain, as returned by
+ * XENMEM_memory_map.
+ * arg == addr of xen_foreign_memory_map_t.
+ */
+#define XENMEM_set_memory_map 13
+struct xen_foreign_memory_map {
+ domid_t domid;
+ struct xen_memory_map map;
+};
+typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
+
+#define XENMEM_set_pod_target 16
+#define XENMEM_get_pod_target 17
+struct xen_pod_target {
+ /* IN */
+ uint64_t target_pages;
+ /* OUT */
+ uint64_t tot_pages;
+ uint64_t pod_cache_pages;
+ uint64_t pod_entries;
+ /* IN */
+ domid_t domid;
+};
+typedef struct xen_pod_target xen_pod_target_t;
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+
+/*
+ * Get the number of MFNs saved through memory sharing.
+ * The call never fails.
+ */
+#define XENMEM_get_sharing_freed_pages 18
+#define XENMEM_get_sharing_shared_pages 19
+
+#define XENMEM_paging_op 20
+#define XENMEM_paging_op_nominate 0
+#define XENMEM_paging_op_evict 1
+#define XENMEM_paging_op_prep 2
+
+struct xen_mem_paging_op {
+ uint8_t op; /* XENMEM_paging_op_* */
+ domid_t domain;
+
+ /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
+ XEN_GUEST_HANDLE_64(const_uint8) buffer;
+ /* IN: gfn of page being operated on */
+ uint64_aligned_t gfn;
+};
+typedef struct xen_mem_paging_op xen_mem_paging_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
+
+#define XENMEM_access_op 21
+#define XENMEM_access_op_set_access 0
+#define XENMEM_access_op_get_access 1
+/*
+ * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
+ * currently unused, but since they have been in use please do not reuse them.
+ *
+ * #define XENMEM_access_op_enable_emulate 2
+ * #define XENMEM_access_op_disable_emulate 3
+ */
+#define XENMEM_access_op_set_access_multi 4
+
+typedef enum {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * change to r-w on a write
+ */
+ XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ XENMEM_access_n2rwx,
+ /* Take the domain default */
+ XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+ /* XENMEM_access_op_* */
+ uint8_t op;
+ /* xenmem_access_t */
+ uint8_t access;
+ domid_t domid;
+ /*
+ * Number of pages for set op (or size of pfn_list for
+ * XENMEM_access_op_set_access_multi)
+ * Ignored on setting default access and other ops
+ */
+ uint32_t nr;
+ /*
+ * First pfn for set op
+ * pfn for get op
+ * ~0ull is used to set and get the default access for pages
+ */
+ uint64_aligned_t pfn;
+ /*
+ * List of pfns to set access for
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /*
+ * Corresponding list of access settings for pfn_list
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
+
+#define XENMEM_sharing_op 22
+#define XENMEM_sharing_op_nominate_gfn 0
+#define XENMEM_sharing_op_nominate_gref 1
+#define XENMEM_sharing_op_share 2
+#define XENMEM_sharing_op_debug_gfn 3
+#define XENMEM_sharing_op_debug_mfn 4
+#define XENMEM_sharing_op_debug_gref 5
+#define XENMEM_sharing_op_add_physmap 6
+#define XENMEM_sharing_op_audit 7
+#define XENMEM_sharing_op_range_share 8
+#define XENMEM_sharing_op_fork 9
+#define XENMEM_sharing_op_fork_reset 10
+
+#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
+#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
+
+/* The following allows sharing of grant refs. This is useful
+ * for sharing utilities sitting as "filters" in IO backends
+ * (e.g. memshr + blktap(2)). The IO backend is only exposed
+ * to grant references, and this allows sharing of the grefs */
+#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)
+
+#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
+ (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
+#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
+ ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
+#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
+ ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
+
+struct xen_mem_sharing_op {
+ uint8_t op; /* XENMEM_sharing_op_* */
+ domid_t domain;
+
+ union {
+ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
+ union {
+ uint64_aligned_t gfn; /* IN: gfn to nominate */
+ uint32_t grant_ref; /* IN: grant ref to nominate */
+ } u;
+ uint64_aligned_t handle; /* OUT: the handle */
+ } nominate;
+ struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
+ uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
+ uint64_aligned_t source_handle; /* IN: handle to the source page */
+ uint64_aligned_t client_gfn; /* IN: the client gfn */
+ uint64_aligned_t client_handle; /* IN: handle to the client page */
+ domid_t client_domain; /* IN: the client domain id */
+ } share;
+ struct mem_sharing_op_range { /* OP_RANGE_SHARE */
+ uint64_aligned_t first_gfn; /* IN: the first gfn */
+ uint64_aligned_t last_gfn; /* IN: the last gfn */
+ uint64_aligned_t opaque; /* Must be set to 0 */
+ domid_t client_domain; /* IN: the client domain id */
+ uint16_t _pad[3]; /* Must be set to 0 */
+ } range;
+ struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
+ union {
+ uint64_aligned_t gfn; /* IN: gfn to debug */
+ uint64_aligned_t mfn; /* IN: mfn to debug */
+ uint32_t gref; /* IN: gref to debug */
+ } u;
+ } debug;
+ struct mem_sharing_op_fork { /* OP_FORK{,_RESET} */
+ domid_t parent_domain; /* IN: parent's domain id */
+/* Only makes sense for short-lived forks */
+#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
+/* Only makes sense for short-lived forks */
+#define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1)
+#define XENMEM_FORK_RESET_STATE (1u << 2)
+#define XENMEM_FORK_RESET_MEMORY (1u << 3)
+ uint16_t flags; /* IN: optional settings */
+ uint32_t pad; /* Must be set to 0 */
+ } fork;
+ } u;
+};
+typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
+
+/*
+ * Attempt to stake a claim for a domain on a quantity of pages
+ * of system RAM, but _not_ assign specific pageframes. Only
+ * arithmetic is performed so the hypercall is very fast and need
+ * not be preemptible, thus sidestepping time-of-check-time-of-use
+ * races for memory allocation. Returns 0 if the hypervisor page
+ * allocator has atomically and successfully claimed the requested
+ * number of pages, else non-zero.
+ *
+ * Any domain may have only one active claim. When sufficient memory
+ * has been allocated to resolve the claim, the claim silently expires.
+ * Claiming zero pages effectively resets any outstanding claim and
+ * is always successful.
+ *
+ * Note that a valid claim may be staked even after memory has been
+ * allocated for a domain. In this case, the claim is not incremental,
+ * i.e. if the domain's total page count is 3, and a claim is staked
+ * for 10, only 7 additional pages are claimed.
+ *
+ * Caller must be privileged or the hypercall fails.
+ */
+#define XENMEM_claim_pages 24
+
+/*
+ * XENMEM_claim_pages flags - there are no flags at this time.
+ * The zero value is appropriate.
+ */
+
+/*
+ * With some legacy devices, certain guest-physical addresses cannot safely
+ * be used for other purposes, e.g. to map guest RAM. This hypercall
+ * enumerates those regions so the toolstack can avoid using them.
+ */
+#define XENMEM_reserved_device_memory_map 27
+struct xen_reserved_device_memory {
+ xen_pfn_t start_pfn;
+ xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
+ /* IN */
+ uint32_t flags;
+ /*
+ * IN/OUT
+ *
+ * Gets set to the required number of entries when too low,
+ * signaled by error code -ERANGE.
+ */
+ unsigned int nr_entries;
+ /* OUT */
+ XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+ /* IN */
+ union {
+ physdev_pci_device_t pci;
+ } dev;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
+ */
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+ /* IN - The domain whose resource is to be mapped */
+ domid_t domid;
+ /* IN - the type of resource */
+ uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+#define XENMEM_resource_grant_table 1
+#define XENMEM_resource_vmtrace_buf 2
+
+ /*
+ * IN - a type-specific resource identifier, which must be zero
+ * unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
+ * type == XENMEM_resource_grant_table -> id defined below
+ */
+ uint32_t id;
+
+#define XENMEM_resource_grant_table_id_shared 0
+#define XENMEM_resource_grant_table_id_status 1
+
+ /*
+ * IN/OUT
+ *
+ * As an IN parameter number of frames of the resource to be mapped.
+ * This value may be updated over the course of the operation.
+ *
+ * When frame_list is NULL and nr_frames is 0, this is interpreted as a
+ * request for the size of the resource, which shall be returned in the
+ * nr_frames field.
+ *
+ * The size of a resource will never be zero, but a nonzero result doesn't
+ * guarantee that a subsequent mapping request will be successful. There
+ * are further type/id specific constraints which may change between the
+ * two calls.
+ */
+ uint32_t nr_frames;
+ /*
+ * Padding field, must be zero on input.
+ * In a previous version this was an output field with the lowest bit
+ * named XENMEM_rsrc_acq_caller_owned. Future versions of this interface
+ * will not reuse this bit as an output with the field being zero on
+ * input.
+ */
+ uint32_t pad;
+ /*
+ * IN - the index of the initial frame to be mapped. This parameter
+ * is ignored if nr_frames is 0. This value may be updated
+ * over the course of the operation.
+ */
+ uint64_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
+
+ /*
+ * IN/OUT - If the tools domain is PV then, upon return, frame_list
+ * will be populated with the MFNs of the resource.
+ * If the tools domain is HVM then it is expected that, on
+ * entry, frame_list will be populated with a list of GFNs
+ * that will be mapped to the MFNs of the resource.
+ * If -EIO is returned then the frame_list has only been
+ * partially mapped and it is up to the caller to unmap all
+ * the GFNs.
+ * This parameter may be NULL if nr_frames is 0. This
+ * value may be updated over the course of the operation.
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+};
+typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
+
+/*
+ * XENMEM_get_vnumainfo used by guest to get
+ * vNUMA topology from hypervisor.
+ */
+#define XENMEM_get_vnumainfo 26
+
+/* vNUMA node memory ranges */
+struct xen_vmemrange {
+ uint64_t start, end;
+ unsigned int flags;
+ unsigned int nid;
+};
+typedef struct xen_vmemrange xen_vmemrange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
+
+/*
+ * vNUMA topology specifies vNUMA node number, distance table,
+ * memory ranges and vcpu mapping provided for guests.
+ * The XENMEM_get_vnumainfo hypercall expects the guest to supply
+ * nr_vnodes, nr_vmemranges and nr_vcpus to indicate the sizes of the
+ * buffers it provides. After the guest structures are filled in,
+ * nr_vnodes, nr_vmemranges and nr_vcpus are copied back to the guest.
+ * If the supplied values were too small, the hypervisor instead returns
+ * the required values of nr_vnodes, nr_vmemranges and nr_vcpus.
+ */
+struct xen_vnuma_topology_info {
+ /* IN */
+ domid_t domid;
+ uint16_t pad;
+ /* IN/OUT */
+ unsigned int nr_vnodes;
+ unsigned int nr_vcpus;
+ unsigned int nr_vmemranges;
+ /* OUT */
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vdistance;
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vcpu_to_vnode;
+ union {
+ XEN_GUEST_HANDLE(xen_vmemrange_t) h;
+ uint64_t pad;
+ } vmemrange;
+};
+typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
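+
+/*
+ * Illustrative usage sketch (not part of the ABI): the usual two-step
+ * negotiation, assuming the HYPERVISOR_memory_op() wrapper. A first call
+ * with zero sizes learns the required dimensions; the guest then allocates
+ * the vdistance, vcpu_to_vnode and vmemrange buffers and repeats the call.
+ *
+ *     struct xen_vnuma_topology_info topo = { .domid = DOMID_SELF };
+ *     long rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &topo);
+ *     // If the supplied sizes were too small, topo.nr_vnodes,
+ *     // topo.nr_vcpus and topo.nr_vmemranges now hold the values to use
+ *     // when allocating the buffers for the second call.
+ */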
+
+/* Next available subop number is 29 */
+
+#endif /* __XEN_PUBLIC_MEMORY_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/physdev.h b/include/hw/xen/interface/physdev.h
new file mode 100644
index 0000000000..f0c0d4727c
--- /dev/null
+++ b/include/hw/xen/interface/physdev.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2006, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_PHYSDEV_H__
+#define __XEN_PUBLIC_PHYSDEV_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * int physdev_op(int cmd, void *args)
+ * @cmd == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi 12
+struct physdev_eoi {
+ /* IN */
+ uint32_t irq;
+};
+typedef struct physdev_eoi physdev_eoi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
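+
+/*
+ * Illustrative usage sketch (not part of the ABI), assuming the usual
+ * HYPERVISOR_physdev_op() wrapper; pirq is a hypothetical interrupt
+ * number previously delivered to the guest.
+ *
+ *     struct physdev_eoi eoi = { .irq = pirq };
+ *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+ */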
+
+/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. Once the guest has used this function, the
+ * semantics of PHYSDEVOP_eoi change slightly: the associated event channel
+ * is unmasked automatically. The page registered is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v1 17
+/*
+ * Register a shared page for the hypervisor to indicate whether the
+ * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to
+ * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of
+ * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by
+ * Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v2 28
+struct physdev_pirq_eoi_gmfn {
+ /* IN */
+ xen_pfn_t gmfn;
+};
+typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t);
+
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query 5
+struct physdev_irq_status_query {
+ /* IN */
+ uint32_t irq;
+ /* OUT */
+ uint32_t flags; /* XENIRQSTAT_* */
+};
+typedef struct physdev_irq_status_query physdev_irq_status_query_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
+
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi (0)
+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
+
+/* IRQ shared by multiple guests? */
+#define _XENIRQSTAT_shared (1)
+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl 6
+struct physdev_set_iopl {
+ /* IN */
+ uint32_t iopl;
+};
+typedef struct physdev_set_iopl physdev_set_iopl_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
+
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap 7
+struct physdev_set_iobitmap {
+ /* IN */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(uint8) bitmap;
+#else
+ uint8_t *bitmap;
+#endif
+ uint32_t nr_ports;
+};
+typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
+
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read 8
+#define PHYSDEVOP_apic_write 9
+struct physdev_apic {
+ /* IN */
+ unsigned long apic_physbase;
+ uint32_t reg;
+ /* IN or OUT */
+ uint32_t value;
+};
+typedef struct physdev_apic physdev_apic_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
+
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector 10
+#define PHYSDEVOP_free_irq_vector 11
+struct physdev_irq {
+ /* IN */
+ uint32_t irq;
+ /* IN or OUT */
+ uint32_t vector;
+};
+typedef struct physdev_irq physdev_irq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
+
+#define MAP_PIRQ_TYPE_MSI 0x0
+#define MAP_PIRQ_TYPE_GSI 0x1
+#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
+#define MAP_PIRQ_TYPE_MULTI_MSI 0x4
+
+#define PHYSDEVOP_map_pirq 13
+struct physdev_map_pirq {
+ domid_t domid;
+ /* IN */
+ int type;
+ /* IN (ignored for ..._MULTI_MSI) */
+ int index;
+ /* IN or OUT */
+ int pirq;
+ /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
+ int bus;
+ /* IN */
+ int devfn;
+ /* IN (also OUT for ..._MULTI_MSI) */
+ int entry_nr;
+ /* IN */
+ uint64_t table_base;
+};
+typedef struct physdev_map_pirq physdev_map_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
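+
+/*
+ * Illustrative usage sketch (not part of the ABI): mapping a GSI onto a
+ * pirq, assuming the usual HYPERVISOR_physdev_op() wrapper; gsi is a
+ * hypothetical interrupt number and the negative pirq value asks Xen to
+ * pick a free one (consult the hypervisor for the exact allocation rules).
+ *
+ *     struct physdev_map_pirq map = {
+ *         .domid = DOMID_SELF,
+ *         .type  = MAP_PIRQ_TYPE_GSI,
+ *         .index = gsi,
+ *         .pirq  = -1,
+ *     };
+ *     int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);
+ *     // On success map.pirq holds the pirq the GSI was bound to.
+ */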
+
+#define PHYSDEVOP_unmap_pirq 14
+struct physdev_unmap_pirq {
+ domid_t domid;
+ /* IN */
+ int pirq;
+};
+
+typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
+
+#define PHYSDEVOP_manage_pci_add 15
+#define PHYSDEVOP_manage_pci_remove 16
+struct physdev_manage_pci {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+
+typedef struct physdev_manage_pci physdev_manage_pci_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
+
+#define PHYSDEVOP_restore_msi 19
+struct physdev_restore_msi {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_restore_msi physdev_restore_msi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t);
+
+#define PHYSDEVOP_manage_pci_add_ext 20
+struct physdev_manage_pci_ext {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t is_extfn;
+ uint32_t is_virtfn;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+};
+
+typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t);
+
+/*
+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
+ * hypercall since 0x00030202.
+ */
+struct physdev_op {
+ uint32_t cmd;
+ union {
+ physdev_irq_status_query_t irq_status_query;
+ physdev_set_iopl_t set_iopl;
+ physdev_set_iobitmap_t set_iobitmap;
+ physdev_apic_t apic_op;
+ physdev_irq_t irq_op;
+ } u;
+};
+typedef struct physdev_op physdev_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
+
+#define PHYSDEVOP_setup_gsi 21
+struct physdev_setup_gsi {
+    /* IN */
+    int gsi;
+    uint8_t triggering;
+    uint8_t polarity;
+};
+
+typedef struct physdev_setup_gsi physdev_setup_gsi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t);
+
+/* leave PHYSDEVOP 22 free */
+
+/* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI
+ * the hypercall returns a free pirq */
+#define PHYSDEVOP_get_free_pirq 23
+struct physdev_get_free_pirq {
+ /* IN */
+ int type;
+ /* OUT */
+ uint32_t pirq;
+};
+
+typedef struct physdev_get_free_pirq physdev_get_free_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t);
+
+#define XEN_PCI_MMCFG_RESERVED 0x1
+
+#define PHYSDEVOP_pci_mmcfg_reserved 24
+struct physdev_pci_mmcfg_reserved {
+ uint64_t address;
+ uint16_t segment;
+ uint8_t start_bus;
+ uint8_t end_bus;
+ uint32_t flags;
+};
+typedef struct physdev_pci_mmcfg_reserved physdev_pci_mmcfg_reserved_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_mmcfg_reserved_t);
+
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+ /*
+ * Optional parameters array.
+ * First element ([0]) is PXM domain associated with the device (if
+ * XEN_PCI_DEV_PXM is set)
+ */
+ uint32_t optarr[XEN_FLEX_ARRAY_DIM];
+};
+typedef struct physdev_pci_device_add physdev_pci_device_add_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+/*
+ * Dom0 should use these two to announce that MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix 30
+#define PHYSDEVOP_release_msix 31
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_pci_device physdev_pci_device_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
+
+#define PHYSDEVOP_DBGP_RESET_PREPARE 1
+#define PHYSDEVOP_DBGP_RESET_DONE 2
+
+#define PHYSDEVOP_DBGP_BUS_UNKNOWN 0
+#define PHYSDEVOP_DBGP_BUS_PCI 1
+
+#define PHYSDEVOP_dbgp_op 29
+struct physdev_dbgp_op {
+ /* IN */
+ uint8_t op;
+ uint8_t bus;
+ union {
+ physdev_pci_device_t pci;
+ } u;
+};
+typedef struct physdev_dbgp_op physdev_dbgp_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t);
+
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen. **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202. The guard above was
+ * only added after 4.5, however, and hence does not check for 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+#endif
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040200
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
+#else
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v2
+#endif
+
+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/sched.h b/include/hw/xen/interface/sched.h
new file mode 100644
index 0000000000..b4362c6a1d
--- /dev/null
+++ b/include/hw/xen/interface/sched.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * sched.h
+ *
+ * Scheduler state interactions
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_SCHED_H__
+#define __XEN_PUBLIC_SCHED_H__
+
+#include "event_channel.h"
+
+/*
+ * `incontents 150 sched Guest Scheduler Operations
+ *
+ * The SCHEDOP interface provides mechanisms for a guest to interact
+ * with the scheduler, including yield, blocking and shutting itself
+ * down.
+ */
+
+/*
+ * The prototype for this hypercall is:
+ * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
+ *
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == Operation-specific extra argument(s), as described below.
+ * ... == Additional Operation-specific extra arguments, described below.
+ *
+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
+ * of this hypercall, supporting only the commands yield, block and shutdown:
+ * long sched_op(int cmd, unsigned long arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
+ * == SHUTDOWN_* code (SCHEDOP_shutdown)
+ *
+ * This legacy version is available to new guests as:
+ * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
+ */
+
+/* ` enum sched_op { // SCHEDOP_* => struct sched_* */
+/*
+ * Voluntarily yield the CPU.
+ * @arg == NULL.
+ */
+#define SCHEDOP_yield 0
+
+/*
+ * Block execution of this VCPU until an event is received for processing.
+ * If called with event upcalls masked, this operation will atomically
+ * reenable event delivery and check for pending events before blocking the
+ * VCPU. This avoids a "wakeup waiting" race.
+ * @arg == NULL.
+ */
+#define SCHEDOP_block 1
+
+/*
+ * Halt execution of this domain (all VCPUs) and notify the system controller.
+ * @arg == pointer to sched_shutdown_t structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then
+ * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
+ * of the guest's start info page. RDX/EDX is the third hypercall
+ * argument.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend, this hypercall
+ * returns 1 if suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
+ */
+#define SCHEDOP_shutdown 2
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll_t structure.
+ */
+#define SCHEDOP_poll 3
+
+/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown_t structure.
+ */
+#define SCHEDOP_remote_shutdown 4
+
+/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code 5
+
+/*
+ * Setup, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog_t structure.
+ * With id == 0, setup a domain watchdog timer to cause domain shutdown
+ * after timeout, returns watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog 6
+
+/*
+ * Override the current vcpu affinity by pinning it to one physical cpu or
+ * undo this override restoring the previous affinity.
+ * @arg == pointer to sched_pin_override_t structure.
+ *
+ * A negative pcpu value will undo a previous pin override and restore the
+ * previous cpu affinity.
+ * This call is allowed for the hardware domain only and requires the cpu
+ * to be part of the domain's cpupool.
+ */
+#define SCHEDOP_pin_override 7
+/* ` } */
+
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_shutdown sched_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
+
+struct sched_poll {
+ XEN_GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+typedef struct sched_poll sched_poll_t;
+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
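+
+/*
+ * Illustrative usage sketch (not part of the ABI): polling a single event
+ * channel for up to one second, assuming the usual HYPERVISOR_sched_op()
+ * wrapper; my_port is a hypothetical bound event channel and now_ns() a
+ * hypothetical helper returning the current system time in nanoseconds.
+ *
+ *     evtchn_port_t port = my_port;
+ *     struct sched_poll poll = {
+ *         .nr_ports = 1,
+ *         .timeout  = now_ns() + 1000000000ULL,  // absolute system time, ns
+ *     };
+ *     set_xen_guest_handle(poll.ports, &port);
+ *     long rc = HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
+ */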
+
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
+
+struct sched_watchdog {
+ uint32_t id; /* watchdog ID */
+ uint32_t timeout; /* timeout */
+};
+typedef struct sched_watchdog sched_watchdog_t;
+DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
+
+struct sched_pin_override {
+ int32_t pcpu;
+};
+typedef struct sched_pin_override sched_pin_override_t;
+DEFINE_XEN_GUEST_HANDLE(sched_pin_override_t);
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+/* ` enum sched_shutdown_reason { */
+#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
+#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
+#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+
+/*
+ * The domain has asked for a 'soft reset' to be performed on it. The expected
+ * behavior is to reset internal Xen state for the domain, returning it to the
+ * point where it was created, but leaving the domain's memory contents and
+ * vCPU contexts intact. This allows the domain to start over and set up all
+ * Xen-specific interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
+#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */
+/* ` } */
+
+#endif /* __XEN_PUBLIC_SCHED_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/trace.h b/include/hw/xen/interface/trace.h
new file mode 100644
index 0000000000..62a179971d
--- /dev/null
+++ b/include/hw/xen/interface/trace.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * include/public/trace.h
+ *
+ * Mark Williamson, (C) 2004 Intel Research Cambridge
+ * Copyright (C) 2005 Bin Ren
+ */
+
+#ifndef __XEN_PUBLIC_TRACE_H__
+#define __XEN_PUBLIC_TRACE_H__
+
+#define TRACE_EXTRA_MAX 7
+#define TRACE_EXTRA_SHIFT 28
+
+/* Trace classes */
+#define TRC_CLS_SHIFT 16
+#define TRC_GEN 0x0001f000 /* General trace */
+#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
+#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
+#define TRC_HVM 0x0008f000 /* Xen HVM trace */
+#define TRC_MEM 0x0010f000 /* Xen memory trace */
+#define TRC_PV 0x0020f000 /* Xen PV traces */
+#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */
+#define TRC_HW 0x0080f000 /* Xen hardware-related traces */
+#define TRC_GUEST 0x0800f000 /* Guest-generated traces */
+#define TRC_ALL 0x0ffff000
+#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
+#define TRC_HD_CYCLE_FLAG (1UL<<31)
+#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
+#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
+
+/* Trace subclasses */
+#define TRC_SUBCLS_SHIFT 12
+
+/* Trace subclasses for HVM */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_HVM_EMUL 0x00084000 /* emulated devices */
+
+#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
+#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
+#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
+
+/*
+ * The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are
+ * reserved for encoding what scheduler produced the information. The
+ * actual event is encoded in the last 9 bits.
+ *
+ * This means we have 8 scheduling IDs available (which means at most 8
+ * schedulers generating events) and, in each scheduler, up to 512
+ * different events.
+ */
+#define TRC_SCHED_ID_BITS 3
+#define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS)
+#define TRC_SCHED_ID_MASK (((1UL<<TRC_SCHED_ID_BITS) - 1) << TRC_SCHED_ID_SHIFT)
+#define TRC_SCHED_EVT_MASK (~(TRC_SCHED_ID_MASK))
+
+/* Per-scheduler IDs, to identify scheduler specific events */
+#define TRC_SCHED_CSCHED 0
+#define TRC_SCHED_CSCHED2 1
+/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
+#define TRC_SCHED_ARINC653 3
+#define TRC_SCHED_RTDS 4
+#define TRC_SCHED_SNULL 5
+
+/* Per-scheduler tracing */
+#define TRC_SCHED_CLASS_EVT(_c, _e) \
+ ( ( TRC_SCHED_CLASS | \
+ ((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
+ (_e & TRC_SCHED_EVT_MASK) )
+
+/* Trace classes for DOM0 operations */
+#define TRC_DOM0_DOMOPS 0x00041000 /* Domains manipulations */
+
+/* Trace classes for Hardware */
+#define TRC_HW_PM 0x00801000 /* Power management traces */
+#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
+
+/* Trace events per class */
+#define TRC_LOST_RECORDS (TRC_GEN + 1)
+#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
+#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
+
+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2)
+#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
+#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
+#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)
+#define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4)
+#define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5)
+#define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6)
+#define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7)
+#define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8)
+#define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9)
+#define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10)
+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11)
+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12)
+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13)
+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
+#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16)
+#define TRC_SCHED_SWITCH_INFCONT (TRC_SCHED_VERBOSE + 17)
+
+#define TRC_DOM0_DOM_ADD (TRC_DOM0_DOMOPS + 1)
+#define TRC_DOM0_DOM_REM (TRC_DOM0_DOMOPS + 2)
+
+#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
+#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
+#define TRC_PV_ENTRY 0x00201000 /* Hypervisor entry points for PV guests. */
+#define TRC_PV_SUBCALL 0x00202000 /* Sub-call in a multicall hypercall */
+
+#define TRC_PV_HYPERCALL (TRC_PV_ENTRY + 1)
+#define TRC_PV_TRAP (TRC_PV_ENTRY + 3)
+#define TRC_PV_PAGE_FAULT (TRC_PV_ENTRY + 4)
+#define TRC_PV_FORCED_INVALID_OP (TRC_PV_ENTRY + 5)
+#define TRC_PV_EMULATE_PRIVOP (TRC_PV_ENTRY + 6)
+#define TRC_PV_EMULATE_4GB (TRC_PV_ENTRY + 7)
+#define TRC_PV_MATH_STATE_RESTORE (TRC_PV_ENTRY + 8)
+#define TRC_PV_PAGING_FIXUP (TRC_PV_ENTRY + 9)
+#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV_ENTRY + 10)
+#define TRC_PV_PTWR_EMULATION (TRC_PV_ENTRY + 11)
+#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV_ENTRY + 12)
+#define TRC_PV_HYPERCALL_V2 (TRC_PV_ENTRY + 13)
+#define TRC_PV_HYPERCALL_SUBCALL (TRC_PV_SUBCALL + 14)
+
+/*
+ * TRC_PV_HYPERCALL_V2 format
+ *
+ * Only some of the hypercall arguments are recorded. Bit fields A0 to
+ * A5 in the first extra word are set if the argument is present and
+ * the arguments themselves are packed sequentially in the following
+ * words.
+ *
+ * The TRC_64_FLAG bit is not set for these events (even if there are
+ * 64-bit arguments in the record).
+ *
+ * Word
+ * 0 bit 31 30|29 28|27 26|25 24|23 22|21 20|19 ... 0
+ * A5 |A4 |A3 |A2 |A1 |A0 |Hypercall op
+ * 1 First 32 bit (or low word of first 64 bit) arg in record
+ * 2 Second 32 bit (or high word of first 64 bit) arg in record
+ * ...
+ *
+ * A0-A5 bitfield values:
+ *
+ * 00b Argument not present
+ * 01b 32-bit argument present
+ * 10b 64-bit argument present
+ * 11b Reserved
+ */
+#define TRC_PV_HYPERCALL_V2_ARG_32(i) (0x1 << (20 + 2*(i)))
+#define TRC_PV_HYPERCALL_V2_ARG_64(i) (0x2 << (20 + 2*(i)))
+#define TRC_PV_HYPERCALL_V2_ARG_MASK (0xfff00000)
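+
+/*
+ * Illustrative decode sketch (not part of the ABI): given word 0 of the
+ * layout above (the first extra data word of a TRC_PV_HYPERCALL_V2 record,
+ * here the hypothetical variable word0), recover the hypercall op and scan
+ * the per-argument presence bits.
+ *
+ *     unsigned int op = word0 & ~TRC_PV_HYPERCALL_V2_ARG_MASK; // bits 19..0
+ *     for (int i = 0; i < 6; i++) {
+ *         if (word0 & TRC_PV_HYPERCALL_V2_ARG_32(i))
+ *             ;  // argument i follows as one 32-bit word
+ *         else if (word0 & TRC_PV_HYPERCALL_V2_ARG_64(i))
+ *             ;  // argument i follows as two 32-bit words (low, then high)
+ *     }
+ */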
+
+#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
+#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
+#define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3)
+#define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4)
+#define TRC_SHADOW_MMIO (TRC_SHADOW + 5)
+#define TRC_SHADOW_FIXUP (TRC_SHADOW + 6)
+#define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7)
+#define TRC_SHADOW_EMULATE (TRC_SHADOW + 8)
+#define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9)
+#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10)
+#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11)
+#define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12)
+#define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13)
+#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14)
+#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15)
+
+/* trace events per subclass */
+#define TRC_HVM_NESTEDFLAG (0x400)
+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
+#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
+#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
+#define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16)
+#define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17)
+#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
+#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
+#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
+#define TRC_HVM_RDTSC (TRC_HVM_HANDLER + 0x1a)
+#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21)
+#define TRC_HVM_REALMODE_EMULATE (TRC_HVM_HANDLER + 0x22)
+#define TRC_HVM_TRAP (TRC_HVM_HANDLER + 0x23)
+#define TRC_HVM_TRAP_DEBUG (TRC_HVM_HANDLER + 0x24)
+#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
+#define TRC_HVM_XCR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x26)
+#define TRC_HVM_XCR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x27)
+
+#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
+#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
+
+/* Trace events for emulated devices */
+#define TRC_HVM_EMUL_HPET_START_TIMER (TRC_HVM_EMUL + 0x1)
+#define TRC_HVM_EMUL_PIT_START_TIMER (TRC_HVM_EMUL + 0x2)
+#define TRC_HVM_EMUL_RTC_START_TIMER (TRC_HVM_EMUL + 0x3)
+#define TRC_HVM_EMUL_LAPIC_START_TIMER (TRC_HVM_EMUL + 0x4)
+#define TRC_HVM_EMUL_HPET_STOP_TIMER (TRC_HVM_EMUL + 0x5)
+#define TRC_HVM_EMUL_PIT_STOP_TIMER (TRC_HVM_EMUL + 0x6)
+#define TRC_HVM_EMUL_RTC_STOP_TIMER (TRC_HVM_EMUL + 0x7)
+#define TRC_HVM_EMUL_LAPIC_STOP_TIMER (TRC_HVM_EMUL + 0x8)
+#define TRC_HVM_EMUL_PIT_TIMER_CB (TRC_HVM_EMUL + 0x9)
+#define TRC_HVM_EMUL_LAPIC_TIMER_CB (TRC_HVM_EMUL + 0xA)
+#define TRC_HVM_EMUL_PIC_INT_OUTPUT (TRC_HVM_EMUL + 0xB)
+#define TRC_HVM_EMUL_PIC_KICK (TRC_HVM_EMUL + 0xC)
+#define TRC_HVM_EMUL_PIC_INTACK (TRC_HVM_EMUL + 0xD)
+#define TRC_HVM_EMUL_PIC_POSEDGE (TRC_HVM_EMUL + 0xE)
+#define TRC_HVM_EMUL_PIC_NEGEDGE (TRC_HVM_EMUL + 0xF)
+#define TRC_HVM_EMUL_PIC_PEND_IRQ_CALL (TRC_HVM_EMUL + 0x10)
+#define TRC_HVM_EMUL_LAPIC_PIC_INTR (TRC_HVM_EMUL + 0x11)
+
+/* Trace events for power management */
+#define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01)
+#define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02)
+#define TRC_PM_IDLE_EXIT (TRC_HW_PM + 0x03)
+
+/* Trace events for IRQs */
+#define TRC_HW_IRQ_MOVE_CLEANUP_DELAY (TRC_HW_IRQ + 0x1)
+#define TRC_HW_IRQ_MOVE_CLEANUP (TRC_HW_IRQ + 0x2)
+#define TRC_HW_IRQ_BIND_VECTOR (TRC_HW_IRQ + 0x3)
+#define TRC_HW_IRQ_CLEAR_VECTOR (TRC_HW_IRQ + 0x4)
+#define TRC_HW_IRQ_MOVE_FINISH (TRC_HW_IRQ + 0x5)
+#define TRC_HW_IRQ_ASSIGN_VECTOR (TRC_HW_IRQ + 0x6)
+#define TRC_HW_IRQ_UNMAPPED_VECTOR (TRC_HW_IRQ + 0x7)
+#define TRC_HW_IRQ_HANDLED (TRC_HW_IRQ + 0x8)
+
+/*
+ * Event Flags
+ *
+ * Some events (e.g., TRC_PV_TRAP and TRC_HVM_IOMEM_READ) have multiple
+ * record formats. These event flags distinguish between the
+ * different formats.
+ */
+#define TRC_64_FLAG 0x100 /* Addresses are 64 bits (instead of 32 bits) */
+
+/* This structure represents a single trace buffer record. */
+struct t_rec {
+ uint32_t event:28;
+ uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */
+ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */
+ union {
+ struct {
+ uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
+ uint32_t extra_u32[7]; /* event data items */
+ } cycles;
+ struct {
+ uint32_t extra_u32[7]; /* event data items */
+ } nocycles;
+ } u;
+};
+
+/*
+ * This structure contains the metadata for a single trace buffer. The cons
+ * and prod fields index into an array of struct t_rec's.
+ */
+struct t_buf {
+ /* Assume the data buffer size is X. X is generally not a power of 2.
+ * CONS and PROD are incremented modulo (2*X):
+ * 0 <= cons < 2*X
+ * 0 <= prod < 2*X
+ * This is done because addition modulo X breaks at 2^32 when X is not a
+ * power of 2:
+ * (((2^32 - 1) % X) + 1) % X != (2^32) % X
+ */
+ uint32_t cons; /* Offset of next item to be consumed by control tools. */
+ uint32_t prod; /* Offset of next item to be produced by Xen. */
+ /* Records follow immediately after the meta-data header. */
+};
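+
+/*
+ * Illustrative consumer sketch (not part of the ABI): turning the
+ * modulo-(2*X) cons counter into a byte offset within a data buffer of X
+ * bytes; data_size (X) is a hypothetical per-buffer size known to the tool
+ * that mapped the buffers, and buf points at the struct t_buf.
+ *
+ *     uint32_t cons   = buf->cons;
+ *     uint32_t offset = (cons >= data_size) ? cons - data_size : cons;
+ *     // 'offset' locates the next record to consume; after processing it,
+ *     // advance buf->cons by the record size, wrapping modulo 2*X.
+ */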
+
+/* Structure used to pass MFNs to the trace buffers back to trace consumers.
+ * Offset is an offset into the mapped structure where the mfn list will be held.
+ * MFNs will be at ((unsigned long *)(t_info))+(t_info->mfn_offset[cpu]).
+ */
+struct t_info {
+ uint16_t tbuf_size; /* Size in pages of each trace buffer */
+ uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */
+ /* MFN lists immediately after the header */
+};
+
+#endif /* __XEN_PUBLIC_TRACE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/vcpu.h b/include/hw/xen/interface/vcpu.h
new file mode 100644
index 0000000000..81a3b3a743
--- /dev/null
+++ b/include/hw/xen/interface/vcpu.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU initialisation, query, and hotplug.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
+ * @cmd == VCPUOP_??? (VCPU operation).
+ * @vcpuid == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Initialise a VCPU. Each VCPU can be initialised only once. A
+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
+ * structure containing the initial state for the VCPU. For x86
+ * HVM based guests this is a pointer to a vcpu_hvm_context
+ * structure.
+ */
+#define VCPUOP_initialise 0
+
+/*
+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
+ * if the VCPU has not been initialised (VCPUOP_initialise).
+ */
+#define VCPUOP_up 1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ * 1. This operation may return, and VCPU_is_up may return false, before the
+ * VCPU stops running (i.e., the command is asynchronous). It is a good
+ * idea to ensure that the VCPU has entered a non-critical loop before
+ * bringing it down. Alternatively, this operation is guaranteed
+ * synchronous if invoked by the VCPU itself.
+ * 2. After a VCPU is initialised, there is currently no way to drop all its
+ * references to domain memory. Even a VCPU that is down still holds
+ * memory references via its pagetable base pointer and GDT. It is good
+ * practice to move a VCPU onto an 'idle' or default page table, LDT and
+ * GDT before bringing it down.
+ */
+#define VCPUOP_down 2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up 3
+
+/*
+ * Return information about the state and running time of a VCPU.
+ * @extra_arg == pointer to vcpu_runstate_info structure.
+ */
+#define VCPUOP_get_runstate_info 4
+struct vcpu_runstate_info {
+ /* VCPU's current state (RUNSTATE_*). */
+ int state;
+ /* When was current state entered (system time, ns)? */
+ uint64_t state_entry_time;
+ /*
+ * Update indicator set in state_entry_time:
+ * When activated via VMASST_TYPE_runstate_update_flag, set during
+ * updates in guest memory mapped copy of vcpu_runstate_info.
+ */
+#define XEN_RUNSTATE_UPDATE (xen_mk_ullong(1) << 63)
+ /*
+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
+ * guaranteed not to drift from system time.
+ */
+ uint64_t time[4];
+};
+typedef struct vcpu_runstate_info vcpu_runstate_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
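+
+/*
+ * Illustrative usage sketch (not part of the ABI), assuming the usual
+ * HYPERVISOR_vcpu_op() wrapper matching the prototype above; vcpu is a
+ * hypothetical VCPU id.
+ *
+ *     struct vcpu_runstate_info info;
+ *     long rc = HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpu, &info);
+ *     // On success info.state is one of the RUNSTATE_* values below and
+ *     // info.time[] holds the nanoseconds accumulated in each state.
+ */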
+
+/* VCPU is currently running on a physical CPU. */
+#define RUNSTATE_running 0
+
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define RUNSTATE_runnable 1
+
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define RUNSTATE_blocked 2
+
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define RUNSTATE_offline 3
+
+/*
+ * Register a shared memory area from which the guest may obtain its own
+ * runstate information without needing to execute a hypercall.
+ * Notes:
+ * 1. The registered address may be a virtual address, a physical address or
+ * a guest handle, depending on the platform. On x86 systems a virtual
+ * address or a guest handle should be registered.
+ * 2. Only one shared area may be registered per VCPU. The shared area is
+ * updated by the hypervisor each time the VCPU is scheduled. Thus
+ * runstate.state will always be RUNSTATE_running and
+ * runstate.state_entry_time will indicate the system time at which the
+ * VCPU was last scheduled to run.
+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
+ */
+#define VCPUOP_register_runstate_memory_area 5
+struct vcpu_register_runstate_memory_area {
+ union {
+ XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
+ struct vcpu_runstate_info *v;
+ uint64_t p;
+ } addr;
+};
+typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
+
+/*
+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
+ * which can be set via these commands. Periods smaller than one millisecond
+ * may not be supported.
+ */
+#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
+#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
+struct vcpu_set_periodic_timer {
+ uint64_t period_ns;
+};
+typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
+
+/*
+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
+ * timer which can be set via these commands.
+ */
+#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
+struct vcpu_set_singleshot_timer {
+ uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
+ uint32_t flags; /* VCPU_SSHOTTMR_??? */
+};
+typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
+
+/* Flags to VCPUOP_set_singleshot_timer. */
+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
+#define _VCPU_SSHOTTMR_future (0)
+#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
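+
+/*
+ * Illustrative usage sketch (not part of the ABI): arming a one-shot timer
+ * 10ms from now, assuming the usual HYPERVISOR_vcpu_op() wrapper and a
+ * hypothetical now_ns() helper returning current system time in ns; vcpu
+ * is a hypothetical VCPU id.
+ *
+ *     struct vcpu_set_singleshot_timer single = {
+ *         .timeout_abs_ns = now_ns() + 10 * 1000 * 1000,
+ *         .flags          = VCPU_SSHOTTMR_future,
+ *     };
+ *     long rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu,
+ *                                  &single);
+ */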
+
+/*
+ * Register a memory location in the guest address space for the
+ * vcpu_info structure. This allows the guest to place the vcpu_info
+ * structure in a convenient place, such as in a per-cpu data area.
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ *
+ * This may be called only once per vcpu.
+ */
+#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
+struct vcpu_register_vcpu_info {
+ uint64_t mfn; /* mfn of page to place vcpu_info */
+ uint32_t offset; /* offset within page */
+ uint32_t rsvd; /* unused */
+};
+typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
+
+/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
+#define VCPUOP_send_nmi 11
+
+/*
+ * Get the physical ID information for a pinned vcpu's underlying physical
+ * processor. The physical ID information is architecture-specific.
+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
+ * This command returns -EINVAL if it is not a valid operation for this VCPU.
+ */
+#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
+struct vcpu_get_physid {
+ uint64_t phys_id;
+};
+typedef struct vcpu_get_physid vcpu_get_physid_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
+#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
+#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
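+
+/*
+ * Illustrative sketch (not part of the Xen interface): fetching and decoding
+ * the physical IDs backing a pinned VCPU on x86. Assumes a guest-side
+ * HYPERVISOR_vcpu_op() wrapper.
+ */
+#if 0
+static int get_x86_ids(unsigned int vcpu, uint32_t *apic_id, uint32_t *acpi_id)
+{
+    struct vcpu_get_physid physid;
+    int rc = HYPERVISOR_vcpu_op(VCPUOP_get_physid, vcpu, &physid);
+
+    if (rc == 0) {
+        *apic_id = xen_vcpu_physid_to_x86_apicid(physid.phys_id);
+        *acpi_id = xen_vcpu_physid_to_x86_acpiid(physid.phys_id);
+    }
+    return rc;
+}
+#endif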
+
+/*
+ * Register a memory location to get a secondary copy of the vcpu time
+ * parameters. The master copy still exists as part of the vcpu shared
+ * memory area, and this secondary copy is updated whenever the master copy
+ * is updated (and using the same versioning scheme for synchronisation).
+ *
+ * The intent is that this copy may be mapped (RO) into userspace so
+ * that usermode can compute system time using the time info and the
+ * tsc. Usermode will see an array of vcpu_time_info structures, one
+ * for each vcpu, and choose the right one by an existing mechanism
+ * which allows it to get the current vcpu number (such as via a
+ * segment limit). It can then apply the normal algorithm to compute
+ * system time from the tsc.
+ *
+ * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
+ */
+#define VCPUOP_register_vcpu_time_memory_area 13
+DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t);
+struct vcpu_register_time_memory_area {
+ union {
+ XEN_GUEST_HANDLE(vcpu_time_info_t) h;
+ struct vcpu_time_info *v;
+ uint64_t p;
+ } addr;
+};
+typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
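+
+/*
+ * Illustrative sketch (not part of the Xen interface): registering a secondary
+ * time-info copy, e.g. one element of an array that is later mapped read-only
+ * into userspace. Assumes a guest-side HYPERVISOR_vcpu_op() wrapper.
+ */
+#if 0
+static int register_time_area(unsigned int vcpu, struct vcpu_time_info *ti)
+{
+    struct vcpu_register_time_memory_area area = { .addr.v = ti };
+
+    return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, vcpu, &area);
+}
+#endif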
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/version.h b/include/hw/xen/interface/version.h
new file mode 100644
index 0000000000..9c78b4f3b6
--- /dev/null
+++ b/include/hw/xen/interface/version.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+#include "xen.h"
+
+/* NB. All ops return zero on success, except XENVER_{version,pagesize,build_id}. */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version 0
+
+/* arg == xen_extraversion_t. */
+#define XENVER_extraversion 1
+typedef char xen_extraversion_t[16];
+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
+
+/* arg == xen_compile_info_t. */
+#define XENVER_compile_info 2
+struct xen_compile_info {
+ char compiler[64];
+ char compile_by[16];
+ char compile_domain[32];
+ char compile_date[32];
+};
+typedef struct xen_compile_info xen_compile_info_t;
+
+#define XENVER_capabilities 3
+typedef char xen_capabilities_info_t[1024];
+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
+
+#define XENVER_changeset 4
+typedef char xen_changeset_info_t[64];
+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
+
+#define XENVER_platform_parameters 5
+struct xen_platform_parameters {
+ xen_ulong_t virt_start;
+};
+typedef struct xen_platform_parameters xen_platform_parameters_t;
+
+#define XENVER_get_features 6
+struct xen_feature_info {
+ unsigned int submap_idx; /* IN: which 32-bit submap to return */
+ uint32_t submap; /* OUT: 32-bit submap */
+};
+typedef struct xen_feature_info xen_feature_info_t;
+
+/* Declares the features reported by XENVER_get_features. */
+#include "features.h"
+
+/* arg == NULL; returns host memory page size. */
+#define XENVER_pagesize 7
+
+/* arg == xen_domain_handle_t.
+ *
+ * The toolstack fills it out for guest consumption. It is intended to hold
+ * the UUID of the guest.
+ */
+#define XENVER_guest_handle 8
+
+#define XENVER_commandline 9
+typedef char xen_commandline_t[1024];
+
+/*
+ * Return value is the number of bytes written, or XEN_Exx on error.
+ * Calling with empty parameter returns the size of build_id.
+ */
+#define XENVER_build_id 10
+struct xen_build_id {
+ uint32_t len; /* IN: size of buf[]. */
+ unsigned char buf[XEN_FLEX_ARRAY_DIM];
+ /* OUT: Variable length buffer with build_id. */
+};
+typedef struct xen_build_id xen_build_id_t;
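+
+/*
+ * Illustrative sketch (not part of the Xen interface): the usual two-step
+ * build_id query. Assumes a guest-side HYPERVISOR_xen_version(cmd, arg)
+ * wrapper, that "empty parameter" above means a NULL argument, and a
+ * malloc()-style allocator.
+ */
+#if 0
+static int fetch_build_id(struct xen_build_id **out)
+{
+    int sz = HYPERVISOR_xen_version(XENVER_build_id, NULL); /* size query */
+    struct xen_build_id *bid;
+
+    if (sz <= 0)
+        return sz;
+    bid = malloc(sizeof(*bid) + sz);
+    if (!bid)
+        return -1;
+    bid->len = sz;
+    sz = HYPERVISOR_xen_version(XENVER_build_id, bid); /* bytes written */
+    if (sz < 0) {
+        free(bid);
+        return sz;
+    }
+    *out = bid;
+    return sz;
+}
+#endif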
+
+#endif /* __XEN_PUBLIC_VERSION_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/interface/xen-compat.h b/include/hw/xen/interface/xen-compat.h
new file mode 100644
index 0000000000..97fe698498
--- /dev/null
+++ b/include/hw/xen/interface/xen-compat.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen-compat.h
+ *
+ * Guest OS interface to Xen. Compatibility layer.
+ *
+ * Copyright (c) 2006, Christian Limpach
+ */
+
+#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
+#define __XEN_PUBLIC_XEN_COMPAT_H__
+
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040e00
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Xen is built with matching headers and implements the latest interface. */
+#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
+#elif !defined(__XEN_INTERFACE_VERSION__)
+/* Guests which do not specify a version get the legacy interface. */
+#define __XEN_INTERFACE_VERSION__ 0x00000000
+#endif
+
+#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
+#error "These header files do not support the requested interface version."
+#endif
+
+#define COMPAT_FLEX_ARRAY_DIM XEN_FLEX_ARRAY_DIM
+
+#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
diff --git a/include/hw/xen/interface/xen.h b/include/hw/xen/interface/xen.h
new file mode 100644
index 0000000000..920567e006
--- /dev/null
+++ b/include/hw/xen/interface/xen.h
@@ -0,0 +1,1032 @@
+/* SPDX-License-Identifier: MIT */
+/******************************************************************************
+ * xen.h
+ *
+ * Guest OS interface to Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_XEN_H__
+#define __XEN_PUBLIC_XEN_H__
+
+#include "xen-compat.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+#include "arch-x86/xen.h"
+#elif defined(__arm__) || defined (__aarch64__)
+#include "arch-arm.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+DEFINE_XEN_GUEST_HANDLE(char);
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+DEFINE_XEN_GUEST_HANDLE(int);
+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
+DEFINE_XEN_GUEST_HANDLE(long);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+#endif
+DEFINE_XEN_GUEST_HANDLE(void);
+
+DEFINE_XEN_GUEST_HANDLE(uint64_t);
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
+
+/* Define a variable length array (depends on compiler). */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#define XEN_FLEX_ARRAY_DIM
+#elif defined(__GNUC__)
+#define XEN_FLEX_ARRAY_DIM 0
+#else
+#define XEN_FLEX_ARRAY_DIM 1 /* variable size */
+#endif
+
+/* Turn a plain number into a C unsigned (long (long)) constant. */
+#define __xen_mk_uint(x) x ## U
+#define __xen_mk_ulong(x) x ## UL
+#ifndef __xen_mk_ullong
+# define __xen_mk_ullong(x) x ## ULL
+#endif
+#define xen_mk_uint(x) __xen_mk_uint(x)
+#define xen_mk_ulong(x) __xen_mk_ulong(x)
+#define xen_mk_ullong(x) __xen_mk_ullong(x)
+
+#else
+
+/* In assembly code we cannot use C numeric constant suffixes. */
+#define xen_mk_uint(x) x
+#define xen_mk_ulong(x) x
+#define xen_mk_ullong(x) x
+
+#endif
+
+/*
+ * HYPERCALLS
+ */
+
+/* `incontents 100 hcalls List of hypercalls
+ * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()
+ */
+
+#define __HYPERVISOR_set_trap_table 0
+#define __HYPERVISOR_mmu_update 1
+#define __HYPERVISOR_set_gdt 2
+#define __HYPERVISOR_stack_switch 3
+#define __HYPERVISOR_set_callbacks 4
+#define __HYPERVISOR_fpu_taskswitch 5
+#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
+#define __HYPERVISOR_platform_op 7
+#define __HYPERVISOR_set_debugreg 8
+#define __HYPERVISOR_get_debugreg 9
+#define __HYPERVISOR_update_descriptor 10
+#define __HYPERVISOR_memory_op 12
+#define __HYPERVISOR_multicall 13
+#define __HYPERVISOR_update_va_mapping 14
+#define __HYPERVISOR_set_timer_op 15
+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
+#define __HYPERVISOR_xen_version 17
+#define __HYPERVISOR_console_io 18
+#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
+#define __HYPERVISOR_grant_table_op 20
+#define __HYPERVISOR_vm_assist 21
+#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_iret 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op 24
+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op 26
+#define __HYPERVISOR_xsm_op 27
+#define __HYPERVISOR_nmi_op 28
+#define __HYPERVISOR_sched_op 29
+#define __HYPERVISOR_callback_op 30
+#define __HYPERVISOR_xenoprof_op 31
+#define __HYPERVISOR_event_channel_op 32
+#define __HYPERVISOR_physdev_op 33
+#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_sysctl 35
+#define __HYPERVISOR_domctl 36
+#define __HYPERVISOR_kexec_op 37
+#define __HYPERVISOR_tmem_op 38
+#define __HYPERVISOR_argo_op 39
+#define __HYPERVISOR_xenpmu_op 40
+#define __HYPERVISOR_dm_op 41
+#define __HYPERVISOR_hypfs_op 42
+
+/* Architecture-specific hypercall definitions. */
+#define __HYPERVISOR_arch_0 48
+#define __HYPERVISOR_arch_1 49
+#define __HYPERVISOR_arch_2 50
+#define __HYPERVISOR_arch_3 51
+#define __HYPERVISOR_arch_4 52
+#define __HYPERVISOR_arch_5 53
+#define __HYPERVISOR_arch_6 54
+#define __HYPERVISOR_arch_7 55
+
+/* ` } */
+
+/*
+ * HYPERCALL COMPATIBILITY.
+ */
+
+/* New sched_op hypercall introduced in 0x00030101. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030101
+#undef __HYPERVISOR_sched_op
+#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
+#endif
+
+/* New event-channel and physdev hypercalls introduced in 0x00030202. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030202
+#undef __HYPERVISOR_event_channel_op
+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
+#undef __HYPERVISOR_physdev_op
+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
+#endif
+
+/* New platform_op hypercall introduced in 0x00030204. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030204
+#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
+#endif
+
+/*
+ * VIRTUAL INTERRUPTS
+ *
+ * Virtual interrupts that a guest OS may receive from Xen.
+ *
+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
+ * The latter can be allocated only once per guest: they must initially be
+ * allocated to VCPU0 but can subsequently be re-bound.
+ */
+/* ` enum virq { */
+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
+#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
+#define VIRQ_ARGO 11 /* G. Argo interdomain message notification */
+#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* V. PMC interrupt */
+
+/* Architecture-specific VIRQ definitions. */
+#define VIRQ_ARCH_0 16
+#define VIRQ_ARCH_1 17
+#define VIRQ_ARCH_2 18
+#define VIRQ_ARCH_3 19
+#define VIRQ_ARCH_4 20
+#define VIRQ_ARCH_5 21
+#define VIRQ_ARCH_6 22
+#define VIRQ_ARCH_7 23
+/* ` } */
+
+#define NR_VIRQS 24
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * ` unsigned count, unsigned *done_out,
+ * ` unsigned foreigndom)
+ * `
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @pdone is an output parameter indicating number of completed operations
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ * hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ * in this hypercall invocation. The value of this field
+ * (x) encodes the PFD as follows:
+ * x == 0 => PFD == DOMID_SELF
+ * x != 0 => PFD == x - 1
+ *
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
+ * ptr[:2] -- Machine address of the page-table entry to modify.
+ * val -- Value to write.
+ *
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which would end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice this means that when constructing a page table for a
+ * process, thread, etc, we MUST be very diligent in following these rules:
+ * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ * the entries.
+ * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ * or L2).
+ * 3). Start filling out the PTE table (L1) with the PTE entries. Once
+ * done, make sure to set each of those entries to RO (so writeable bit
+ * is unset). Once that has been completed, set the PMD (L2) for this
+ * PTE table as RO.
+ * 4). When completed with all of the PMD (L2) entries, and all of them have
+ * been set to RO, make sure to set the PUD (L3) to RO. Do the same
+ * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ * 5). Now before you can use those pages (so setting the cr3), you MUST also
+ * pin them so that the hypervisor can verify the entries. This is done
+ * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ * number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ * issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetable levels), so
+ * instead use L3.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. Also if so desired the OS can also try to write to the PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST not be in use (meaning that the cr3 is not set to it).
+ *
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2] -- Machine address within the frame whose mapping to modify.
+ * The frame must belong to the FD, if one is specified.
+ * val -- Value to write into the mapping entry.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
+ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
+ * with those in @val.
+ *
+ * ptr[1:0] == MMU_PT_UPDATE_NO_TRANSLATE:
+ * As MMU_NORMAL_PT_UPDATE above, but @val is not translated through FD
+ * page tables.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture-defined bits. This means
+ * that if this is an x86-64 machine using the four-level page table layout,
+ * the layout of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cached disabled
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit with the default layout is PAGE_PSE
+ * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
+ * HYPERVISOR_mmuext_op serve as the mechanism to set a page to be 4MB
+ * (or 2MB) instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen
+ * using it as the Page Attribute Table (PAT) bit - for details on it please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
+ *
+ * The lookup of this index table translates to looking up
+ * Bit 7, Bit 4, and Bit 3 of val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
+ *
+ * If all bits are off, then we are using PAT0. If bit 3 is turned on,
+ * then we are using PAT1; if both bit 3 and bit 4 are on, then PAT2, and so on.
+ *
+ * As you can see, the Linux PAT1 translates to PAT4 under Xen. This means
+ * that a guest which follows Linux's PAT setup and would like to set Write
+ * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
+ * set. For example, Linux only uses PAT0, PAT1, and PAT2 for caching, as:
+ *
+ * WB = none (so PAT0)
+ * WC = PWT (bit 3 on)
+ * UC = PWT | PCD (bit 3 and 4 are on).
+ *
+ * To make this work with Xen, the guest needs to translate the WC bit like so:
+ *
+ * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back it would:
+ *
+ * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
+ */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
+ /* val never translated. */
+
+/*
+ * MMU EXTENDED OPERATIONS
+ *
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * ` unsigned int count,
+ * ` unsigned int *pdone,
+ * ` unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ *
+ * cmd: MMUEXT_(UN)PIN_*_TABLE
+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
+ * The frame must belong to the FD, if one is specified.
+ *
+ * cmd: MMUEXT_NEW_BASEPTR
+ * mfn: Machine frame number of new page-table base to install in MMU.
+ *
+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
+ * mfn: Machine frame number of new page-table base to install in MMU
+ * when in user space.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
+ * No additional arguments. Flushes local TLB.
+ *
+ * cmd: MMUEXT_INVLPG_LOCAL
+ * linear_addr: Linear address to be flushed from the local TLB.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_MULTI
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_INVLPG_MULTI
+ * linear_addr: Linear address to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_ALL
+ * No additional arguments. Flushes all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_INVLPG_ALL
+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE
+ * No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
+ * cmd: MMUEXT_SET_LDT
+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
+ * nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
+ */
+/* ` enum mmuext_cmd { */
+#define MMUEXT_PIN_L1_TABLE 0
+#define MMUEXT_PIN_L2_TABLE 1
+#define MMUEXT_PIN_L3_TABLE 2
+#define MMUEXT_PIN_L4_TABLE 3
+#define MMUEXT_UNPIN_TABLE 4
+#define MMUEXT_NEW_BASEPTR 5
+#define MMUEXT_TLB_FLUSH_LOCAL 6
+#define MMUEXT_INVLPG_LOCAL 7
+#define MMUEXT_TLB_FLUSH_MULTI 8
+#define MMUEXT_INVLPG_MULTI 9
+#define MMUEXT_TLB_FLUSH_ALL 10
+#define MMUEXT_INVLPG_ALL 11
+#define MMUEXT_FLUSH_CACHE 12
+#define MMUEXT_SET_LDT 13
+#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
+#define MMUEXT_MARK_SUPER 19
+#define MMUEXT_UNMARK_SUPER 20
+/* ` } */
+
+#ifndef __ASSEMBLY__
+struct mmuext_op {
+ unsigned int cmd; /* => enum mmuext_cmd */
+ union {
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
+ xen_pfn_t mfn;
+ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
+ unsigned long linear_addr;
+ } arg1;
+ union {
+ /* SET_LDT */
+ unsigned int nr_ents;
+ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(const_void) vcpumask;
+#else
+ const void *vcpumask;
+#endif
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
+ } arg2;
+};
+typedef struct mmuext_op mmuext_op_t;
+DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
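+
+/*
+ * Illustrative sketch (not part of the Xen interface): pinning a new top-level
+ * pagetable and switching to it, per the construction steps described with
+ * MMU_NORMAL_PT_UPDATE above. Assumes a guest-side HYPERVISOR_mmuext_op()
+ * wrapper with the prototype documented above.
+ */
+#if 0
+static int pin_and_switch_l4(xen_pfn_t l4_mfn, unsigned int foreigndom)
+{
+    struct mmuext_op ops[2];
+
+    ops[0].cmd = MMUEXT_PIN_L4_TABLE; /* let Xen validate the new tables */
+    ops[0].arg1.mfn = l4_mfn;
+    ops[1].cmd = MMUEXT_NEW_BASEPTR; /* then load them as the new base */
+    ops[1].arg1.mfn = l4_mfn;
+
+    return HYPERVISOR_mmuext_op(ops, 2, NULL, foreigndom); /* usually DOMID_SELF */
+}
+#endif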
+#endif
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val,
+ * ` enum uvm_flags flags)
+ * `
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val,
+ * ` enum uvm_flags flags,
+ * ` domid_t domid)
+ * `
+ * ` @va: The virtual address whose mapping we want to change
+ * ` @val: The new page table entry, must contain a machine address
+ * ` @flags: Control TLB flushes
+ */
+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+/* ` enum uvm_flags { */
+#define UVMF_NONE (xen_mk_ulong(0)<<0) /* No flushing at all. */
+#define UVMF_TLB_FLUSH (xen_mk_ulong(1)<<0) /* Flush entire TLB(s). */
+#define UVMF_INVLPG (xen_mk_ulong(2)<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK (xen_mk_ulong(3)<<0)
+#define UVMF_MULTI (xen_mk_ulong(0)<<2) /* Flush subset of TLBs. */
+#define UVMF_LOCAL (xen_mk_ulong(0)<<2) /* Flush local TLB. */
+#define UVMF_ALL (xen_mk_ulong(1)<<2) /* Flush all TLBs. */
+/* ` } */
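+
+/*
+ * Illustrative sketch (not part of the Xen interface): updating one mapping
+ * and flushing just that entry from the local TLB. Assumes a guest-side
+ * HYPERVISOR_update_va_mapping() wrapper with the prototype documented above.
+ */
+#if 0
+static int remap_one(unsigned long va, uint64_t new_pte_val)
+{
+    /* UVMF_LOCAL needs no CPU bitmap; UVMF_MULTI would OR a pointer to one in. */
+    return HYPERVISOR_update_va_mapping(va, new_pte_val, UVMF_INVLPG | UVMF_LOCAL);
+}
+#endif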
+
+/*
+ * ` int
+ * ` HYPERVISOR_console_io(unsigned int cmd,
+ * ` unsigned int count,
+ * ` char buffer[]);
+ *
+ * @cmd: Command (see below)
+ * @count: Size of the buffer to read/write
+ * @buffer: Pointer in the guest memory
+ *
+ * List of commands:
+ *
+ * * CONSOLEIO_write: Write the buffer to Xen console.
+ * For the hardware domain, all the characters in the buffer will
+ * be written. Characters will be printed directly to the console.
+ * For all the other domains, only the printable characters will be
+ * written. Characters may be buffered until a newline (i.e. '\n') is
+ * found.
+ * @return 0 on success, otherwise return an error code.
+ * * CONSOLEIO_read: Attempts to read up to @count characters from Xen
+ * console. The maximum buffer size (i.e. @count) supported is 2GB.
+ * @return the number of characters read on success, otherwise return
+ * an error code.
+ */
+#define CONSOLEIO_write 0
+#define CONSOLEIO_read 1
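+
+/*
+ * Illustrative sketch (not part of the Xen interface): writing a string to the
+ * Xen console. Assumes a guest-side HYPERVISOR_console_io() wrapper with the
+ * prototype documented above.
+ */
+#if 0
+static int xen_console_write(const char *buf, unsigned int len)
+{
+    /* Non-hardware domains may see output held back until a '\n' arrives. */
+    return HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)buf);
+}
+#endif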
+
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable 0
+#define VMASST_CMD_disable 1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
+#define VMASST_TYPE_4gb_segments 0
+
+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
+#define VMASST_TYPE_4gb_segments_notify 1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
+#define VMASST_TYPE_writable_pagetables 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
+#define VMASST_TYPE_pae_extended_cr3 3
+
+/*
+ * x86 guests: Sane behaviour for virtual iopl
+ * - virtual iopl updated from do_iret() hypercalls.
+ * - virtual iopl reported in bounce frames.
+ * - guest kernels assumed to be level 0 for the purpose of iopl checks.
+ */
+#define VMASST_TYPE_architectural_iopl 4
+
+/*
+ * All guests: activate update indicator in vcpu_runstate_info
+ * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
+ * vcpu_runstate_info during updates of the runstate information.
+ */
+#define VMASST_TYPE_runstate_update_flag 5
+
+/*
+ * x86/64 guests: strictly hide M2P from user mode.
+ * This allows the guest to control respective hypervisor behavior:
+ * - when not set, L4 tables get created with the respective slot blank,
+ * and whenever the L4 table gets used as a kernel one the missing
+ * mapping gets inserted,
+ * - when set, L4 tables get created with the respective slot initialized
+ * as before, and whenever the L4 table gets used as a user one the
+ * mapping gets zapped.
+ */
+#define VMASST_TYPE_m2p_strict 32
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+#define MAX_VMASST_TYPE 3
+#endif
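+
+/*
+ * Illustrative sketch (not part of the Xen interface): opting in to the
+ * runstate update flag described above. Assumes a guest-side
+ * HYPERVISOR_vm_assist(cmd, type) hypercall wrapper.
+ */
+#if 0
+static int enable_runstate_update_flag(void)
+{
+    /* Xen will set XEN_RUNSTATE_UPDATE while it rewrites the runstate area. */
+    return HYPERVISOR_vm_assist(VMASST_CMD_enable,
+                                VMASST_TYPE_runstate_update_flag);
+}
+#endif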
+
+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
+#define DOMID_FIRST_RESERVED xen_mk_uint(0x7FF0)
+
+/* DOMID_SELF is used in certain contexts to refer to oneself. */
+#define DOMID_SELF xen_mk_uint(0x7FF0)
+
+/*
+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
+ * is useful to ensure that no mappings to the OS's own heap are accidentally
+ * installed. (e.g., in Linux this could cause havoc as reference counts
+ * aren't adjusted on the I/O-mapping code path).
+ * This only makes sense as HYPERVISOR_mmu_update()'s and
+ * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument. For
+ * HYPERVISOR_mmu_update() context it can be specified by any calling domain,
+ * otherwise it's only permitted if the caller is privileged.
+ */
+#define DOMID_IO xen_mk_uint(0x7FF1)
+
+/*
+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
+ * Xen's heap space (e.g., the machine_to_phys table).
+ * This only makes sense as
+ * - HYPERVISOR_mmu_update()'s, HYPERVISOR_mmuext_op()'s, or
+ * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument,
+ * - with XENMAPSPACE_gmfn_foreign,
+ * and is only permitted if the caller is privileged.
+ */
+#define DOMID_XEN xen_mk_uint(0x7FF2)
+
+/* DOMID_COW is used as the owner of sharable pages. */
+#define DOMID_COW xen_mk_uint(0x7FF3)
+
+/* DOMID_INVALID is used to identify pages with unknown owner. */
+#define DOMID_INVALID xen_mk_uint(0x7FF4)
+
+/* Idle domain. */
+#define DOMID_IDLE xen_mk_uint(0x7FFF)
+
+/* Mask for valid domain id values */
+#define DOMID_MASK xen_mk_uint(0x7FFF)
+
+#ifndef __ASSEMBLY__
+
+typedef uint16_t domid_t;
+
+/*
+ * Send an array of these to HYPERVISOR_mmu_update().
+ * NB. The fields are natural pointer/address size for this architecture.
+ */
+struct mmu_update {
+ uint64_t ptr; /* Machine address of PTE. */
+ uint64_t val; /* New contents of PTE. */
+};
+typedef struct mmu_update mmu_update_t;
+DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
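+
+/*
+ * Illustrative sketch (not part of the Xen interface): a single
+ * MMU_NORMAL_PT_UPDATE request writing a present+writable L1 entry, using the
+ * FD/PFD encoding described earlier. Assumes a guest-side
+ * HYPERVISOR_mmu_update() wrapper with the prototype documented above.
+ */
+#if 0
+static int write_l1e(uint64_t pte_machine_addr, xen_pfn_t mfn, domid_t fd)
+{
+    struct mmu_update req = {
+        /* The sub-command lives in ptr[1:0]; MMU_NORMAL_PT_UPDATE is 0. */
+        .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
+        /* Frame number plus present(0), writeable(1), accessed(5), dirty(6). */
+        .val = ((uint64_t)mfn << 12) | 0x63,
+    };
+    /* foreigndom[15:0] = FD; foreigndom[31:16] = 0 means PFD == DOMID_SELF. */
+    unsigned int foreigndom = fd;
+
+    return HYPERVISOR_mmu_update(&req, 1, NULL, foreigndom);
+}
+#endif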
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_multicall(multicall_entry_t call_list[],
+ * ` uint32_t nr_calls);
+ *
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this then
+ * any unused bits in the upper portion must be zero.
+ */
+struct multicall_entry {
+ xen_ulong_t op, result;
+ xen_ulong_t args[6];
+};
+typedef struct multicall_entry multicall_entry_t;
+DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+/*
+ * Event channel endpoints per domain (when using the 2-level ABI):
+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
+#endif
+
+struct vcpu_time_info {
+ /*
+ * Updates to the following values are preceded and followed by an
+ * increment of 'version'. The guest can therefore detect updates by
+ * looking for changes to 'version'. If the least-significant bit of
+ * the version number is set then an update is in progress and the guest
+ * must wait to read a consistent set of values.
+ * The correct way to interact with the version number is similar to
+ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
+ */
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
+ uint64_t system_time; /* Time, in nanosecs, since boot. */
+ /*
+ * Current system time:
+ * system_time +
+ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
+ * CPU frequency (Hz):
+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
+ */
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+#if __XEN_INTERFACE_VERSION__ > 0x040600
+ uint8_t flags;
+ uint8_t pad1[2];
+#else
+ int8_t pad1[3];
+#endif
+}; /* 32 bytes */
+typedef struct vcpu_time_info vcpu_time_info_t;
+
+#define XEN_PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define XEN_PVCLOCK_GUEST_STOPPED (1 << 1)
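+
+/*
+ * Illustrative sketch (not part of the Xen interface): reading a consistent
+ * snapshot and applying the conversion documented above. Assumes GCC-style
+ * atomics, a 128-bit integer type and a caller-supplied TSC reader.
+ */
+#if 0
+static uint64_t xen_system_time_ns(const volatile struct vcpu_time_info *t,
+                                   uint64_t (*read_tsc)(void))
+{
+    uint32_t ver, mul;
+    uint64_t base, delta;
+    int8_t shift;
+
+    do {
+        ver = t->version; /* odd => an update is in progress */
+        __atomic_thread_fence(__ATOMIC_ACQUIRE);
+        base = t->system_time;
+        delta = read_tsc() - t->tsc_timestamp;
+        mul = t->tsc_to_system_mul;
+        shift = t->tsc_shift;
+        __atomic_thread_fence(__ATOMIC_ACQUIRE);
+    } while ((ver & 1) || ver != t->version);
+
+    delta = shift < 0 ? delta >> -shift : delta << shift;
+    /* ((delta << tsc_shift) * tsc_to_system_mul) >> 32, without overflow. */
+    return base + (uint64_t)(((unsigned __int128)delta * mul) >> 32);
+}
+#endif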
+
+struct vcpu_info {
+ /*
+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+ * a pending notification for a particular VCPU. It is then cleared
+ * by the guest OS /before/ checking for pending work, thus avoiding
+ * a set-and-check race. Note that the mask is only accessed by Xen
+ * on the CPU that is currently hosting the VCPU. This means that the
+ * pending and mask flags can be updated by the guest without special
+ * synchronisation (i.e., no need for the x86 LOCK prefix).
+ * This may seem suboptimal because if the pending flag is set by
+ * a different CPU then an IPI may be scheduled even when the mask
+ * is set. However, note:
+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
+ * channel mask bits. A 'noisy' event that is continually being
+ * triggered can be masked at source at this very precise
+ * granularity.
+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
+ * reentrant execution: whether for concurrency control, or to
+ * prevent unbounded stack usage. Whatever the purpose, we expect
+ * that the mask will be asserted only for short periods at a time,
+ * and so the likelihood of a 'spurious' IPI is suitably small.
+ * The mask is read before making an event upcall to the guest: a
+ * non-zero mask therefore guarantees that the VCPU will not receive
+ * an upcall activation. The mask is cleared when the VCPU requests
+ * to block: this avoids wakeup-waiting races.
+ */
+ uint8_t evtchn_upcall_pending;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ uint8_t evtchn_upcall_mask;
+#else /* XEN_HAVE_PV_UPCALL_MASK */
+ uint8_t pad0;
+#endif /* XEN_HAVE_PV_UPCALL_MASK */
+ xen_ulong_t evtchn_pending_sel;
+ struct arch_vcpu_info arch;
+ vcpu_time_info_t time;
+}; /* 64 bytes (x86) */
+#ifndef __XEN__
+typedef struct vcpu_info vcpu_info_t;
+#endif
+
+/*
+ * `incontents 200 startofday_shared Start-of-day shared data structure
+ * Xen/kernel shared data -- pointer provided in start_info.
+ *
+ * This structure is defined to be both smaller than a page, and the
+ * only data on the shared page, but may vary in actual size even within
+ * compatible Xen versions; guests should not rely on the size
+ * of this structure remaining constant.
+ */
+struct shared_info {
+ struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
+
+ /*
+ * A domain can create "event channels" on which it can send and receive
+ * asynchronous event notifications. There are three classes of event that
+ * are delivered by this mechanism:
+ * 1. Bi-directional inter- and intra-domain connections. Domains must
+ * arrange out-of-band to set up a connection (usually by allocating
+ * an unbound 'listener' port and advertising that via a storage service
+ * such as xenstore).
+ * 2. Physical interrupts. A domain with suitable hardware-access
+ * privileges can bind an event-channel port to a physical interrupt
+ * source.
+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
+ * port to a virtual interrupt source, such as the virtual-timer
+ * device or the emergency console.
+ *
+ * Event channels are addressed by a "port index". Each channel is
+ * associated with two bits of information:
+ * 1. PENDING -- notifies the domain that there is a pending notification
+ * to be processed. This bit is cleared by the guest.
+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+ * will cause an asynchronous upcall to be scheduled. This bit is only
+ * updated by the guest. It is read-only within Xen. If a channel
+ * becomes pending while the channel is masked then the 'edge' is lost
+ * (i.e., when the channel is unmasked, the guest must manually handle
+ * pending notifications as no upcall will be scheduled by Xen).
+ *
+ * To expedite scanning of pending notifications, any 0->1 pending
+ * transition on an unmasked channel causes a corresponding bit in a
+ * per-vcpu selector word to be set. Each bit in the selector covers a
+ * 'C long' in the PENDING bitfield array.
+ */
+ xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+ xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
+
+ /*
+ * Wallclock time: updated by control software or RTC emulation.
+ * Guests should base their gettimeofday() syscall on this
+ * wallclock-base value.
+ * The values of wc_sec and wc_nsec are offsets from the Unix epoch
+ * adjusted by the domain's 'time offset' (in seconds) as set either
+ * by XEN_DOMCTL_settimeoffset, or adjusted via a guest write to the
+ * emulated RTC.
+ */
+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+#if !defined(__i386__)
+ uint32_t wc_sec_hi;
+# define xen_wc_sec_hi wc_sec_hi
+#elif !defined(__XEN__) && !defined(__XEN_TOOLS__)
+# define xen_wc_sec_hi arch.wc_sec_hi
+#endif
+
+ struct arch_shared_info arch;
+
+};
+#ifndef __XEN__
+typedef struct shared_info shared_info_t;
+#endif
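+
+/*
+ * Illustrative sketch (not part of the Xen interface): the two-level scan an
+ * event upcall typically performs, driven by the per-VCPU selector word
+ * described above. Assumes GCC builtins/atomics, a mapped shared_info pointer
+ * and a caller-supplied per-port handler; a real guest would also atomically
+ * clear each PENDING bit before (or while) handling the port.
+ */
+#if 0
+static void scan_pending_ports(struct shared_info *s, unsigned int vcpu,
+                               void (*handle_port)(unsigned int port))
+{
+    struct vcpu_info *v = &s->vcpu_info[vcpu];
+    xen_ulong_t sel;
+
+    v->evtchn_upcall_pending = 0; /* clear before checking for pending work */
+    sel = __atomic_exchange_n(&v->evtchn_pending_sel, 0, __ATOMIC_ACQ_REL);
+
+    while (sel) {
+        unsigned int word = __builtin_ctzll(sel); /* one bit per 'C long' */
+        xen_ulong_t ready = s->evtchn_pending[word] & ~s->evtchn_mask[word];
+
+        sel &= sel - 1;
+        while (ready) {
+            unsigned int bit = __builtin_ctzll(ready);
+
+            ready &= ready - 1;
+            handle_port(word * sizeof(xen_ulong_t) * 8 + bit);
+        }
+    }
+}
+#endif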
+
+/*
+ * `incontents 200 startofday Start-of-day memory layout
+ *
+ * 1. The domain is started within contiguous virtual-memory region.
+ * 2. The contiguous region ends on an aligned 4MB boundary.
+ * 3. This is the order of bootstrap elements in the initial virtual region:
+ * a. relocated kernel image
+ * b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
+ * c. list of allocated page frames [mfn_list, nr_pages]
+ * (unless relocated due to XEN_ELFNOTE_INIT_P2M)
+ * d. start_info_t structure [register rSI (x86)]
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base and CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
+ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * layout for the domain. In particular, the bootstrap virtual-memory
+ * region is a 1:1 mapping to the first section of the pseudo-physical map.
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
+ * only exception is the bootstrap page table, which is mapped read-only.
+ * 7. There is guaranteed to be at least 512kB padding after the final
+ * bootstrap element. If necessary, the bootstrap virtual region is
+ * extended by an extra 4MB to ensure this.
+ *
+ * Note: Prior to 25833:bb85bbccb1c9 ("x86/32-on-64 adjust Dom0 initial page
+ * table layout") a bug caused the pt_base (3.g above) and cr3 to not point
+ * to the start of the guest page tables (it was offset by two pages).
+ * This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
+ * or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
+ * allocated in the order: 'first L1', 'first L2', 'first L3', so the offset
+ * to the page table base is two pages back. If the initial domain is 32-bit
+ * and runs under a 64-bit hypervisor it should _NOT_ use the two pages
+ * preceding pt_base and should mark them as reserved/unused.
+ */
+#ifdef XEN_HAVE_PV_GUEST_ENTRY
+struct start_info {
+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
+ char magic[32]; /* "xen-<version>-<platform>". */
+ unsigned long nr_pages; /* Total pages allocated to this domain. */
+ unsigned long shared_info; /* MACHINE address of shared info struct. */
+ uint32_t flags; /* SIF_xxx flags. */
+ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
+ uint32_t store_evtchn; /* Event channel for store communication. */
+ union {
+ struct {
+ xen_pfn_t mfn; /* MACHINE page number of console page. */
+ uint32_t evtchn; /* Event channel for console page. */
+ } domU;
+ struct {
+ uint32_t info_off; /* Offset of console_info struct. */
+ uint32_t info_size; /* Size of console_info struct from start.*/
+ } dom0;
+ } console;
+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
+ unsigned long pt_base; /* VIRTUAL address of page directory. */
+ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
+ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module */
+ /* (PFN of pre-loaded module if */
+ /* SIF_MOD_START_PFN set in flags). */
+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+#define MAX_GUEST_CMDLINE 1024
+ int8_t cmd_line[MAX_GUEST_CMDLINE];
+ /* The pfn range here covers both page table and p->m table frames. */
+ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
+ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
+};
+typedef struct start_info start_info_t;
+
+/* New console union for dom0 introduced in 0x00030203. */
+#if __XEN_INTERFACE_VERSION__ < 0x00030203
+#define console_mfn console.domU.mfn
+#define console_evtchn console.domU.evtchn
+#endif
+#endif /* XEN_HAVE_PV_GUEST_ENTRY */
+
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
+#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
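+
+/*
+ * Illustrative sketch (not part of the Xen interface): selecting the right
+ * member of start_info.console based on SIF_INITDOMAIN. Assumes
+ * XEN_HAVE_PV_GUEST_ENTRY and that dom0's info_off is an offset relative to
+ * the start_info page.
+ */
+#if 0
+static const void *dom0_console_info(const struct start_info *si)
+{
+    if (!(si->flags & SIF_INITDOMAIN))
+        return NULL; /* domU: use console.domU.mfn and console.domU.evtchn */
+    /* dom0: the console info structure lives inside the start_info page. */
+    return (const char *)si + si->console.dom0.info_off;
+}
+#endif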
+
+/*
+ * A multiboot module is a package containing modules very similar to a
+ * multiboot module array. The only differences are:
+ * - the array of module descriptors is by convention simply at the beginning
+ * of the multiboot module,
+ * - addresses in the module descriptors are based on the beginning of the
+ * multiboot module,
+ * - the number of modules is determined by a termination descriptor that has
+ * mod_start == 0.
+ *
+ * This permits both building it statically and referencing it in a
+ * configuration file, and lets the PV guest easily rebase the addresses to
+ * virtual addresses while at the same time counting the number of modules.
+ */
+struct xen_multiboot_mod_list
+{
+ /* Address of first byte of the module */
+ uint32_t mod_start;
+ /* Address of last byte of the module (inclusive) */
+ uint32_t mod_end;
+ /* Address of zero-terminated command line */
+ uint32_t cmdline;
+ /* Unused, must be zero */
+ uint32_t pad;
+};
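+
+/*
+ * Illustrative sketch (not part of the Xen interface): walking the descriptor
+ * array at the start of a multiboot module, using the termination rule and
+ * module-relative addressing described above.
+ */
+#if 0
+static unsigned int count_multiboot_mods(const void *module_base)
+{
+    const struct xen_multiboot_mod_list *mod = module_base;
+    unsigned int n = 0;
+
+    /* mod_start/mod_end/cmdline are offsets from 'module_base'. */
+    while (mod[n].mod_start != 0)
+        n++;
+    return n;
+}
+#endif
+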
+/*
+ * `incontents 200 startofday_dom0_console Dom0_console
+ *
+ * The console structure in start_info.console.dom0
+ *
+ * This structure includes a variety of information required to
+ * have a working VGA/VESA console.
+ */
+typedef struct dom0_vga_console_info {
+ uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
+#define XEN_VGATYPE_TEXT_MODE_3 0x03
+#define XEN_VGATYPE_VESA_LFB 0x23
+#define XEN_VGATYPE_EFI_LFB 0x70
+
+ union {
+ struct {
+ /* Font height, in pixels. */
+ uint16_t font_height;
+ /* Cursor location (column, row). */
+ uint16_t cursor_x, cursor_y;
+ /* Number of rows and columns (dimensions in characters). */
+ uint16_t rows, columns;
+ } text_mode_3;
+
+ struct {
+ /* Width and height, in pixels. */
+ uint16_t width, height;
+ /* Bytes per scan line. */
+ uint16_t bytes_per_line;
+ /* Bits per pixel. */
+ uint16_t bits_per_pixel;
+ /* LFB physical address, and size (in units of 64kB). */
+ uint32_t lfb_base;
+ uint32_t lfb_size;
+ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
+ uint8_t red_pos, red_size;
+ uint8_t green_pos, green_size;
+ uint8_t blue_pos, blue_size;
+ uint8_t rsvd_pos, rsvd_size;
+#if __XEN_INTERFACE_VERSION__ >= 0x00030206
+ /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
+ uint32_t gbl_caps;
+ /* Mode attributes (offset 0x0, VESA command 0x4f01). */
+ uint16_t mode_attrs;
+ uint16_t pad;
+#endif
+#if __XEN_INTERFACE_VERSION__ >= 0x00040d00
+ /* high 32 bits of lfb_base */
+ uint32_t ext_lfb_base;
+#endif
+ } vesa_lfb;
+ } u;
+} dom0_vga_console_info_t;
+#define xen_vga_console_info dom0_vga_console_info
+#define xen_vga_console_info_t dom0_vga_console_info_t
+
+typedef uint8_t xen_domain_handle_t[16];
+
+__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
+__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
+__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
+__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
+
+typedef struct {
+ uint8_t a[16];
+} xen_uuid_t;
+
+/*
+ * XEN_DEFINE_UUID(0x00112233, 0x4455, 0x6677, 0x8899,
+ * 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)
+ * will construct UUID 00112233-4455-6677-8899-aabbccddeeff presented as
+ * {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ * 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
+ *
+ * NB: This is compatible with Linux kernel and with libuuid, but it is not
+ * compatible with Microsoft, as they use mixed-endian encoding (some
+ * components are little-endian, some are big-endian).
+ */
+#define XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ {{((a) >> 24) & 0xFF, ((a) >> 16) & 0xFF, \
+ ((a) >> 8) & 0xFF, ((a) >> 0) & 0xFF, \
+ ((b) >> 8) & 0xFF, ((b) >> 0) & 0xFF, \
+ ((c) >> 8) & 0xFF, ((c) >> 0) & 0xFF, \
+ ((d) >> 8) & 0xFF, ((d) >> 0) & 0xFF, \
+ e1, e2, e3, e4, e5, e6}}
+
+#if defined(__STDC_VERSION__) ? __STDC_VERSION__ >= 199901L : defined(__GNUC__)
+#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ ((xen_uuid_t)XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6))
+#else
+#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6)
+#endif /* __STDC_VERSION__ / __GNUC__ */
+
+#endif /* !__ASSEMBLY__ */
+
+/* Default definitions for macros used by domctl/sysctl. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#ifndef int64_aligned_t
+#define int64_aligned_t int64_t
+#endif
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+#ifndef XEN_GUEST_HANDLE_64
+#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
+#endif
+
+#ifndef __ASSEMBLY__
+struct xenctl_bitmap {
+ XEN_GUEST_HANDLE_64(uint8) bitmap;
+ uint32_t nr_bits;
+};
+typedef struct xenctl_bitmap xenctl_bitmap_t;
+#endif
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#endif /* __XEN_PUBLIC_XEN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/hw/xen/xen-backend.h b/include/hw/xen/xen-backend.h
index 010d712638..0f01631ae7 100644
--- a/include/hw/xen/xen-backend.h
+++ b/include/hw/xen/xen-backend.h
@@ -31,7 +31,9 @@ void xen_backend_set_device(XenBackendInstance *backend,
XenDevice *xen_backend_get_device(XenBackendInstance *backend);
void xen_backend_register(const XenBackendInfo *info);
+const char **xen_backend_get_types(unsigned int *nr);
+bool xen_backend_exists(const char *type, const char *name);
void xen_backend_device_create(XenBus *xenbus, const char *type,
const char *name, QDict *opts, Error **errp);
bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp);
diff --git a/include/hw/xen/xen-block.h b/include/hw/xen/xen-block.h
index 2cd2fc2701..d692ea7580 100644
--- a/include/hw/xen/xen-block.h
+++ b/include/hw/xen/xen-block.h
@@ -12,6 +12,7 @@
#include "hw/block/block.h"
#include "hw/block/dataplane/xen-block.h"
#include "sysemu/iothread.h"
+#include "qom/object.h"
typedef enum XenBlockVdevType {
XEN_BLOCK_VDEV_TYPE_INVALID,
@@ -46,7 +47,7 @@ typedef struct XenBlockIOThread {
char *id;
} XenBlockIOThread;
-typedef struct XenBlockDevice {
+struct XenBlockDevice {
XenDevice xendev;
XenBlockProperties props;
const char *device_type;
@@ -54,41 +55,35 @@ typedef struct XenBlockDevice {
XenBlockDataPlane *dataplane;
XenBlockDrive *drive;
XenBlockIOThread *iothread;
-} XenBlockDevice;
+};
+typedef struct XenBlockDevice XenBlockDevice;
typedef void (*XenBlockDeviceRealize)(XenBlockDevice *blockdev, Error **errp);
typedef void (*XenBlockDeviceUnrealize)(XenBlockDevice *blockdev);
-typedef struct XenBlockDeviceClass {
+struct XenBlockDeviceClass {
/*< private >*/
XenDeviceClass parent_class;
/*< public >*/
XenBlockDeviceRealize realize;
XenBlockDeviceUnrealize unrealize;
-} XenBlockDeviceClass;
+};
#define TYPE_XEN_BLOCK_DEVICE "xen-block"
-#define XEN_BLOCK_DEVICE(obj) \
- OBJECT_CHECK(XenBlockDevice, (obj), TYPE_XEN_BLOCK_DEVICE)
-#define XEN_BLOCK_DEVICE_CLASS(class) \
- OBJECT_CLASS_CHECK(XenBlockDeviceClass, (class), TYPE_XEN_BLOCK_DEVICE)
-#define XEN_BLOCK_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XenBlockDeviceClass, (obj), TYPE_XEN_BLOCK_DEVICE)
-
-typedef struct XenDiskDevice {
+OBJECT_DECLARE_TYPE(XenBlockDevice, XenBlockDeviceClass, XEN_BLOCK_DEVICE)
+
+struct XenDiskDevice {
XenBlockDevice blockdev;
-} XenDiskDevice;
+};
#define TYPE_XEN_DISK_DEVICE "xen-disk"
-#define XEN_DISK_DEVICE(obj) \
- OBJECT_CHECK(XenDiskDevice, (obj), TYPE_XEN_DISK_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(XenDiskDevice, XEN_DISK_DEVICE)
-typedef struct XenCDRomDevice {
+struct XenCDRomDevice {
XenBlockDevice blockdev;
-} XenCDRomDevice;
+};
#define TYPE_XEN_CDROM_DEVICE "xen-cdrom"
-#define XEN_CDROM_DEVICE(obj) \
- OBJECT_CHECK(XenCDRomDevice, (obj), TYPE_XEN_CDROM_DEVICE)
+OBJECT_DECLARE_SIMPLE_TYPE(XenCDRomDevice, XEN_CDROM_DEVICE)
#endif /* HW_XEN_BLOCK_H */
diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h
index 4c0f747445..d8dcc2f010 100644
--- a/include/hw/xen/xen-bus-helper.h
+++ b/include/hw/xen/xen-bus-helper.h
@@ -8,38 +8,40 @@
#ifndef HW_XEN_BUS_HELPER_H
#define HW_XEN_BUS_HELPER_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
const char *xs_strstate(enum xenbus_state state);
-void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid,
- const char *node, struct xs_permissions perms[],
- unsigned int nr_perms, Error **errp);
-void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid,
+ const char *node, unsigned int owner, unsigned int domid,
+ unsigned int perms, Error **errp);
+void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, Error **errp);
/* Write to node/key unless node is empty, in which case write to key */
-void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
- GCC_FMT_ATTR(6, 0);
-void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid,
+ G_GNUC_PRINTF(6, 0);
+void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
/* Read from node/key unless node is empty, in which case read from key */
-int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid,
+int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
- const char *fmt, va_list ap);
-int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid,
+ const char *fmt, va_list ap)
+ G_GNUC_SCANF(6, 0);
+int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
- const char *fmt, ...);
+ const char *fmt, ...)
+ G_GNUC_SCANF(6, 7);
/* Watch node/key unless node is empty, in which case watch key */
-void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key,
- char *token, Error **errp);
-void xs_node_unwatch(struct xs_handle *xsh, const char *node, const char *key,
- const char *token, Error **errp);
+struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
+ const char *key, xs_watch_fn fn,
+ void *opaque, Error **errp);
+void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
#endif /* HW_XEN_BUS_HELPER_H */
diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h
index 4ec0bb072f..38d40afa37 100644
--- a/include/hw/xen/xen-bus.h
+++ b/include/hw/xen/xen-bus.h
@@ -8,35 +8,32 @@
#ifndef HW_XEN_BUS_H
#define HW_XEN_BUS_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
#include "hw/sysbus.h"
#include "qemu/notify.h"
+#include "qom/object.h"
-typedef void (*XenWatchHandler)(void *opaque);
-
-typedef struct XenWatchList XenWatchList;
-typedef struct XenWatch XenWatch;
typedef struct XenEventChannel XenEventChannel;
-typedef struct XenDevice {
+struct XenDevice {
DeviceState qdev;
domid_t frontend_id;
char *name;
- struct xs_handle *xsh;
- XenWatchList *watch_list;
+ struct qemu_xs_handle *xsh;
char *backend_path, *frontend_path;
enum xenbus_state backend_state, frontend_state;
Notifier exit;
- XenWatch *backend_state_watch, *frontend_state_watch;
+ struct qemu_xs_watch *backend_state_watch, *frontend_state_watch;
bool backend_online;
- XenWatch *backend_online_watch;
+ struct qemu_xs_watch *backend_online_watch;
xengnttab_handle *xgth;
- bool feature_grant_copy;
bool inactive;
QLIST_HEAD(, XenEventChannel) event_channels;
QLIST_ENTRY(XenDevice) list;
-} XenDevice;
+};
+typedef struct XenDevice XenDevice;
+typedef char *(*XenDeviceGetFrontendPath)(XenDevice *xendev, Error **errp);
typedef char *(*XenDeviceGetName)(XenDevice *xendev, Error **errp);
typedef void (*XenDeviceRealize)(XenDevice *xendev, Error **errp);
typedef void (*XenDeviceFrontendChanged)(XenDevice *xendev,
@@ -44,47 +41,39 @@ typedef void (*XenDeviceFrontendChanged)(XenDevice *xendev,
Error **errp);
typedef void (*XenDeviceUnrealize)(XenDevice *xendev);
-typedef struct XenDeviceClass {
+struct XenDeviceClass {
/*< private >*/
DeviceClass parent_class;
/*< public >*/
const char *backend;
const char *device;
+ XenDeviceGetFrontendPath get_frontend_path;
XenDeviceGetName get_name;
XenDeviceRealize realize;
XenDeviceFrontendChanged frontend_changed;
XenDeviceUnrealize unrealize;
-} XenDeviceClass;
+};
#define TYPE_XEN_DEVICE "xen-device"
-#define XEN_DEVICE(obj) \
- OBJECT_CHECK(XenDevice, (obj), TYPE_XEN_DEVICE)
-#define XEN_DEVICE_CLASS(class) \
- OBJECT_CLASS_CHECK(XenDeviceClass, (class), TYPE_XEN_DEVICE)
-#define XEN_DEVICE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XenDeviceClass, (obj), TYPE_XEN_DEVICE)
-
-typedef struct XenBus {
+OBJECT_DECLARE_TYPE(XenDevice, XenDeviceClass, XEN_DEVICE)
+
+struct XenBus {
BusState qbus;
domid_t backend_id;
- struct xs_handle *xsh;
- XenWatchList *watch_list;
- XenWatch *backend_watch;
+ struct qemu_xs_handle *xsh;
+ unsigned int backend_types;
+ struct qemu_xs_watch **backend_watch;
QLIST_HEAD(, XenDevice) inactive_devices;
-} XenBus;
+};
-typedef struct XenBusClass {
+struct XenBusClass {
/*< private >*/
BusClass parent_class;
-} XenBusClass;
+};
#define TYPE_XEN_BUS "xen-bus"
-#define XEN_BUS(obj) \
- OBJECT_CHECK(XenBus, (obj), TYPE_XEN_BUS)
-#define XEN_BUS_CLASS(class) \
- OBJECT_CLASS_CHECK(XenBusClass, (class), TYPE_XEN_BUS)
-#define XEN_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XenBusClass, (obj), TYPE_XEN_BUS)
+OBJECT_DECLARE_TYPE(XenBus, XenBusClass,
+ XEN_BUS)
void xen_bus_init(void);
@@ -94,20 +83,21 @@ enum xenbus_state xen_device_backend_get_state(XenDevice *xendev);
void xen_device_backend_printf(XenDevice *xendev, const char *key,
const char *fmt, ...)
- GCC_FMT_ATTR(3, 4);
+ G_GNUC_PRINTF(3, 4);
void xen_device_frontend_printf(XenDevice *xendev, const char *key,
const char *fmt, ...)
- GCC_FMT_ATTR(3, 4);
+ G_GNUC_PRINTF(3, 4);
int xen_device_frontend_scanf(XenDevice *xendev, const char *key,
- const char *fmt, ...);
+ const char *fmt, ...)
+ G_GNUC_SCANF(3, 4);
void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
Error **errp);
void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot,
Error **errp);
-void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs,
unsigned int nr_refs, Error **errp);
typedef struct XenDeviceGrantCopySegment {
@@ -141,5 +131,6 @@ void xen_device_notify_event_channel(XenDevice *xendev,
void xen_device_unbind_event_channel(XenDevice *xendev,
XenEventChannel *channel,
Error **errp);
+unsigned int xen_event_channel_get_local_port(XenEventChannel *channel);
#endif /* HW_XEN_BUS_H */
diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
new file mode 100644
index 0000000000..65a51aac2e
--- /dev/null
+++ b/include/hw/xen/xen-hvm-common.h
@@ -0,0 +1,98 @@
+#ifndef HW_XEN_HVM_COMMON_H
+#define HW_XEN_HVM_COMMON_H
+
+#include "qemu/units.h"
+
+#include "cpu.h"
+#include "hw/pci/pci.h"
+#include "hw/hw.h"
+#include "hw/xen/xen_native.h"
+#include "hw/xen/xen-legacy-backend.h"
+#include "sysemu/runstate.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/xen.h"
+#include "sysemu/xen-mapcache.h"
+#include "qemu/error-report.h"
+#include <xen/hvm/ioreq.h>
+
+extern MemoryRegion xen_memory;
+extern MemoryListener xen_io_listener;
+extern DeviceListener xen_device_listener;
+
+//#define DEBUG_XEN_HVM
+
+#ifdef DEBUG_XEN_HVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
+{
+ return shared_page->vcpu_ioreq[i].vp_eport;
+}
+static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
+{
+ return &shared_page->vcpu_ioreq[vcpu];
+}
+
+#define BUFFER_IO_MAX_DELAY 100
+
+typedef struct XenPhysmap {
+ hwaddr start_addr;
+ ram_addr_t size;
+ const char *name;
+ hwaddr phys_offset;
+
+ QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
+typedef struct XenPciDevice {
+ PCIDevice *pci_dev;
+ uint32_t sbdf;
+ QLIST_ENTRY(XenPciDevice) entry;
+} XenPciDevice;
+
+typedef struct XenIOState {
+ ioservid_t ioservid;
+ shared_iopage_t *shared_page;
+ buffered_iopage_t *buffered_io_page;
+ xenforeignmemory_resource_handle *fres;
+ QEMUTimer *buffered_io_timer;
+ CPUState **cpu_by_vcpu_id;
+ /* the evtchn port for polling the notification */
+ evtchn_port_t *ioreq_local_port;
+ /* evtchn remote and local ports for buffered io */
+ evtchn_port_t bufioreq_remote_port;
+ evtchn_port_t bufioreq_local_port;
+ /* the evtchn fd for polling */
+ xenevtchn_handle *xce_handle;
+ /* which vcpu we are serving */
+ int send_vcpu;
+
+ struct xs_handle *xenstore;
+ MemoryListener memory_listener;
+ MemoryListener io_listener;
+ QLIST_HEAD(, XenPciDevice) dev_list;
+ DeviceListener device_listener;
+
+ Notifier exit;
+} XenIOState;
+
+void xen_exit_notifier(Notifier *n, void *data);
+
+void xen_region_add(MemoryListener *listener, MemoryRegionSection *section);
+void xen_region_del(MemoryListener *listener, MemoryRegionSection *section);
+void xen_io_add(MemoryListener *listener, MemoryRegionSection *section);
+void xen_io_del(MemoryListener *listener, MemoryRegionSection *section);
+void xen_device_realize(DeviceListener *listener, DeviceState *dev);
+void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
+
+void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
+void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ const MemoryListener *xen_memory_listener);
+
+void cpu_ioreq_pio(ioreq_t *req);
+#endif /* HW_XEN_HVM_COMMON_H */
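As a hedged illustration of how this shared header is intended to be consumed, an accelerator front end might wire things up roughly as below; the listener fields, function name and max_cpus value are assumptions based only on the declarations above, not a copy of any real caller.

/* Hypothetical consumer of xen-hvm-common.h (illustrative only). */
static MemoryListener example_xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .name = "xen-memory",
};

static void example_xen_hvm_setup(unsigned int max_cpus)
{
    XenIOState *state = g_new0(XenIOState, 1);

    /* Creates the ioreq server and installs the memory/io/device listeners. */
    xen_register_ioreq(state, max_cpus, &example_xen_memory_listener);
    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
}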
diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h
index 5e6c56c4d6..2cca174778 100644
--- a/include/hw/xen/xen-legacy-backend.h
+++ b/include/hw/xen/xen-legacy-backend.h
@@ -1,19 +1,21 @@
#ifndef HW_XEN_LEGACY_BACKEND_H
#define HW_XEN_LEGACY_BACKEND_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
#include "hw/xen/xen_pvdev.h"
#include "net/net.h"
+#include "qom/object.h"
#define TYPE_XENSYSDEV "xen-sysdev"
#define TYPE_XENSYSBUS "xen-sysbus"
#define TYPE_XENBACKEND "xen-backend"
-#define XENBACKEND_DEVICE(obj) \
- OBJECT_CHECK(XenLegacyDevice, (obj), TYPE_XENBACKEND)
+typedef struct XenLegacyDevice XenLegacyDevice;
+DECLARE_INSTANCE_CHECKER(XenLegacyDevice, XENBACKEND,
+ TYPE_XENBACKEND)
/* variables */
-extern struct xs_handle *xenstore;
+extern struct qemu_xs_handle *xenstore;
extern const char *xen_protocol;
extern DeviceState *xen_sysdev;
extern BusState *xen_sysbus;
@@ -28,9 +30,6 @@ int xenstore_write_be_int64(struct XenLegacyDevice *xendev, const char *node,
char *xenstore_read_be_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_be_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
-void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev);
-void xenstore_update_be(char *watch, char *type, int dom,
- struct XenDevOps *ops);
char *xenstore_read_fe_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_fe_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
@@ -40,8 +39,7 @@ int xenstore_read_fe_uint64(struct XenLegacyDevice *xendev, const char *node,
void xen_be_check_state(struct XenLegacyDevice *xendev);
/* xen backend driver bits */
-int xen_be_init(void);
-void xen_be_register_common(void);
+void xen_be_init(void);
int xen_be_register(const char *type, struct XenDevOps *ops);
int xen_be_set_state(struct XenLegacyDevice *xendev, enum xenbus_state state);
int xen_be_bind_evtchn(struct XenLegacyDevice *xendev);
@@ -50,18 +48,7 @@ void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev,
void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot);
void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
- unsigned int nr_refs);
-
-typedef struct XenGrantCopySegment {
- union {
- void *virt;
- struct {
- uint32_t ref;
- off_t offset;
- } foreign;
- } source, dest;
- size_t len;
-} XenGrantCopySegment;
+ uint32_t *refs, unsigned int nr_refs);
int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
bool to_domain, XenGrantCopySegment segs[],
@@ -74,9 +61,9 @@ static inline void *xen_be_map_grant_ref(struct XenLegacyDevice *xendev,
}
static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev,
- void *ptr)
+ void *ptr, uint32_t ref)
{
- return xen_be_unmap_grant_refs(xendev, ptr, 1);
+ return xen_be_unmap_grant_refs(xendev, ptr, &ref, 1);
}
/* actual backend drivers */
@@ -94,8 +81,6 @@ extern struct XenDevOps xen_usb_ops; /* xen-usb.c */
/* configuration (aka xenbus setup) */
void xen_config_cleanup(void);
-int xen_config_dev_blk(DriveInfo *disk);
-int xen_config_dev_nic(NICInfo *nic);
int xen_config_dev_vfb(int vdev, const char *type);
int xen_config_dev_vkbd(int vdev);
int xen_config_dev_console(int vdev);
diff --git a/include/hw/xen/xen-x86.h b/include/hw/xen/xen-x86.h
new file mode 100644
index 0000000000..85e3db1b8d
--- /dev/null
+++ b/include/hw/xen/xen-x86.h
@@ -0,0 +1,15 @@
+/*
+ * Xen X86-specific
+ *
+ * Copyright 2020 Red Hat, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef QEMU_HW_XEN_X86_H
+#define QEMU_HW_XEN_X86_H
+
+#include "hw/i386/pc.h"
+
+void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory);
+
+#endif /* QEMU_HW_XEN_X86_H */
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index 771dd447f2..37ecc91fc3 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -1,19 +1,36 @@
-#ifndef QEMU_HW_XEN_H
-#define QEMU_HW_XEN_H
-
/*
* public xen header
* stuff needed outside xen-*.c, i.e. interfaces to qemu.
* must not depend on any xen headers being present in
* /usr/include/xen, so it can be included unconditionally.
*/
+#ifndef QEMU_HW_XEN_H
+#define QEMU_HW_XEN_H
+
+/*
+ * C files using Xen toolstack libraries will have included those headers
+ * already via xen_native.h, and having __XEN_TOOLS__ defined will have
+ * automatically set __XEN_INTERFACE_VERSION__ to the latest supported
+ * by the *system* Xen headers which were transitively included.
+ *
+ * C files which are part of the internal emulation, and which did not
+ * include xen_native.h, may need this defined so that the Xen headers
+ * imported to include/hw/xen/interface/ will expose the appropriate API
+ * version.
+ *
+ * This is why there's a rule that xen_native.h must be included first.
+ */
+#ifndef __XEN_INTERFACE_VERSION__
+#define __XEN_INTERFACE_VERSION__ 0x00040e00
+#endif
#include "exec/cpu-common.h"
/* xen-machine.c */
enum xen_mode {
- XEN_EMULATE = 0, // xen emulation, using xenner (default)
- XEN_ATTACH // attach to xen domain created by libxl
+ XEN_DISABLED = 0, /* xen support disabled (default) */
+ XEN_ATTACH, /* attach to xen domain created by libxl */
+ XEN_EMULATE, /* emulate Xen within QEMU */
};
extern uint32_t xen_domid;
@@ -21,17 +38,13 @@ extern enum xen_mode xen_mode;
extern bool xen_domid_restrict;
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
-void xen_piix3_set_irq(void *opaque, int irq_num, int level);
-void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
+int xen_set_pci_link_route(uint8_t link, uint8_t irq);
+void xen_intx_set_irq(void *opaque, int irq_num, int level);
void xen_hvm_inject_msi(uint64_t addr, uint32_t data);
int xen_is_pirq_msi(uint32_t msi_data);
qemu_irq *xen_interrupt_controller_init(void);
-void xenstore_store_pv_console_info(int i, struct Chardev *chr);
-
-void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory);
-
void xen_register_framebuffer(struct MemoryRegion *mr);
#endif /* QEMU_HW_XEN_H */
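A minimal sketch of the include-ordering rule described in the comment above; the file name is hypothetical and the exact include set depends on what the translation unit needs.

/* example-xen-native.c: talks to the Xen toolstack directly. */
#include "qemu/osdep.h"
#include "hw/xen/xen_native.h"  /* must come first: the system Xen headers
                                 * set __XEN_INTERFACE_VERSION__ */
#include "hw/xen/xen.h"         /* its fallback #define is now a no-op */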
diff --git a/include/hw/xen/xen_backend_ops.h b/include/hw/xen/xen_backend_ops.h
new file mode 100644
index 0000000000..90cca85f52
--- /dev/null
+++ b/include/hw/xen/xen_backend_ops.h
@@ -0,0 +1,408 @@
+/*
+ * QEMU Xen backend support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_BACKEND_OPS_H
+#define QEMU_XEN_BACKEND_OPS_H
+
+#include "hw/xen/xen.h"
+#include "hw/xen/interface/xen.h"
+#include "hw/xen/interface/io/xenbus.h"
+
+/*
+ * For the time being, these operations map fairly closely to the API of
+ * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
+ * from XenLegacyDevice back ends to the new XenDevice model, they may
+ * evolve to slightly higher-level APIs.
+ *
+ * The internal emulations do not emulate the Xen APIs entirely faithfully;
+ * only enough to be used by the Xen backend devices. For example, only one
+ * event channel can be bound to each handle, since that's sufficient for
+ * the device support (only the true Xen HVM backend uses more). And the
+ * behaviour of unmask() and pending() is different too because the device
+ * backends don't care.
+ */
+
+typedef struct xenevtchn_handle xenevtchn_handle;
+typedef int xenevtchn_port_or_error_t;
+typedef uint32_t evtchn_port_t;
+typedef uint16_t domid_t;
+typedef uint32_t grant_ref_t;
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
+#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
+
+#ifndef xen_rmb
+#define xen_rmb() smp_rmb()
+#endif
+#ifndef xen_wmb
+#define xen_wmb() smp_wmb()
+#endif
+#ifndef xen_mb
+#define xen_mb() smp_mb()
+#endif
+
+struct evtchn_backend_ops {
+ xenevtchn_handle *(*open)(void);
+ int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
+ evtchn_port_t guest_port);
+ int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
+ int (*close)(struct xenevtchn_handle *xc);
+ int (*get_fd)(struct xenevtchn_handle *xc);
+ int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
+ int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
+ int (*pending)(struct xenevtchn_handle *xc);
+};
+
+extern struct evtchn_backend_ops *xen_evtchn_ops;
+
+static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
+{
+ if (!xen_evtchn_ops) {
+ return NULL;
+ }
+ return xen_evtchn_ops->open();
+}
+
+static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
+ uint32_t domid,
+ evtchn_port_t guest_port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
+}
+
+static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->unbind(xc, port);
+}
+
+static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->close(xc);
+}
+
+static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->get_fd(xc);
+}
+
+static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->notify(xc, port);
+}
+
+static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->unmask(xc, port);
+}
+
+static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->pending(xc);
+}
+
+typedef struct xengntdev_handle xengnttab_handle;
+
+typedef struct XenGrantCopySegment {
+ union {
+ void *virt;
+ struct {
+ uint32_t ref;
+ off_t offset;
+ } foreign;
+ } source, dest;
+ size_t len;
+} XenGrantCopySegment;
+
+#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0)
+
+struct gnttab_backend_ops {
+ uint32_t features;
+ xengnttab_handle *(*open)(void);
+ int (*close)(xengnttab_handle *xgt);
+ int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs, uint32_t nr_segs,
+ Error **errp);
+ int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
+ void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot);
+ int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
+ uint32_t count);
+};
+
+extern struct gnttab_backend_ops *xen_gnttab_ops;
+
+static inline bool qemu_xen_gnttab_can_map_multi(void)
+{
+ return xen_gnttab_ops &&
+ !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
+}
+
+static inline xengnttab_handle *qemu_xen_gnttab_open(void)
+{
+ if (!xen_gnttab_ops) {
+ return NULL;
+ }
+ return xen_gnttab_ops->open();
+}
+
+static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->close(xgt);
+}
+
+static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
+ bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs,
+ uint32_t nr_segs, Error **errp)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+
+ return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
+ errp);
+}
+
+static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
+ uint32_t nr_grants)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
+}
+
+static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
+ uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot)
+{
+ if (!xen_gnttab_ops) {
+ return NULL;
+ }
+ return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
+}
+
+static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
+ void *start_address, uint32_t *refs,
+ uint32_t count)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
+}
+
+struct foreignmem_backend_ops {
+ void *(*map)(uint32_t dom, void *addr, int prot, size_t pages,
+ xen_pfn_t *pfns, int *errs);
+ int (*unmap)(void *addr, size_t pages);
+};
+
+extern struct foreignmem_backend_ops *xen_foreignmem_ops;
+
+static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot,
+ size_t pages, xen_pfn_t *pfns,
+ int *errs)
+{
+ if (!xen_foreignmem_ops) {
+ return NULL;
+ }
+ return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs);
+}
+
+static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages)
+{
+ if (!xen_foreignmem_ops) {
+ return -ENOSYS;
+ }
+ return xen_foreignmem_ops->unmap(addr, pages);
+}
+
+typedef void (*xs_watch_fn)(void *opaque, const char *path);
+
+struct qemu_xs_handle;
+struct qemu_xs_watch;
+typedef uint32_t xs_transaction_t;
+
+#define XBT_NULL 0
+
+#define XS_PERM_NONE 0x00
+#define XS_PERM_READ 0x01
+#define XS_PERM_WRITE 0x02
+
+struct xenstore_backend_ops {
+ struct qemu_xs_handle *(*open)(void);
+ void (*close)(struct qemu_xs_handle *h);
+ char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid);
+ char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *num);
+ void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len);
+ bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data, unsigned int len);
+ bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t,
+ unsigned int owner, unsigned int domid,
+ unsigned int perms, const char *path);
+ bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path);
+ struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path,
+ xs_watch_fn fn, void *opaque);
+ void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
+ xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h);
+ bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t,
+ bool abort);
+};
+
+extern struct xenstore_backend_ops *xen_xenstore_ops;
+
+static inline struct qemu_xs_handle *qemu_xen_xs_open(void)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->open();
+}
+
+static inline void qemu_xen_xs_close(struct qemu_xs_handle *h)
+{
+ if (!xen_xenstore_ops) {
+ return;
+ }
+ xen_xenstore_ops->close(h);
+}
+
+static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h,
+ unsigned int domid)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->get_domain_path(h, domid);
+}
+
+static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *num)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->directory(h, t, path, num);
+}
+
+static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *len)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->read(h, t, path, len);
+}
+
+static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ const void *data, unsigned int len)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->write(h, t, path, data, len);
+}
+
+static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
+ xs_transaction_t t, unsigned int owner,
+ unsigned int domid, unsigned int perms,
+ const char *path)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
+}
+
+static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->destroy(h, t, path);
+}
+
+static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h,
+ const char *path,
+ xs_watch_fn fn,
+ void *opaque)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->watch(h, path, fn, opaque);
+}
+
+static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h,
+ struct qemu_xs_watch *w)
+{
+ if (!xen_xenstore_ops) {
+ return;
+ }
+ xen_xenstore_ops->unwatch(h, w);
+}
+
+static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h)
+{
+ if (!xen_xenstore_ops) {
+ return XBT_NULL;
+ }
+ return xen_xenstore_ops->transaction_start(h);
+}
+
+static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h,
+ xs_transaction_t t, bool abort)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->transaction_end(h, t, abort);
+}
+
+void setup_xen_backend_ops(void);
+
+#endif /* QEMU_XEN_BACKEND_OPS_H */
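To make the intent of these accessors concrete, here is a hedged sketch of a backend kicking a guest event channel through the ops table; the domid/port values and the function name are placeholders, and error handling is abbreviated.

/* Illustrative only: open a handle, bind the guest's port, notify, tear down. */
static void example_notify_guest(uint32_t domid, evtchn_port_t guest_port)
{
    xenevtchn_handle *xeh = qemu_xen_evtchn_open();
    int local_port;

    if (!xeh) {
        return; /* no backend ops registered for this build/run */
    }

    local_port = qemu_xen_evtchn_bind_interdomain(xeh, domid, guest_port);
    if (local_port >= 0) {
        qemu_xen_evtchn_notify(xeh, local_port);
        qemu_xen_evtchn_unbind(xeh, local_port);
    }
    qemu_xen_evtchn_close(xeh);
}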
diff --git a/include/hw/xen/xen_igd.h b/include/hw/xen/xen_igd.h
new file mode 100644
index 0000000000..7ffca06c10
--- /dev/null
+++ b/include/hw/xen/xen_igd.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2007, Neocleus Corporation.
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Alex Novik <alex@neocleus.com>
+ * Allen Kay <allen.m.kay@intel.com>
+ * Guy Zana <guy@neocleus.com>
+ */
+#ifndef XEN_IGD_H
+#define XEN_IGD_H
+
+#include "hw/xen/xen-host-pci-device.h"
+
+typedef struct XenPCIPassthroughState XenPCIPassthroughState;
+
+bool xen_igd_gfx_pt_enabled(void);
+void xen_igd_gfx_pt_set(bool value, Error **errp);
+
+uint32_t igd_read_opregion(XenPCIPassthroughState *s);
+void xen_igd_reserve_slot(PCIBus *pci_bus);
+void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val);
+void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
+ XenHostPCIDevice *dev);
+
+static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev)
+{
+ return (xen_igd_gfx_pt_enabled()
+ && ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA));
+}
+
+#endif
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_native.h
index 82e56339dd..1a5ad693a4 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_native.h
@@ -1,5 +1,9 @@
-#ifndef QEMU_HW_XEN_COMMON_H
-#define QEMU_HW_XEN_COMMON_H
+#ifndef QEMU_HW_XEN_NATIVE_H
+#define QEMU_HW_XEN_NATIVE_H
+
+#ifdef __XEN_INTERFACE_VERSION__
+#error In Xen native files, include xen_native.h before other Xen headers
+#endif
/*
* If we have new enough libxenctrl then we do not want/need these compat
@@ -12,69 +16,19 @@
#include <xenctrl.h>
#include <xenstore.h>
-#include "hw/xen/interface/io/xenbus.h"
#include "hw/xen/xen.h"
-#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
#include "hw/xen/trace.h"
extern xc_interface *xen_xc;
/*
- * We don't support Xen prior to 4.2.0.
+ * We don't support Xen prior to 4.7.1.
*/
-/* Xen 4.2 through 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
-
-typedef xc_interface xenforeignmemory_handle;
-typedef xc_evtchn xenevtchn_handle;
-typedef xc_gnttab xengnttab_handle;
-typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
-
-#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
-#define xenevtchn_close(h) xc_evtchn_close(h)
-#define xenevtchn_fd(h) xc_evtchn_fd(h)
-#define xenevtchn_pending(h) xc_evtchn_pending(h)
-#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
-#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
-#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
-#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
-
-#define xengnttab_open(l, f) xc_gnttab_open(l, f)
-#define xengnttab_close(h) xc_gnttab_close(h)
-#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
-#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
-#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
-#define xengnttab_map_grant_refs(h, c, d, r, p) \
- xc_gnttab_map_grant_refs(h, c, d, r, p)
-#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
- xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
-
-#define xenforeignmemory_open(l, f) xen_xc
-#define xenforeignmemory_close(h)
-
-static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
- int prot, size_t pages,
- const xen_pfn_t arr[/*pages*/],
- int err[/*pages*/])
-{
- if (err)
- return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
- else
- return xc_map_foreign_pages(h, dom, prot, arr, pages);
-}
-
-#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
-
-#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
-
-#include <xenevtchn.h>
-#include <xengnttab.h>
#include <xenforeignmemory.h>
-#endif
-
extern xenforeignmemory_handle *xen_fmem;
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
@@ -134,6 +88,12 @@ static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
return NULL;
}
+static inline int xenforeignmemory_unmap_resource(
+ xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
+{
+ return 0;
+}
+
#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
@@ -176,8 +136,6 @@ static inline xendevicemodel_handle *xendevicemodel_open(
return xen_xc;
}
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
-
static inline int xendevicemodel_create_ioreq_server(
xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
ioservid_t *id)
@@ -239,8 +197,6 @@ static inline int xendevicemodel_set_ioreq_server_state(
return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
-#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
-
static inline int xendevicemodel_set_pci_intx_level(
xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
@@ -310,12 +266,6 @@ static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
device, intx, level);
}
-static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
- uint8_t irq)
-{
- return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
-}
-
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
uint32_t msi_data)
{
@@ -352,7 +302,7 @@ static inline int xen_restrict(domid_t domid)
void destroy_hvm_domain(bool reboot);
/* shutdown/destroy current domain because of an error */
-void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
@@ -374,15 +324,6 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
}
#endif
-/* Xen before 4.6 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
-
-#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
-#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-#endif
-
-#endif
-
static inline int xen_get_default_ioreq_server_info(domid_t dom,
xen_pfn_t *ioreq_pfn,
xen_pfn_t *bufioreq_pfn,
@@ -420,84 +361,6 @@ static inline int xen_get_default_ioreq_server_info(domid_t dom,
return 0;
}
-/* Xen before 4.5 */
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
-
-#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
-#define HVM_PARAM_BUFIOREQ_EVTCHN 26
-#endif
-
-#define IOREQ_TYPE_PCI_CONFIG 2
-
-typedef uint16_t ioservid_t;
-
-static inline void xen_map_memory_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_unmap_memory_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_map_io_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_unmap_io_section(domid_t dom,
- ioservid_t ioservid,
- MemoryRegionSection *section)
-{
-}
-
-static inline void xen_map_pcidev(domid_t dom,
- ioservid_t ioservid,
- PCIDevice *pci_dev)
-{
-}
-
-static inline void xen_unmap_pcidev(domid_t dom,
- ioservid_t ioservid,
- PCIDevice *pci_dev)
-{
-}
-
-static inline void xen_create_ioreq_server(domid_t dom,
- ioservid_t *ioservid)
-{
-}
-
-static inline void xen_destroy_ioreq_server(domid_t dom,
- ioservid_t ioservid)
-{
-}
-
-static inline int xen_get_ioreq_server_info(domid_t dom,
- ioservid_t ioservid,
- xen_pfn_t *ioreq_pfn,
- xen_pfn_t *bufioreq_pfn,
- evtchn_port_t *bufioreq_evtchn)
-{
- return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
- bufioreq_pfn,
- bufioreq_evtchn);
-}
-
-static inline int xen_set_ioreq_server_state(domid_t dom,
- ioservid_t ioservid,
- bool enable)
-{
- return 0;
-}
-
-/* Xen 4.5 */
-#else
-
static bool use_default_ioreq_server;
static inline void xen_map_memory_section(domid_t dom,
@@ -600,8 +463,8 @@ static inline void xen_unmap_pcidev(domid_t dom,
PCI_FUNC(pci_dev->devfn));
}
-static inline void xen_create_ioreq_server(domid_t dom,
- ioservid_t *ioservid)
+static inline int xen_create_ioreq_server(domid_t dom,
+ ioservid_t *ioservid)
{
int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
HVM_IOREQSRV_BUFIOREQ_ATOMIC,
@@ -609,12 +472,14 @@ static inline void xen_create_ioreq_server(domid_t dom,
if (rc == 0) {
trace_xen_ioreq_server_create(*ioservid);
- return;
+ return rc;
}
*ioservid = 0;
use_default_ioreq_server = true;
trace_xen_default_ioreq_server();
+
+ return rc;
}
static inline void xen_destroy_ioreq_server(domid_t dom,
@@ -658,33 +523,28 @@ static inline int xen_set_ioreq_server_state(domid_t dom,
enable);
}
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41500
+static inline int xendevicemodel_set_irq_level(xendevicemodel_handle *dmod,
+ domid_t domid, uint32_t irq,
+ unsigned int level)
+{
+ return -1;
+}
#endif
-/* Xen before 4.8 */
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
-
-struct xengnttab_grant_copy_segment {
- union xengnttab_copy_ptr {
- void *virt;
- struct {
- uint32_t ref;
- uint16_t offset;
- uint16_t domid;
- } foreign;
- } source, dest;
- uint16_t len;
- uint16_t flags;
- int16_t status;
-};
-
-typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41700
+#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000)
+#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000)
+#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
+#define GUEST_VIRTIO_MMIO_SPI_LAST 43
+#endif
-static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
- xengnttab_grant_copy_segment_t *segs)
-{
- return -ENOSYS;
-}
+#if defined(__i386__) || defined(__x86_64__)
+#define GUEST_RAM_BANKS 2
+#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
+#define GUEST_RAM0_SIZE 0xc0000000ULL
+#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
+#define GUEST_RAM1_SIZE 0xfe00000000ULL
#endif
-#endif /* QEMU_HW_XEN_COMMON_H */
+#endif /* QEMU_HW_XEN_NATIVE_H */
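Because xen_create_ioreq_server() now returns the underlying result instead of void, callers can tell whether a dedicated ioreq server was created or the code fell back to the default one; a hedged fragment, with illustrative variable names.

/* On failure the helper has already selected the default ioreq server and
 * zeroed ioservid; the return code merely lets the caller react. */
ioservid_t ioservid;
int rc = xen_create_ioreq_server(xen_domid, &ioservid);
if (rc < 0) {
    /* e.g. log that the default ioreq server is in use */
}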
diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h
index 83e5174d90..ddad4b9f36 100644
--- a/include/hw/xen/xen_pvdev.h
+++ b/include/hw/xen/xen_pvdev.h
@@ -1,7 +1,9 @@
#ifndef QEMU_HW_XEN_PVDEV_H
#define QEMU_HW_XEN_PVDEV_H
-#include "hw/xen/xen_common.h"
+#include "hw/qdev-core.h"
+#include "hw/xen/xen_backend_ops.h"
+
/* ------------------------------------------------------------- */
#define XEN_BUFSIZE 1024
@@ -38,6 +40,7 @@ struct XenLegacyDevice {
char name[64];
int debug;
+ struct qemu_xs_watch *watch;
enum xenbus_state be_state;
enum xenbus_state fe_state;
int online;
@@ -63,7 +66,6 @@ int xenstore_write_int64(const char *base, const char *node, int64_t ival);
char *xenstore_read_str(const char *base, const char *node);
int xenstore_read_int(const char *base, const char *node, int *ival);
int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval);
-void xenstore_update(void *unused);
const char *xenbus_strstate(enum xenbus_state state);
@@ -76,6 +78,6 @@ void xen_pv_unbind_evtchn(struct XenLegacyDevice *xendev);
int xen_pv_send_notify(struct XenLegacyDevice *xendev);
void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level,
- const char *fmt, ...) GCC_FMT_ATTR(3, 4);
+ const char *fmt, ...) G_GNUC_PRINTF(3, 4);
#endif /* QEMU_HW_XEN_PVDEV_H */
diff --git a/include/io/channel-buffer.h b/include/io/channel-buffer.h
index 3f4b3f29e1..c9463ee655 100644
--- a/include/io/channel-buffer.h
+++ b/include/io/channel-buffer.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,11 @@
#define QIO_CHANNEL_BUFFER_H
#include "io/channel.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_BUFFER "qio-channel-buffer"
-#define QIO_CHANNEL_BUFFER(obj) \
- OBJECT_CHECK(QIOChannelBuffer, (obj), TYPE_QIO_CHANNEL_BUFFER)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelBuffer, QIO_CHANNEL_BUFFER)
-typedef struct QIOChannelBuffer QIOChannelBuffer;
/**
* QIOChannelBuffer:
diff --git a/include/io/channel-command.h b/include/io/channel-command.h
index 336d47fa5c..98934e6d9e 100644
--- a/include/io/channel-command.h
+++ b/include/io/channel-command.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,11 @@
#define QIO_CHANNEL_COMMAND_H
#include "io/channel.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_COMMAND "qio-channel-command"
-#define QIO_CHANNEL_COMMAND(obj) \
- OBJECT_CHECK(QIOChannelCommand, (obj), TYPE_QIO_CHANNEL_COMMAND)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelCommand, QIO_CHANNEL_COMMAND)
-typedef struct QIOChannelCommand QIOChannelCommand;
/**
@@ -42,36 +41,14 @@ struct QIOChannelCommand {
QIOChannel parent;
int writefd;
int readfd;
- pid_t pid;
+ GPid pid;
+#ifdef WIN32
+ bool blocking;
+#endif
};
/**
- * qio_channel_command_new_pid:
- * @writefd: the FD connected to the command's stdin
- * @readfd: the FD connected to the command's stdout
- * @pid: the PID of the running child command
- * @errp: pointer to a NULL-initialized error object
- *
- * Create a channel for performing I/O with the
- * previously spawned command identified by @pid.
- * The two file descriptors provide the connection
- * to command's stdio streams, either one or which
- * may be -1 to indicate that stream is not open.
- *
- * The channel will take ownership of the process
- * @pid and will kill it when closing the channel.
- * Similarly it will take responsibility for
- * closing the file descriptors @writefd and @readfd.
- *
- * Returns: the command channel object, or NULL on error
- */
-QIOChannelCommand *
-qio_channel_command_new_pid(int writefd,
- int readfd,
- pid_t pid);
-
-/**
* qio_channel_command_new_spawn:
* @argv: the NULL terminated list of command arguments
* @flags: the I/O mode, one of O_RDONLY, O_WRONLY, O_RDWR
diff --git a/include/io/channel-file.h b/include/io/channel-file.h
index ebfe54ec70..d373a4e44d 100644
--- a/include/io/channel-file.h
+++ b/include/io/channel-file.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,12 +22,11 @@
#define QIO_CHANNEL_FILE_H
#include "io/channel.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_FILE "qio-channel-file"
-#define QIO_CHANNEL_FILE(obj) \
- OBJECT_CHECK(QIOChannelFile, (obj), TYPE_QIO_CHANNEL_FILE)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelFile, QIO_CHANNEL_FILE)
-typedef struct QIOChannelFile QIOChannelFile;
/**
* QIOChannelFile:
@@ -70,6 +69,24 @@ QIOChannelFile *
qio_channel_file_new_fd(int fd);
/**
+ * qio_channel_file_new_dupfd:
+ * @fd: the file descriptor
+ * @errp: pointer to initialized error object
+ *
+ * Create a new IO channel object for a file represented by the @fd
+ * parameter. Like qio_channel_file_new_fd(), but the @fd is first
+ * duplicated with dup().
+ *
+ * The channel will own the duplicated file descriptor and will take
+ * responsibility for closing it, the original FD is owned by the
+ * caller.
+ *
+ * Returns: the new channel object
+ */
+QIOChannelFile *
+qio_channel_file_new_dupfd(int fd, Error **errp);
+
+/**
* qio_channel_file_new_path:
* @path: the file path
* @flags: the open flags (O_RDONLY|O_WRONLY|O_RDWR, etc)
diff --git a/include/io/channel-null.h b/include/io/channel-null.h
new file mode 100644
index 0000000000..f6d54e63cf
--- /dev/null
+++ b/include/io/channel-null.h
@@ -0,0 +1,55 @@
+/*
+ * QEMU I/O channels null driver
+ *
+ * Copyright (c) 2022 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QIO_CHANNEL_NULL_H
+#define QIO_CHANNEL_NULL_H
+
+#include "io/channel.h"
+#include "qom/object.h"
+
+#define TYPE_QIO_CHANNEL_NULL "qio-channel-null"
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelNull, QIO_CHANNEL_NULL)
+
+
+/**
+ * QIOChannelNull:
+ *
+ * The QIOChannelNull object provides a channel implementation
+ * that discards all writes and returns EOF for all reads.
+ */
+
+struct QIOChannelNull {
+ QIOChannel parent;
+ bool closed;
+};
+
+
+/**
+ * qio_channel_null_new:
+ *
+ * Create a new IO channel object that discards all writes
+ * and returns EOF for all reads.
+ *
+ * Returns: the new channel object
+ */
+QIOChannelNull *
+qio_channel_null_new(void);
+
+#endif /* QIO_CHANNEL_NULL_H */
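A short usage sketch of the null channel's discard semantics; the buffer and error variables are illustrative.

/* All writes succeed and are dropped; reads report end-of-file. */
Error *err = NULL;
QIOChannelNull *null_ioc = qio_channel_null_new();
const char msg[] = "benchmark payload";

if (qio_channel_write_all(QIO_CHANNEL(null_ioc), msg, sizeof(msg), &err) < 0) {
    error_report_err(err);
}
object_unref(OBJECT(null_ioc));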
diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
index 777ff5954e..ab15577d38 100644
--- a/include/io/channel-socket.h
+++ b/include/io/channel-socket.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,12 +24,11 @@
#include "io/channel.h"
#include "io/task.h"
#include "qemu/sockets.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_SOCKET "qio-channel-socket"
-#define QIO_CHANNEL_SOCKET(obj) \
- OBJECT_CHECK(QIOChannelSocket, (obj), TYPE_QIO_CHANNEL_SOCKET)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelSocket, QIO_CHANNEL_SOCKET)
-typedef struct QIOChannelSocket QIOChannelSocket;
/**
* QIOChannelSocket:
@@ -48,6 +47,8 @@ struct QIOChannelSocket {
socklen_t localAddrLen;
struct sockaddr_storage remoteAddr;
socklen_t remoteAddrLen;
+ ssize_t zero_copy_queued;
+ ssize_t zero_copy_sent;
};
@@ -123,7 +124,7 @@ void qio_channel_socket_connect_async(QIOChannelSocket *ioc,
* qio_channel_socket_listen_sync:
* @ioc: the socket channel object
* @addr: the address to listen to
- * @num: the expected ammount of connections
+ * @num: the expected amount of connections
* @errp: pointer to a NULL-initialized error object
*
* Attempt to listen to the address @addr. This method
@@ -140,7 +141,7 @@ int qio_channel_socket_listen_sync(QIOChannelSocket *ioc,
* qio_channel_socket_listen_async:
* @ioc: the socket channel object
* @addr: the address to listen to
- * @num: the expected ammount of connections
+ * @num: the expected amount of connections
* @callback: the function to invoke on completion
* @opaque: user data to pass to @callback
* @destroy: the function to free @opaque
diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
index fdbdf12feb..26c67f17e2 100644
--- a/include/io/channel-tls.h
+++ b/include/io/channel-tls.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,12 +24,11 @@
#include "io/channel.h"
#include "io/task.h"
#include "crypto/tlssession.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_TLS "qio-channel-tls"
-#define QIO_CHANNEL_TLS(obj) \
- OBJECT_CHECK(QIOChannelTLS, (obj), TYPE_QIO_CHANNEL_TLS)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelTLS, QIO_CHANNEL_TLS)
-typedef struct QIOChannelTLS QIOChannelTLS;
/**
* QIOChannelTLS
@@ -49,6 +48,7 @@ struct QIOChannelTLS {
QIOChannel *master;
QCryptoTLSSession *session;
QIOChannelShutdown shutdown;
+ guint hs_ioc_tag;
};
/**
diff --git a/include/io/channel-util.h b/include/io/channel-util.h
index c0b79cf603..fa18a3756d 100644
--- a/include/io/channel-util.h
+++ b/include/io/channel-util.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -49,4 +49,27 @@
QIOChannel *qio_channel_new_fd(int fd,
Error **errp);
+/**
+ * qio_channel_util_set_aio_fd_handler:
+ * @read_fd: the file descriptor for the read handler
+ * @read_ctx: the AioContext for the read handler
+ * @io_read: the read handler
+ * @write_fd: the file descriptor for the write handler
+ * @write_ctx: the AioContext for the write handler
+ * @io_write: the write handler
+ * @opaque: the opaque argument to the read and write handler
+ *
+ * Set the read and write handlers when @read_ctx and @write_ctx are non-NULL,
+ * respectively. To leave a handler in its current state, pass a NULL
+ * AioContext. To clear a handler, pass a non-NULL AioContext and a NULL
+ * handler.
+ */
+void qio_channel_util_set_aio_fd_handler(int read_fd,
+ AioContext *read_ctx,
+ IOHandler *io_read,
+ int write_fd,
+ AioContext *write_ctx,
+ IOHandler *io_write,
+ void *opaque);
+
#endif /* QIO_CHANNEL_UTIL_H */
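A hedged sketch of how a channel implementation might forward its callback to this helper; ExampleChannel and its fd member are hypothetical.

/* Inside a single-fd channel's io_set_aio_fd_handler callback, the same
 * descriptor is used for both directions. */
static void example_set_aio_fd_handler(QIOChannel *ioc,
                                       AioContext *read_ctx,
                                       IOHandler *io_read,
                                       AioContext *write_ctx,
                                       IOHandler *io_write,
                                       void *opaque)
{
    ExampleChannel *ec = (ExampleChannel *)ioc;

    qio_channel_util_set_aio_fd_handler(ec->fd, read_ctx, io_read,
                                        ec->fd, write_ctx, io_write,
                                        opaque);
}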
diff --git a/include/io/channel-watch.h b/include/io/channel-watch.h
index 63bc4ae2d9..a36aab8f8f 100644
--- a/include/io/channel-watch.h
+++ b/include/io/channel-watch.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/io/channel-websock.h b/include/io/channel-websock.h
index a7e5e92e61..e180827c57 100644
--- a/include/io/channel-websock.h
+++ b/include/io/channel-websock.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,12 +24,11 @@
#include "io/channel.h"
#include "qemu/buffer.h"
#include "io/task.h"
+#include "qom/object.h"
#define TYPE_QIO_CHANNEL_WEBSOCK "qio-channel-websock"
-#define QIO_CHANNEL_WEBSOCK(obj) \
- OBJECT_CHECK(QIOChannelWebsock, (obj), TYPE_QIO_CHANNEL_WEBSOCK)
+OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelWebsock, QIO_CHANNEL_WEBSOCK)
-typedef struct QIOChannelWebsock QIOChannelWebsock;
typedef union QIOChannelWebsockMask QIOChannelWebsockMask;
union QIOChannelWebsockMask {
diff --git a/include/io/channel.h b/include/io/channel.h
index d4557f0930..7986c49c71 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,28 +22,29 @@
#define QIO_CHANNEL_H
#include "qom/object.h"
-#include "qemu/coroutine.h"
+#include "qemu/coroutine-core.h"
#include "block/aio.h"
#define TYPE_QIO_CHANNEL "qio-channel"
-#define QIO_CHANNEL(obj) \
- OBJECT_CHECK(QIOChannel, (obj), TYPE_QIO_CHANNEL)
-#define QIO_CHANNEL_CLASS(klass) \
- OBJECT_CLASS_CHECK(QIOChannelClass, klass, TYPE_QIO_CHANNEL)
-#define QIO_CHANNEL_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QIOChannelClass, obj, TYPE_QIO_CHANNEL)
+OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass,
+ QIO_CHANNEL)
-typedef struct QIOChannel QIOChannel;
-typedef struct QIOChannelClass QIOChannelClass;
#define QIO_CHANNEL_ERR_BLOCK -2
+#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1
+
+#define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1
+
typedef enum QIOChannelFeature QIOChannelFeature;
enum QIOChannelFeature {
QIO_CHANNEL_FEATURE_FD_PASS,
QIO_CHANNEL_FEATURE_SHUTDOWN,
QIO_CHANNEL_FEATURE_LISTEN,
+ QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY,
+ QIO_CHANNEL_FEATURE_READ_MSG_PEEK,
+ QIO_CHANNEL_FEATURE_SEEKABLE,
};
@@ -81,9 +82,11 @@ struct QIOChannel {
Object parent;
unsigned int features; /* bitmask of QIOChannelFeatures */
char *name;
- AioContext *ctx;
+ AioContext *read_ctx;
Coroutine *read_coroutine;
+ AioContext *write_ctx;
Coroutine *write_coroutine;
+ bool follow_coroutine_ctx;
#ifdef _WIN32
HANDLE event; /* For use with GSource on Win32 */
#endif
@@ -98,7 +101,8 @@ struct QIOChannel {
* provide additional optional features.
*
* Consult the corresponding public API docs for a description
- * of the semantics of each callback
+ * of the semantics of each callback. io_shutdown in particular
+ * must be thread-safe, terminate quickly and must not block.
*/
struct QIOChannelClass {
ObjectClass parent;
@@ -109,12 +113,14 @@ struct QIOChannelClass {
size_t niov,
int *fds,
size_t nfds,
+ int flags,
Error **errp);
ssize_t (*io_readv)(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds,
size_t *nfds,
+ int flags,
Error **errp);
int (*io_close)(QIOChannel *ioc,
Error **errp);
@@ -125,6 +131,16 @@ struct QIOChannelClass {
Error **errp);
/* Optional callbacks */
+ ssize_t (*io_pwritev)(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ off_t offset,
+ Error **errp);
+ ssize_t (*io_preadv)(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ off_t offset,
+ Error **errp);
int (*io_shutdown)(QIOChannel *ioc,
QIOChannelShutdown how,
Error **errp);
@@ -137,10 +153,13 @@ struct QIOChannelClass {
int whence,
Error **errp);
void (*io_set_aio_fd_handler)(QIOChannel *ioc,
- AioContext *ctx,
+ AioContext *read_ctx,
IOHandler *io_read,
+ AioContext *write_ctx,
IOHandler *io_write,
void *opaque);
+ int (*io_flush)(QIOChannel *ioc,
+ Error **errp);
};
/* General I/O handling functions */
@@ -187,6 +206,7 @@ void qio_channel_set_name(QIOChannel *ioc,
* @niov: the length of the @iov array
* @fds: pointer to an array that will receive file handles
* @nfds: pointer filled with number of elements in @fds on return
+ * @flags: read flags (QIO_CHANNEL_READ_FLAG_*)
* @errp: pointer to a NULL-initialized error object
*
* Read data from the IO channel, storing it in the
@@ -223,6 +243,7 @@ ssize_t qio_channel_readv_full(QIOChannel *ioc,
size_t niov,
int **fds,
size_t *nfds,
+ int flags,
Error **errp);
@@ -233,6 +254,7 @@ ssize_t qio_channel_readv_full(QIOChannel *ioc,
* @niov: the length of the @iov array
* @fds: an array of file handles to send
* @nfds: number of file handles in @fds
+ * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
* @errp: pointer to a NULL-initialized error object
*
* Write data to the IO channel, reading it from the
@@ -265,6 +287,7 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc,
size_t niov,
int *fds,
size_t nfds,
+ int flags,
Error **errp);
/**
@@ -292,10 +315,10 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc,
* Returns: 1 if all bytes were read, 0 if end-of-file
* occurs without data, or -1 on error
*/
-int qio_channel_readv_all_eof(QIOChannel *ioc,
- const struct iovec *iov,
- size_t niov,
- Error **errp);
+int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
/**
* qio_channel_readv_all:
@@ -319,10 +342,10 @@ int qio_channel_readv_all_eof(QIOChannel *ioc,
*
* Returns: 0 if all bytes were read, or -1 on error
*/
-int qio_channel_readv_all(QIOChannel *ioc,
- const struct iovec *iov,
- size_t niov,
- Error **errp);
+int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
/**
@@ -344,10 +367,10 @@ int qio_channel_readv_all(QIOChannel *ioc,
*
* Returns: 0 if all bytes were written, or -1 on error
*/
-int qio_channel_writev_all(QIOChannel *ioc,
- const struct iovec *iov,
- size_t niov,
- Error **erp);
+int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ Error **errp);
/**
* qio_channel_readv:
@@ -428,10 +451,10 @@ ssize_t qio_channel_write(QIOChannel *ioc,
* Returns: 1 if all bytes were read, 0 if end-of-file occurs
* without data, or -1 on error
*/
-int qio_channel_read_all_eof(QIOChannel *ioc,
- char *buf,
- size_t buflen,
- Error **errp);
+int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
+ char *buf,
+ size_t buflen,
+ Error **errp);
/**
* qio_channel_read_all:
@@ -448,10 +471,10 @@ int qio_channel_read_all_eof(QIOChannel *ioc,
*
* Returns: 0 if all bytes were read, or -1 on error
*/
-int qio_channel_read_all(QIOChannel *ioc,
- char *buf,
- size_t buflen,
- Error **errp);
+int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
+ char *buf,
+ size_t buflen,
+ Error **errp);
/**
* qio_channel_write_all:
@@ -467,10 +490,10 @@ int qio_channel_read_all(QIOChannel *ioc,
*
* Returns: 0 if all bytes were written, or -1 on error
*/
-int qio_channel_write_all(QIOChannel *ioc,
- const char *buf,
- size_t buflen,
- Error **errp);
+int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
+ const char *buf,
+ size_t buflen,
+ Error **errp);
/**
* qio_channel_set_blocking:
@@ -490,6 +513,21 @@ int qio_channel_set_blocking(QIOChannel *ioc,
Error **errp);
/**
+ * qio_channel_set_follow_coroutine_ctx:
+ * @ioc: the channel object
+ * @enabled: whether or not to follow the coroutine's AioContext
+ *
+ * If @enabled is true, calls to qio_channel_yield() use the current
+ * coroutine's AioContext. Usually this is desirable.
+ *
+ * If @enabled is false, calls to qio_channel_yield() use the global iohandler
+ * AioContext. This may be used by coroutines that run in the main loop and
+ * do not wish to respond to I/O during nested event loops. This is the
+ * default for compatibility with code that is not aware of AioContexts.
+ */
+void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled);
+
+/**
* qio_channel_close:
* @ioc: the channel object
* @errp: pointer to a NULL-initialized error object
@@ -502,6 +540,78 @@ int qio_channel_close(QIOChannel *ioc,
Error **errp);
/**
+ * qio_channel_pwritev
+ * @ioc: the channel object
+ * @iov: the array of memory regions to write data from
+ * @niov: the length of the @iov array
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility and may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ * Behaves like qio_channel_writev_full, except that it does not
+ * support sending file handles, and the write begins at the
+ * passed @offset.
+ *
+ */
+ssize_t qio_channel_pwritev(QIOChannel *ioc, const struct iovec *iov,
+ size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pwrite
+ * @ioc: the channel object
+ * @buf: the memory region to write data from
+ * @buflen: the number of bytes in @buf
+ * @offset: offset in the channel where writes should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility and may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ */
+ssize_t qio_channel_pwrite(QIOChannel *ioc, char *buf, size_t buflen,
+ off_t offset, Error **errp);
+
+/**
+ * qio_channel_preadv
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data into
+ * @niov: the length of the @iov array
+ * @offset: offset in the channel where reads should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility and may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ * Behaves like qio_channel_readv_full, except that it does not
+ * support receiving file handles, and the read begins at the
+ * passed @offset.
+ *
+ */
+ssize_t qio_channel_preadv(QIOChannel *ioc, const struct iovec *iov,
+ size_t niov, off_t offset, Error **errp);
+
+/**
+ * qio_channel_pread
+ * @ioc: the channel object
+ * @buf: the memory region to read data into
+ * @buflen: the number of bytes to read into @buf
+ * @offset: offset in the channel where reads should begin
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Not all implementations will support this facility and may report
+ * an error. To avoid errors, the caller may check for the feature
+ * flag QIO_CHANNEL_FEATURE_SEEKABLE prior to calling this method.
+ *
+ */
+ssize_t qio_channel_pread(QIOChannel *ioc, char *buf, size_t buflen,
+ off_t offset, Error **errp);
+
+/**
* qio_channel_shutdown:
* @ioc: the channel object
* @how: the direction to shutdown
@@ -516,6 +626,8 @@ int qio_channel_close(QIOChannel *ioc,
* QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
* this method.
*
+ * This function is thread-safe, terminates quickly and does not block.
+ *
* Returns: 0 on success, -1 on error
*/
int qio_channel_shutdown(QIOChannel *ioc,
@@ -693,41 +805,6 @@ GSource *qio_channel_add_watch_source(QIOChannel *ioc,
GMainContext *context);
/**
- * qio_channel_attach_aio_context:
- * @ioc: the channel object
- * @ctx: the #AioContext to set the handlers on
- *
- * Request that qio_channel_yield() sets I/O handlers on
- * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
- * uses QEMU's main thread event loop.
- *
- * You can move a #QIOChannel from one #AioContext to another even if
- * I/O handlers are set for a coroutine. However, #QIOChannel provides
- * no synchronization between the calls to qio_channel_yield() and
- * qio_channel_attach_aio_context().
- *
- * Therefore you should first call qio_channel_detach_aio_context()
- * to ensure that the coroutine is not entered concurrently. Then,
- * while the coroutine has yielded, call qio_channel_attach_aio_context(),
- * and then aio_co_schedule() to place the coroutine on the new
- * #AioContext. The calls to qio_channel_detach_aio_context()
- * and qio_channel_attach_aio_context() should be protected with
- * aio_context_acquire() and aio_context_release().
- */
-void qio_channel_attach_aio_context(QIOChannel *ioc,
- AioContext *ctx);
-
-/**
- * qio_channel_detach_aio_context:
- * @ioc: the channel object
- *
- * Disable any I/O handlers set by qio_channel_yield(). With the
- * help of aio_co_schedule(), this allows moving a coroutine that was
- * paused by qio_channel_yield() to another context.
- */
-void qio_channel_detach_aio_context(QIOChannel *ioc);
-
-/**
* qio_channel_yield:
* @ioc: the channel object
* @condition: the I/O condition to wait for
@@ -747,6 +824,16 @@ void coroutine_fn qio_channel_yield(QIOChannel *ioc,
GIOCondition condition);
/**
+ * qio_channel_wake_read:
+ * @ioc: the channel object
+ *
+ * If qio_channel_yield() is currently waiting for the channel to become
+ * readable, interrupt it and reenter immediately. This function is safe to call
+ * from any thread.
+ */
+void qio_channel_wake_read(QIOChannel *ioc);
+
+/**
* qio_channel_wait:
* @ioc: the channel object
* @condition: the I/O condition to wait for
@@ -764,8 +851,9 @@ void qio_channel_wait(QIOChannel *ioc,
/**
* qio_channel_set_aio_fd_handler:
* @ioc: the channel object
- * @ctx: the AioContext to set the handlers on
+ * @read_ctx: the AioContext to set the read handler on or NULL
* @io_read: the read handler
+ * @write_ctx: the AioContext to set the write handler on or NULL
* @io_write: the write handler
* @opaque: the opaque value passed to the handler
*
@@ -773,11 +861,124 @@ void qio_channel_wait(QIOChannel *ioc,
* be used by channel implementations to forward the handlers
* to another channel (e.g. from #QIOChannelTLS to the
* underlying socket).
+ *
+ * When @read_ctx is NULL, don't touch the read handler. When @write_ctx is
+ * NULL, don't touch the write handler. Note that setting the read handler
+ * clears the write handler, and vice versa, if they share the same AioContext.
+ * Therefore the caller must pass both handlers together when sharing the same
+ * AioContext.
*/
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
- AioContext *ctx,
+ AioContext *read_ctx,
IOHandler *io_read,
+ AioContext *write_ctx,
IOHandler *io_write,
void *opaque);
+/**
+ * qio_channel_readv_full_all_eof:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data to
+ * @niov: the length of the @iov array
+ * @fds: an array of file handles to read
+ * @nfds: number of file handles in @fds
+ * @errp: pointer to a NULL-initialized error object
+ *
+ *
+ * Performs the same function as qio_channel_readv_all_eof.
+ * Additionally, attempts to read file descriptors shared
+ * over the channel. The function will wait for all
+ * requested data to be read, yielding from the current
+ * coroutine if required. Here, data refers to both file
+ * descriptors and the iovs.
+ *
+ * Returns: 1 if all bytes were read, 0 if end-of-file
+ * occurs without data, or -1 on error
+ */
+
+int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ int **fds, size_t *nfds,
+ Error **errp);
+
+/**
+ * qio_channel_readv_full_all:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to read data to
+ * @niov: the length of the @iov array
+ * @fds: an array of file handles to read
+ * @nfds: number of file handles in @fds
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Performs the same function as qio_channel_readv_all().
+ * Additionally, attempts to read file descriptors shared
+ * over the channel. The function will wait for all
+ * requested data to be read, yielding from the current
+ * coroutine if required. "Data" refers to both the file
+ * descriptors and the iovs.
+ *
+ * Returns: 0 if all bytes were read, or -1 on error
+ */
+
+int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ int **fds, size_t *nfds,
+ Error **errp);
+
+/**
+ * qio_channel_writev_full_all:
+ * @ioc: the channel object
+ * @iov: the array of memory regions to write data from
+ * @niov: the length of the @iov array
+ * @fds: an array of file handles to send
+ * @nfds: number of file handles in @fds
+ * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Behaves like qio_channel_writev_full but will attempt
+ * to send all data passed (file handles and memory regions).
+ * The function will wait for all requested data
+ * to be written, yielding from the current coroutine
+ * if required.
+ *
+ * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in flags,
+ * instead of waiting for all requested data to be written,
+ * this function will wait until it's all queued for writing.
+ * In this case, if the buffer gets changed between queueing and
+ * sending, the updated buffer will be sent. If this is not the
+ * desired behavior, it is advisable to call qio_channel_flush()
+ * before reusing the buffer.
+ *
+ * Returns: 0 if all bytes were written, or -1 on error
+ */
+
+int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
+ const struct iovec *iov,
+ size_t niov,
+ int *fds, size_t nfds,
+ int flags, Error **errp);
+
+/**
+ * qio_channel_flush:
+ * @ioc: the channel object
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Blocks until every packet queued with
+ * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY
+ * has been sent, or returns early if an error occurs.
+ *
+ * If not implemented, acts as a no-op and returns 0.
+ *
+ * Returns: -1 if an error occurs,
+ * 1 if every send failed to use zero copy,
+ * 0 otherwise.
+ */
+
+int qio_channel_flush(QIOChannel *ioc,
+ Error **errp);
+
#endif /* QIO_CHANNEL_H */
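As a rough usage sketch (not part of this patch), the zero-copy write path
described above can be driven as follows; the buffer and helper names are
made up for illustration, and the calls are the ones declared in this header:

static int example_send_zero_copy(QIOChannel *ioc, void *buf, size_t len,
                                  Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = len };

    /* Queue the buffer; with ZERO_COPY it may only be queued, not yet sent. */
    if (qio_channel_writev_full_all(ioc, &iov, 1, NULL, 0,
                                    QIO_CHANNEL_WRITE_FLAG_ZERO_COPY,
                                    errp) < 0) {
        return -1;
    }

    /* Block until the kernel has finished with buf, so it can be reused. */
    return qio_channel_flush(ioc, errp) < 0 ? -1 : 0;
}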
diff --git a/include/io/dns-resolver.h b/include/io/dns-resolver.h
index a475e978c8..558ea517de 100644
--- a/include/io/dns-resolver.h
+++ b/include/io/dns-resolver.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,15 +26,9 @@
#include "io/task.h"
#define TYPE_QIO_DNS_RESOLVER "qio-dns-resolver"
-#define QIO_DNS_RESOLVER(obj) \
- OBJECT_CHECK(QIODNSResolver, (obj), TYPE_QIO_DNS_RESOLVER)
-#define QIO_DNS_RESOLVER_CLASS(klass) \
- OBJECT_CLASS_CHECK(QIODNSResolverClass, klass, TYPE_QIO_DNS_RESOLVER)
-#define QIO_DNS_RESOLVER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QIODNSResolverClass, obj, TYPE_QIO_DNS_RESOLVER)
+OBJECT_DECLARE_SIMPLE_TYPE(QIODNSResolver,
+ QIO_DNS_RESOLVER)
-typedef struct QIODNSResolver QIODNSResolver;
-typedef struct QIODNSResolverClass QIODNSResolverClass;
/**
* QIODNSResolver:
@@ -139,9 +133,6 @@ struct QIODNSResolver {
Object parent;
};
-struct QIODNSResolverClass {
- ObjectClass parent;
-};
/**
diff --git a/include/io/net-listener.h b/include/io/net-listener.h
index fb101703e3..ab9f291ed6 100644
--- a/include/io/net-listener.h
+++ b/include/io/net-listener.h
@@ -22,17 +22,12 @@
#define QIO_NET_LISTENER_H
#include "io/channel-socket.h"
+#include "qom/object.h"
#define TYPE_QIO_NET_LISTENER "qio-net-listener"
-#define QIO_NET_LISTENER(obj) \
- OBJECT_CHECK(QIONetListener, (obj), TYPE_QIO_NET_LISTENER)
-#define QIO_NET_LISTENER_CLASS(klass) \
- OBJECT_CLASS_CHECK(QIONetListenerClass, klass, TYPE_QIO_NET_LISTENER)
-#define QIO_NET_LISTENER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QIONetListenerClass, obj, TYPE_QIO_NET_LISTENER)
+OBJECT_DECLARE_SIMPLE_TYPE(QIONetListener,
+ QIO_NET_LISTENER)
-typedef struct QIONetListener QIONetListener;
-typedef struct QIONetListenerClass QIONetListenerClass;
typedef void (*QIONetListenerClientFunc)(QIONetListener *listener,
QIOChannelSocket *sioc,
@@ -63,9 +58,6 @@ struct QIONetListener {
GDestroyNotify io_notify;
};
-struct QIONetListenerClass {
- ObjectClass parent;
-};
/**
diff --git a/include/io/task.h b/include/io/task.h
index 6818dfedd0..0b5342ee84 100644
--- a/include/io/task.h
+++ b/include/io/task.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -145,11 +145,11 @@ typedef void (*QIOTaskWorker)(QIOTask *task,
* The QIOTask module can also be used to perform operations
* in a background thread context, while still reporting the
* results in the main event thread. This allows code which
- * cannot easily be rewritten to be asychronous (such as DNS
+ * cannot easily be rewritten to be asynchronous (such as DNS
* lookups) to be easily run non-blocking. Reporting the
* results in the main thread context means that the caller
* typically does not need to be concerned about thread
- * safety wrt the QEMU global mutex.
+ * safety wrt the BQL.
*
* For example, the socket_listen() method will block the caller
* while DNS lookups take place if given a name, instead of IP
diff --git a/include/libdecnumber/dconfig.h b/include/libdecnumber/dconfig.h
index 0f7dccef1f..2bc0ba7f14 100644
--- a/include/libdecnumber/dconfig.h
+++ b/include/libdecnumber/dconfig.h
@@ -28,7 +28,7 @@
02110-1301, USA. */
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define WORDS_BIGENDIAN 1
#else
#define WORDS_BIGENDIAN 0
diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h
index aa115fed07..41bc2a0d36 100644
--- a/include/libdecnumber/decNumber.h
+++ b/include/libdecnumber/decNumber.h
@@ -116,12 +116,16 @@
decNumber * decNumberFromUInt32(decNumber *, uint32_t);
decNumber *decNumberFromInt64(decNumber *, int64_t);
decNumber *decNumberFromUInt64(decNumber *, uint64_t);
+ decNumber *decNumberFromInt128(decNumber *, uint64_t, int64_t);
+ decNumber *decNumberFromUInt128(decNumber *, uint64_t, uint64_t);
decNumber * decNumberFromString(decNumber *, const char *, decContext *);
char * decNumberToString(const decNumber *, char *);
char * decNumberToEngString(const decNumber *, char *);
uint32_t decNumberToUInt32(const decNumber *, decContext *);
int32_t decNumberToInt32(const decNumber *, decContext *);
int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set);
+ void decNumberIntegralToInt128(const decNumber *dn, decContext *set,
+ uint64_t *plow, uint64_t *phigh);
uint8_t * decNumberGetBCD(const decNumber *, uint8_t *);
decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t);
diff --git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h
index 4d53c077f2..6198ca8593 100644
--- a/include/libdecnumber/decNumberLocal.h
+++ b/include/libdecnumber/decNumberLocal.h
@@ -98,7 +98,7 @@
/* Shared lookup tables */
extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */
- extern const uLong DECPOWERS[19]; /* powers of ten table */
+ extern const uLong DECPOWERS[20]; /* powers of ten table */
/* The following are included from decDPD.h */
extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */
extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */
diff --git a/include/migration/blocker.h b/include/migration/blocker.h
index acd27018e9..a687ac0efe 100644
--- a/include/migration/blocker.h
+++ b/include/migration/blocker.h
@@ -14,22 +14,84 @@
#ifndef MIGRATION_BLOCKER_H
#define MIGRATION_BLOCKER_H
+#include "qapi/qapi-types-migration.h"
+
+#define MIG_MODE_ALL MIG_MODE__MAX
+
+/**
+ * @migrate_add_blocker - prevent all modes of migration from proceeding
+ *
+ * @reasonp - address of an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
+ */
+int migrate_add_blocker(Error **reasonp, Error **errp);
+
+/**
+ * @migrate_add_blocker_internal - prevent all modes of migration from
+ * proceeding, but ignore -only-migratable
+ *
+ * @reasonp - address of an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @returns - 0 on success, -EBUSY on failure, with errp set.
+ *
+ * Some of the migration blockers can be temporary (e.g., for a few seconds),
+ * so it shouldn't need to conflict with "-only-migratable". For those cases,
+ * we can call this function rather than @migrate_add_blocker().
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
+ */
+int migrate_add_blocker_internal(Error **reasonp, Error **errp);
+
+/**
+ * @migrate_del_blocker - remove a migration blocker from all modes and free it.
+ *
+ * @reasonp - address of the error blocking migration
+ *
+ * This function frees *@reasonp and sets it to NULL.
+ */
+void migrate_del_blocker(Error **reasonp);
+
/**
- * @migrate_add_blocker - prevent migration from proceeding
+ * @migrate_add_blocker_normal - prevent normal migration mode from proceeding
*
- * @reason - an error to be returned whenever migration is attempted
+ * @reasonp - address of an error to be returned whenever migration is attempted
*
* @errp - [out] The reason (if any) we cannot block migration right now.
*
* @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
*/
-int migrate_add_blocker(Error *reason, Error **errp);
+int migrate_add_blocker_normal(Error **reasonp, Error **errp);
/**
- * @migrate_del_blocker - remove a blocking error from migration
+ * @migrate_add_blocker_modes - prevent some modes of migration from proceeding
+ *
+ * @reasonp - address of an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @mode - one or more migration modes to be blocked. The list is terminated
+ * by -1 or MIG_MODE_ALL. For the latter, all modes are blocked.
+ *
+ * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
*
- * @reason - the error blocking migration
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free *@reasonp before the blocker is removed.
*/
-void migrate_del_blocker(Error *reason);
+int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...);
#endif
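A rough usage sketch (not part of this patch): the device name in the error
message is hypothetical, and error_setg() is the usual Error API.

static Error *example_mig_blocker;    /* owned by the migration core once added */

static int example_block_migration(Error **errp)
{
    error_setg(&example_mig_blocker, "example device: state is not migratable");
    if (migrate_add_blocker(&example_mig_blocker, errp) < 0) {
        /* On failure the blocker reason was freed and set to NULL for us. */
        return -1;
    }
    return 0;
}

static void example_unblock_migration(void)
{
    /* Frees the reason and clears the pointer. */
    migrate_del_blocker(&example_mig_blocker);
}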
diff --git a/include/migration/client-options.h b/include/migration/client-options.h
new file mode 100644
index 0000000000..59f4b55cf4
--- /dev/null
+++ b/include/migration/client-options.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU public migration capabilities
+ *
+ * Copyright (c) 2012-2023 Red Hat Inc
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MIGRATION_CLIENT_OPTIONS_H
+#define QEMU_MIGRATION_CLIENT_OPTIONS_H
+
+/* capabilities */
+
+bool migrate_background_snapshot(void);
+bool migrate_dirty_limit(void);
+bool migrate_postcopy_ram(void);
+bool migrate_switchover_ack(void);
+
+/* parameters */
+
+MigMode migrate_mode(void);
+uint64_t migrate_vcpu_dirty_limit_period(void);
+
+#endif
diff --git a/include/migration/colo.h b/include/migration/colo.h
index 768e1f04c3..eaac07f26d 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -28,7 +28,6 @@ bool migration_in_colo_state(void);
int migration_incoming_enable_colo(void);
void migration_incoming_disable_colo(void);
bool migration_incoming_colo_enabled(void);
-void *colo_process_incoming_thread(void *opaque);
bool migration_incoming_in_colo_state(void);
COLOMode get_colo_mode(void);
@@ -36,5 +35,21 @@ COLOMode get_colo_mode(void);
/* failover */
void colo_do_failover(void);
-void colo_checkpoint_notify(void *opaque);
+/*
+ * colo_checkpoint_delay_set
+ *
+ * Handles a change of the x-checkpoint-delay migration parameter; called from
+ * migrate_params_apply() to notify the COLO module about the change.
+ */
+void colo_checkpoint_delay_set(void);
+
+/*
+ * Starts COLO incoming process. Called from process_incoming_migration_co()
+ * after loading the state.
+ *
+ * Called with the BQL held; may temporarily release the BQL.
+ */
+int coroutine_fn colo_incoming_co(void);
+
+void colo_shutdown(void);
#endif
diff --git a/include/migration/global_state.h b/include/migration/global_state.h
index 945eb35d5b..d7c2cd3216 100644
--- a/include/migration/global_state.h
+++ b/include/migration/global_state.h
@@ -16,7 +16,7 @@
#include "qapi/qapi-types-run-state.h"
void register_global_state(void);
-int global_state_store(void);
+void global_state_store(void);
void global_state_store_running(void);
bool global_state_received(void);
RunState global_state_get_runstate(void);
diff --git a/include/migration/misc.h b/include/migration/misc.h
index 34e7d75713..c9e200f4eb 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -15,7 +15,9 @@
#define MIGRATION_MISC_H
#include "qemu/notify.h"
+#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-types-net.h"
+#include "migration/client-options.h"
/* migration/ram.c */
@@ -30,17 +32,16 @@ typedef enum PrecopyNotifyReason {
typedef struct PrecopyNotifyData {
enum PrecopyNotifyReason reason;
- Error **errp;
} PrecopyNotifyData;
void precopy_infrastructure_init(void);
void precopy_add_notifier(NotifierWithReturn *n);
void precopy_remove_notifier(NotifierWithReturn *n);
int precopy_notify(PrecopyNotifyReason reason, Error **errp);
-void precopy_enable_free_page_optimization(void);
void ram_mig_init(void);
void qemu_guest_free_page_hint(void *addr, size_t len);
+bool migrate_ram_is_ignored(RAMBlock *block);
/* migration/block.c */
@@ -58,20 +59,61 @@ void dump_vmstate_json_to_file(FILE *out_fp);
/* migration/migration.c */
void migration_object_init(void);
void migration_shutdown(void);
-void qemu_start_incoming_migration(const char *uri, Error **errp);
bool migration_is_idle(void);
-bool migration_is_active(MigrationState *);
-void add_migration_state_change_notifier(Notifier *notify);
-void remove_migration_state_change_notifier(Notifier *notify);
-bool migration_in_setup(MigrationState *);
-bool migration_has_finished(MigrationState *);
-bool migration_has_failed(MigrationState *);
-/* ...and after the device transmission */
-bool migration_in_postcopy_after_devices(MigrationState *);
-void migration_global_dump(Monitor *mon);
-/* True if incomming migration entered POSTCOPY_INCOMING_DISCARD */
+bool migration_is_active(void);
+bool migration_is_device(void);
+bool migration_thread_is_self(void);
+bool migration_is_setup_or_active(void);
+
+typedef enum MigrationEventType {
+ MIG_EVENT_PRECOPY_SETUP,
+ MIG_EVENT_PRECOPY_DONE,
+ MIG_EVENT_PRECOPY_FAILED,
+ MIG_EVENT_MAX
+} MigrationEventType;
+
+typedef struct MigrationEvent {
+ MigrationEventType type;
+} MigrationEvent;
+
+/*
+ * A MigrationNotifyFunc may return an error code and an Error object,
+ * but only when @e->type is MIG_EVENT_PRECOPY_SETUP. The code is an int
+ * to allow for different failure modes and recovery actions.
+ */
+typedef int (*MigrationNotifyFunc)(NotifierWithReturn *notify,
+ MigrationEvent *e, Error **errp);
+
+/*
+ * Register the notifier @notify to be called when a migration event occurs
+ * for MIG_MODE_NORMAL, as specified by the MigrationEvent passed to @func.
+ * Notifiers may receive events in any of the following orders:
+ * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_DONE
+ * - MIG_EVENT_PRECOPY_SETUP -> MIG_EVENT_PRECOPY_FAILED
+ * - MIG_EVENT_PRECOPY_FAILED
+ */
+void migration_add_notifier(NotifierWithReturn *notify,
+ MigrationNotifyFunc func);
+
+/*
+ * Same as migration_add_notifier, but applies to the specified @mode.
+ */
+void migration_add_notifier_mode(NotifierWithReturn *notify,
+ MigrationNotifyFunc func, MigMode mode);
+
+void migration_remove_notifier(NotifierWithReturn *notify);
+bool migration_is_running(void);
+void migration_file_set_error(int err);
+
+/* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */
bool migration_in_incoming_postcopy(void);
+/* True if incoming migration entered POSTCOPY_INCOMING_ADVISE */
+bool migration_incoming_postcopy_advised(void);
+
+/* True if background snapshot is active */
+bool migration_in_bg_snapshot(void);
+
/* migration/block-dirty-bitmap.c */
void dirty_bitmap_mig_init(void);
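A rough sketch (not part of this patch) of a MIG_MODE_NORMAL notifier using
the interfaces above; the notifier object and handler body are hypothetical.

static NotifierWithReturn example_mig_notifier;

static int example_mig_notify(NotifierWithReturn *notify,
                              MigrationEvent *e, Error **errp)
{
    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        /* The only event that may fail: return negative and set errp. */
        return 0;
    }
    /* MIG_EVENT_PRECOPY_DONE or MIG_EVENT_PRECOPY_FAILED: undo setup here. */
    return 0;
}

/* Registered once, e.g. during device realize:
 *     migration_add_notifier(&example_mig_notifier, example_mig_notify);
 * and removed again with migration_remove_notifier().
 */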
diff --git a/include/migration/qemu-file-types.h b/include/migration/qemu-file-types.h
index 2867e3da84..adec5abc07 100644
--- a/include/migration/qemu-file-types.h
+++ b/include/migration/qemu-file-types.h
@@ -35,7 +35,7 @@ void qemu_put_byte(QEMUFile *f, int v);
void qemu_put_be16(QEMUFile *f, unsigned int v);
void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
-size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
+size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
int qemu_get_byte(QEMUFile *f);
@@ -50,6 +50,8 @@ unsigned int qemu_get_be16(QEMUFile *f);
unsigned int qemu_get_be32(QEMUFile *f);
uint64_t qemu_get_be64(QEMUFile *f);
+bool qemu_file_is_seekable(QEMUFile *f);
+
static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv)
{
qemu_put_be64(f, *pv);
@@ -161,10 +163,20 @@ static inline void qemu_get_sbe64s(QEMUFile *f, int64_t *pv)
qemu_get_be64s(f, (uint64_t *)pv);
}
-size_t qemu_get_counted_string(QEMUFile *f, char buf[256]);
+size_t coroutine_mixed_fn qemu_get_counted_string(QEMUFile *f, char buf[256]);
void qemu_put_counted_string(QEMUFile *f, const char *name);
-int qemu_file_rate_limit(QEMUFile *f);
+/**
+ * migration_rate_exceeded: Check if we have exceeded rate for this interval
+ *
+ * Checks if we have already transferred more data than we are allowed
+ * in the current interval.
+ *
+ * @f: QEMUFile used for main migration channel
+ *
+ * Returns true if we should stop sending data for this interval.
+ */
+bool migration_rate_exceeded(QEMUFile *f);
#endif
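A rough sketch (not part of this patch) of how an iterative save handler
might use migration_rate_exceeded() in place of the removed
qemu_file_rate_limit(); example_send_chunk() is a hypothetical helper that
returns 0 when nothing is left to send.

static int example_save_live_iterate(QEMUFile *f, void *opaque)
{
    while (!migration_rate_exceeded(f)) {
        if (example_send_chunk(f, opaque) == 0) {
            return 1;       /* no more data to send */
        }
    }
    return 0;               /* more data remains; resume next interval */
}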
diff --git a/include/migration/register.h b/include/migration/register.h
index c1dcff0f90..f60e797894 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -16,66 +16,289 @@
#include "hw/vmstate-if.h"
+/**
+ * struct SaveVMHandlers: handler structure to finely control
+ * migration of complex subsystems and devices, such as RAM, block and
+ * VFIO.
+ */
typedef struct SaveVMHandlers {
- /* This runs inside the iothread lock. */
- SaveStateHandler *save_state;
+ /* The following handlers run inside the BQL. */
+
+ /**
+ * @save_state
+ *
+ * Saves state section on the source using the latest state format
+ * version.
+ *
+ * Legacy method. Should be deprecated when all users are ported
+ * to VMStateDescription.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ */
+ void (*save_state)(QEMUFile *f, void *opaque);
+
+ /**
+ * @save_prepare
+ *
+ * Called early, even before migration starts, and can be used to
+ * perform early checks.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*save_prepare)(void *opaque, Error **errp);
+
+ /**
+ * @save_setup
+ *
+ * Initializes the data structures on the source and transmits
+ * first section containing information on the device
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*save_setup)(QEMUFile *f, void *opaque, Error **errp);
+
+ /**
+ * @save_cleanup
+ *
+ * Uninitializes the data structures on the source
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ */
void (*save_cleanup)(void *opaque);
+
+ /**
+ * @save_live_complete_postcopy
+ *
+ * Called at the end of postcopy for all postcopyable devices.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
+
+ /**
+ * @save_live_complete_precopy
+ *
+ * Transmits the last section for the device containing any
+ * remaining data at the end of a precopy phase. When postcopy is
+ * enabled, devices that support postcopy will skip this step,
+ * where the final data will be flushed at the end of postcopy via
+ * @save_live_complete_postcopy instead.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
- /* This runs both outside and inside the iothread lock. */
+ /* This runs both outside and inside the BQL. */
+
+ /**
+ * @is_active
+ *
+ * Will skip a state section if not active
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true if state section is active else false
+ */
bool (*is_active)(void *opaque);
+
+ /**
+ * @has_postcopy
+ *
+ * Checks if a device supports postcopy
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true for postcopy support else false
+ */
bool (*has_postcopy)(void *opaque);
- /* is_active_iterate
- * If it is not NULL then qemu_savevm_state_iterate will skip iteration if
- * it returns false. For example, it is needed for only-postcopy-states,
- * which needs to be handled by qemu_savevm_state_setup and
- * qemu_savevm_state_pending, but do not need iterations until not in
- * postcopy stage.
+ /**
+ * @is_active_iterate
+ *
+ * As #SaveVMHandlers.is_active(), will skip an inactive state
+ * section in qemu_savevm_state_iterate.
+ *
+ * For example, it is needed for postcopy-only states, which need
+ * to be handled by qemu_savevm_state_setup() and
+ * qemu_savevm_state_pending(), but do not need iterations until
+ * the postcopy stage has been entered.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true if state section is active else false
*/
bool (*is_active_iterate)(void *opaque);
- /* This runs outside the iothread lock in the migration case, and
+ /* This runs outside the BQL in the migration case, and
* within the lock in the savevm case. The callback had better only
* use data that is local to the migration thread or protected
* by other locks.
*/
+
+ /**
+ * @save_live_iterate
+ *
+ * Should send a chunk of data until the point that stream
+ * bandwidth limits tell it to stop. Each call generates one
+ * section.
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns 0 to indicate that there is still more data to send,
+ * 1 that there is no more data to send and
+ * negative to indicate an error.
+ */
int (*save_live_iterate)(QEMUFile *f, void *opaque);
- /* This runs outside the iothread lock! */
- int (*save_setup)(QEMUFile *f, void *opaque);
- void (*save_live_pending)(QEMUFile *f, void *opaque,
- uint64_t threshold_size,
- uint64_t *res_precopy_only,
- uint64_t *res_compatible,
- uint64_t *res_postcopy_only);
- /* Note for save_live_pending:
- * - res_precopy_only is for data which must be migrated in precopy phase
- * or in stopped state, in other words - before target vm start
- * - res_compatible is for data which may be migrated in any phase
- * - res_postcopy_only is for data which must be migrated in postcopy phase
- * or in stopped state, in other words - after source vm stop
+ /* This runs outside the BQL! */
+
+ /**
+ * @state_pending_estimate
+ *
+ * This estimates the remaining data to transfer
+ *
+ * Sum of @must_precopy and @can_postcopy is the whole amount of
+ * pending data.
*
- * Sum of res_postcopy_only, res_compatible and res_postcopy_only is the
- * whole amount of pending data.
+ * @opaque: data pointer passed to register_savevm_live()
+ * @must_precopy: amount of data that must be migrated in precopy
+ * or in stopped state, i.e. that must be migrated
+ * before target start.
+ * @can_postcopy: amount of data that can be migrated in postcopy
+ * or in stopped state, i.e. after target start.
+ * Some can also be migrated during precopy (RAM).
+ * Some must be migrated after source stops
+ * (block-dirty-bitmap)
*/
+ void (*state_pending_estimate)(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy);
+ /**
+ * @state_pending_exact
+ *
+ * This calculates the exact remaining data to transfer
+ *
+ * Sum of @must_precopy and @can_postcopy is the whole amount of
+ * pending data.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ * @must_precopy: amount of data that must be migrated in precopy
+ * or in stopped state, i.e. that must be migrated
+ * before target start.
+ * @can_postcopy: amount of data that can be migrated in postcopy
+ * or in stopped state, i.e. after target start.
+ * Some can also be migrated during precopy (RAM).
+ * Some must be migrated after source stops
+ * (block-dirty-bitmap)
+ */
+ void (*state_pending_exact)(void *opaque, uint64_t *must_precopy,
+ uint64_t *can_postcopy);
+
+ /**
+ * @load_state
+ *
+ * Load sections generated by any of the save functions that
+ * generate sections.
+ *
+ * Legacy method. Should be deprecated when all users are ported
+ * to VMStateDescription.
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: data pointer passed to register_savevm_live()
+ * @version_id: the maximum version_id supported
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*load_state)(QEMUFile *f, void *opaque, int version_id);
+
+ /**
+ * @load_setup
+ *
+ * Initializes the data structures on the destination.
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: data pointer passed to register_savevm_live()
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Returns zero to indicate success and negative for error
+ */
+ int (*load_setup)(QEMUFile *f, void *opaque, Error **errp);
- LoadStateHandler *load_state;
- int (*load_setup)(QEMUFile *f, void *opaque);
+ /**
+ * @load_cleanup
+ *
+ * Uninitializes the data structures on the destination.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*load_cleanup)(void *opaque);
- /* Called when postcopy migration wants to resume from failure */
+
+ /**
+ * @resume_prepare
+ *
+ * Called when postcopy migration wants to resume from failure
+ *
+ * @s: Current migration state
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns zero to indicate success and negative for error
+ */
int (*resume_prepare)(MigrationState *s, void *opaque);
+
+ /**
+ * @switchover_ack_needed
+ *
+ * Checks if switchover ack should be used. Called only on
+ * destination.
+ *
+ * @opaque: data pointer passed to register_savevm_live()
+ *
+ * Returns true if switchover ack should be used and false
+ * otherwise
+ */
+ bool (*switchover_ack_needed)(void *opaque);
} SaveVMHandlers;
+/**
+ * register_savevm_live: Register a set of custom migration handlers
+ *
+ * @idstr: state section identifier
+ * @instance_id: instance id
+ * @version_id: version id supported
+ * @ops: SaveVMHandlers structure
+ * @opaque: data pointer passed to SaveVMHandlers handlers
+ */
int register_savevm_live(const char *idstr,
uint32_t instance_id,
int version_id,
const SaveVMHandlers *ops,
void *opaque);
+/**
+ * unregister_savevm: Unregister custom migration handlers
+ *
+ * @obj: object associated with state section
+ * @idstr: state section identifier
+ * @opaque: data pointer passed to register_savevm_live()
+ */
void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque);
#endif
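A rough registration sketch (not part of this patch): a minimal handler set
for a hypothetical "example" subsystem, using only the callbacks documented
above. The handler bodies are placeholders.

static int example_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    /* Transmit the first section describing the subsystem. */
    return 0;
}

static int example_load_state(QEMUFile *f, void *opaque, int version_id)
{
    /* Parse the sections produced on the source side. */
    return 0;
}

static const SaveVMHandlers example_handlers = {
    .save_setup = example_save_setup,
    .load_state = example_load_state,
};

/* Typically called once at initialization time:
 *     register_savevm_live("example", 0, 1, &example_handlers, state);
 */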
diff --git a/include/migration/snapshot.h b/include/migration/snapshot.h
index c85b6ec75b..9e4dcaaa75 100644
--- a/include/migration/snapshot.h
+++ b/include/migration/snapshot.h
@@ -15,7 +15,57 @@
#ifndef QEMU_MIGRATION_SNAPSHOT_H
#define QEMU_MIGRATION_SNAPSHOT_H
-int save_snapshot(const char *name, Error **errp);
-int load_snapshot(const char *name, Error **errp);
+#include "qapi/qapi-builtin-types.h"
+#include "qapi/qapi-types-run-state.h"
+
+/**
+ * save_snapshot: Save an internal snapshot.
+ * @name: name of internal snapshot
+ * @overwrite: replace existing snapshot with @name
+ * @vmstate: blockdev node name to store VM state in
+ * @has_devices: whether to use explicit device list
+ * @devices: explicit device list to snapshot
+ * @errp: pointer to error object
+ * On success, return %true.
+ * On failure, store an error through @errp and return %false.
+ */
+bool save_snapshot(const char *name, bool overwrite,
+ const char *vmstate,
+ bool has_devices, strList *devices,
+ Error **errp);
+
+/**
+ * load_snapshot: Load an internal snapshot.
+ * @name: name of internal snapshot
+ * @vmstate: blockdev node name to load VM state from
+ * @has_devices: whether to use explicit device list
+ * @devices: explicit device list to snapshot
+ * @errp: pointer to error object
+ * On success, return %true.
+ * On failure, store an error through @errp and return %false.
+ */
+bool load_snapshot(const char *name,
+ const char *vmstate,
+ bool has_devices, strList *devices,
+ Error **errp);
+
+/**
+ * delete_snapshot: Delete a snapshot.
+ * @name: path to snapshot
+ * @has_devices: whether to use explicit device list
+ * @devices: explicit device list to snapshot
+ * @errp: pointer to error object
+ * On success, return %true.
+ * On failure, store an error through @errp and return %false.
+ */
+bool delete_snapshot(const char *name,
+ bool has_devices, strList *devices,
+ Error **errp);
+
+/**
+ * load_snapshot_resume: Restore runstate after loading snapshot.
+ * @state: state to restore
+ */
+void load_snapshot_resume(RunState state);
#endif
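A rough usage sketch (not part of this patch): take and then reload an
internal snapshot of all devices, letting the block layer choose the
vmstate node; the snapshot name is arbitrary.

static bool example_checkpoint(Error **errp)
{
    if (!save_snapshot("checkpoint1", true /* overwrite */, NULL,
                       false, NULL, errp)) {
        return false;
    }
    return load_snapshot("checkpoint1", NULL, false, NULL, errp);
}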
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index f68ed7db13..294d2d8486 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -41,9 +41,11 @@ typedef struct VMStateField VMStateField;
*/
struct VMStateInfo {
const char *name;
- int (*get)(QEMUFile *f, void *pv, size_t size, const VMStateField *field);
- int (*put)(QEMUFile *f, void *pv, size_t size, const VMStateField *field,
- QJSON *vmdesc);
+ int coroutine_mixed_fn (*get)(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field);
+ int coroutine_mixed_fn (*put)(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field,
+ JSONWriter *vmdesc);
};
enum VMStateFlags {
@@ -147,12 +149,16 @@ enum VMStateFlags {
* VMStateField.struct_version_id to tell which version of the
* structure we are referencing to use. */
VMS_VSTRUCT = 0x8000,
+
+ /* Marker for end of list */
+ VMS_END = 0x10000
};
typedef enum {
MIG_PRI_DEFAULT = 0,
MIG_PRI_IOMMU, /* Must happen before PCI devices */
MIG_PRI_PCI_BUS, /* Must happen before IOMMU */
+ MIG_PRI_VIRTIO_MEM, /* Must happen before IOMMU */
MIG_PRI_GICV3_ITS, /* Must happen before PCI devices */
MIG_PRI_GICV3, /* Must happen before the ITS */
MIG_PRI_MAX,
@@ -177,12 +183,24 @@ struct VMStateField {
struct VMStateDescription {
const char *name;
- int unmigratable;
+ bool unmigratable;
+ /*
+ * This VMSD describes something that should be sent during the setup
+ * phase of migration. It plays a similar role to save_setup() for
+ * explicitly registered vmstate entries, so it can be seen as a way to
+ * describe save_setup() in VMSD structures.
+ *
+ * Note that, for now, a SaveStateEntry cannot have both a VMSD and
+ * operations (e.g., save_setup()) set at the same time. Consequently,
+ * save_setup() and a VMSD with early_setup set to true are mutually
+ * exclusive. For this reason, early_setup VMSDs are also migrated in a
+ * QEMU_VM_SECTION_FULL section, while save_setup() data is migrated in
+ * a QEMU_VM_SECTION_START section.
+ */
+ bool early_setup;
int version_id;
int minimum_version_id;
- int minimum_version_id_old;
MigrationPriority priority;
- LoadStateHandler *load_state_old;
int (*pre_load)(void *opaque);
int (*post_load)(void *opaque, int version_id);
int (*pre_save)(void *opaque);
@@ -191,11 +209,9 @@ struct VMStateDescription {
bool (*dev_unplug_pending)(void *opaque);
const VMStateField *fields;
- const VMStateDescription **subsections;
+ const VMStateDescription * const *subsections;
};
-extern const VMStateDescription vmstate_dummy;
-
extern const VMStateInfo vmstate_info_bool;
extern const VMStateInfo vmstate_info_int8;
@@ -219,7 +235,6 @@ extern const VMStateInfo vmstate_info_uint64;
#define VMS_NULLPTR_MARKER (0x30U) /* '0' */
extern const VMStateInfo vmstate_info_nullptr;
-extern const VMStateInfo vmstate_info_float64;
extern const VMStateInfo vmstate_info_cpudouble;
extern const VMStateInfo vmstate_info_timer;
@@ -709,8 +724,9 @@ extern const VMStateInfo vmstate_info_qlist;
* '_state' type
* That the pointer is right at the start of _tmp_type.
*/
-#define VMSTATE_WITH_TMP(_state, _tmp_type, _vmsd) { \
+#define VMSTATE_WITH_TMP_TEST(_state, _test, _tmp_type, _vmsd) { \
.name = "tmp", \
+ .field_exists = (_test), \
.size = sizeof(_tmp_type) + \
QEMU_BUILD_BUG_ON_ZERO(offsetof(_tmp_type, parent) != 0) + \
type_check_pointer(_state, \
@@ -719,6 +735,9 @@ extern const VMStateInfo vmstate_info_qlist;
.info = &vmstate_info_tmp, \
}
+#define VMSTATE_WITH_TMP(_state, _tmp_type, _vmsd) \
+ VMSTATE_WITH_TMP_TEST(_state, NULL, _tmp_type, _vmsd)
+
#define VMSTATE_UNUSED_BUFFER(_test, _version, _size) { \
.name = "unused", \
.field_exists = (_test), \
@@ -742,8 +761,9 @@ extern const VMStateInfo vmstate_info_qlist;
/* _field_size should be a int32_t field in the _state struct giving the
* size of the bitmap _field in bits.
*/
-#define VMSTATE_BITMAP(_field, _state, _version, _field_size) { \
+#define VMSTATE_BITMAP_TEST(_field, _state, _test, _version, _field_size) { \
.name = (stringify(_field)), \
+ .field_exists = (_test), \
.version_id = (_version), \
.size_offset = vmstate_offset_value(_state, _field_size, int32_t),\
.info = &vmstate_info_bitmap, \
@@ -751,6 +771,9 @@ extern const VMStateInfo vmstate_info_qlist;
.offset = offsetof(_state, _field), \
}
+#define VMSTATE_BITMAP(_field, _state, _version, _field_size) \
+ VMSTATE_BITMAP_TEST(_field, _state, NULL, _version, _field_size)
+
/* For migrating a QTAILQ.
* Target QTAILQ needs be properly initialized.
* _type: type of QTAILQ element
@@ -997,12 +1020,6 @@ extern const VMStateInfo vmstate_info_qlist;
VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint64, uint64_t)
-#define VMSTATE_FLOAT64_V(_f, _s, _v) \
- VMSTATE_SINGLE(_f, _s, _v, vmstate_info_float64, float64)
-
-#define VMSTATE_FLOAT64(_f, _s) \
- VMSTATE_FLOAT64_V(_f, _s, 0)
-
#define VMSTATE_TIMER_PTR_TEST(_f, _s, _test) \
VMSTATE_POINTER_TEST(_f, _s, _test, vmstate_info_timer, QEMUTimer *)
@@ -1114,12 +1131,6 @@ extern const VMStateInfo vmstate_info_qlist;
#define VMSTATE_INT64_ARRAY(_f, _s, _n) \
VMSTATE_INT64_ARRAY_V(_f, _s, _n, 0)
-#define VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, _v) \
- VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_float64, float64)
-
-#define VMSTATE_FLOAT64_ARRAY(_f, _s, _n) \
- VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, 0)
-
#define VMSTATE_CPUDOUBLE_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_cpudouble, CPU_DoubleU)
@@ -1177,16 +1188,21 @@ extern const VMStateInfo vmstate_info_qlist;
VMSTATE_UNUSED_BUFFER(_test, 0, _size)
#define VMSTATE_END_OF_LIST() \
- {}
+ { \
+ .flags = VMS_END, \
+ }
int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, int version_id);
int vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, QJSON *vmdesc);
+ void *opaque, JSONWriter *vmdesc);
+int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd,
+ void *opaque, JSONWriter *vmdesc, Error **errp);
int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, QJSON *vmdesc, int version_id);
+ void *opaque, JSONWriter *vmdesc,
+ int version_id, Error **errp);
-bool vmstate_save_needed(const VMStateDescription *vmsd, void *opaque);
+bool vmstate_section_needed(const VMStateDescription *vmsd, void *opaque);
#define VMSTATE_INSTANCE_ID_ANY -1
@@ -1197,7 +1213,15 @@ int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
int required_for_version,
Error **errp);
-/* Returns: 0 on success, -1 on failure */
+/**
+ * vmstate_register() - legacy function to register state
+ * serialisation description
+ *
+ * New code shouldn't be using this function as QOM-ified devices have
+ * dc->vmsd to store the serialisation description.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
static inline int vmstate_register(VMStateIf *obj, int instance_id,
const VMStateDescription *vmsd,
void *opaque)
@@ -1206,6 +1230,34 @@ static inline int vmstate_register(VMStateIf *obj, int instance_id,
opaque, -1, 0, NULL);
}
+/**
+ * vmstate_replace_hack_for_ppc() - ppc used to abuse vmstate_register
+ *
+ * Don't even think about using this function in new code.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id,
+ const VMStateDescription *vmsd,
+ void *opaque);
+
+/**
+ * vmstate_register_any() - legacy function to register state
+ * serialisation description and let the function choose the id
+ *
+ * New code shouldn't be using this function as QOM-ified devices have
+ * dc->vmsd to store the serialisation description.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static inline int vmstate_register_any(VMStateIf *obj,
+ const VMStateDescription *vmsd,
+ void *opaque)
+{
+ return vmstate_register_with_alias_id(obj, VMSTATE_INSTANCE_ID_ANY, vmsd,
+ opaque, -1, 0, NULL);
+}
+
void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
void *opaque);
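A minimal VMStateDescription sketch (not part of this patch): ExampleState
and its "counter" field are hypothetical, and registration is shown only in
the trailing comment.

typedef struct ExampleState {
    uint32_t counter;
} ExampleState;

static const VMStateDescription vmstate_example = {
    .name = "example",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(counter, ExampleState),
        VMSTATE_END_OF_LIST()       /* the terminator now carries VMS_END */
    },
};

/* Registered with an automatically chosen instance id:
 *     vmstate_register_any(VMSTATE_IF(obj), &vmstate_example, state);
 */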
diff --git a/include/monitor/hmp-target.h b/include/monitor/hmp-target.h
index 8b7820a3ad..d78e979f05 100644
--- a/include/monitor/hmp-target.h
+++ b/include/monitor/hmp-target.h
@@ -33,20 +33,29 @@
struct MonitorDef {
const char *name;
int offset;
- target_long (*get_value)(const struct MonitorDef *md, int val);
+ target_long (*get_value)(Monitor *mon, const struct MonitorDef *md,
+ int val);
int type;
};
const MonitorDef *target_monitor_defs(void);
int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval);
-CPUArchState *mon_get_cpu_env(void);
-CPUState *mon_get_cpu(void);
+CPUArchState *mon_get_cpu_env(Monitor *mon);
+CPUState *mon_get_cpu(Monitor *mon);
void hmp_info_mem(Monitor *mon, const QDict *qdict);
void hmp_info_tlb(Monitor *mon, const QDict *qdict);
void hmp_mce(Monitor *mon, const QDict *qdict);
void hmp_info_local_apic(Monitor *mon, const QDict *qdict);
-void hmp_info_io_apic(Monitor *mon, const QDict *qdict);
+void hmp_info_sev(Monitor *mon, const QDict *qdict);
+void hmp_info_sgx(Monitor *mon, const QDict *qdict);
+void hmp_info_via(Monitor *mon, const QDict *qdict);
+void hmp_memory_dump(Monitor *mon, const QDict *qdict);
+void hmp_physical_memory_dump(Monitor *mon, const QDict *qdict);
+void hmp_info_registers(Monitor *mon, const QDict *qdict);
+void hmp_gva2gpa(Monitor *mon, const QDict *qdict);
+void hmp_gpa2hva(Monitor *mon, const QDict *qdict);
+void hmp_gpa2hpa(Monitor *mon, const QDict *qdict);
#endif /* MONITOR_HMP_TARGET_H */
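A rough sketch (not part of this patch) of a register-table callback using
the new Monitor-aware get_value() signature; the actual register read is
elided and the value returned is a placeholder.

static target_long example_get_reg(Monitor *mon, const struct MonitorDef *md,
                                   int val)
{
    CPUArchState *env = mon_get_cpu_env(mon);   /* per-monitor CPU lookup */

    if (!env) {
        return 0;
    }
    return val;   /* a real callback would derive the value from env */
}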
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index c986cfd28b..f4cf8f6717 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -15,8 +15,11 @@
#define HMP_H
#include "qemu/readline.h"
+#include "qapi/qapi-types-common.h"
-void hmp_handle_error(Monitor *mon, Error *err);
+bool hmp_handle_error(Monitor *mon, Error *err);
+void hmp_help_cmd(Monitor *mon, const char *name);
+strList *hmp_split_at_comma(const char *str);
void hmp_info_name(Monitor *mon, const QDict *qdict);
void hmp_info_version(Monitor *mon, const QDict *qdict);
@@ -28,14 +31,12 @@ void hmp_info_mice(Monitor *mon, const QDict *qdict);
void hmp_info_migrate(Monitor *mon, const QDict *qdict);
void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict);
void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict);
-void hmp_info_migrate_cache_size(Monitor *mon, const QDict *qdict);
void hmp_info_cpus(Monitor *mon, const QDict *qdict);
void hmp_info_vnc(Monitor *mon, const QDict *qdict);
void hmp_info_spice(Monitor *mon, const QDict *qdict);
void hmp_info_balloon(Monitor *mon, const QDict *qdict);
void hmp_info_irq(Monitor *mon, const QDict *qdict);
void hmp_info_pic(Monitor *mon, const QDict *qdict);
-void hmp_info_rdma(Monitor *mon, const QDict *qdict);
void hmp_info_pci(Monitor *mon, const QDict *qdict);
void hmp_info_tpm(Monitor *mon, const QDict *qdict);
void hmp_info_iothreads(Monitor *mon, const QDict *qdict);
@@ -54,6 +55,7 @@ void hmp_ringbuf_read(Monitor *mon, const QDict *qdict);
void hmp_cont(Monitor *mon, const QDict *qdict);
void hmp_system_wakeup(Monitor *mon, const QDict *qdict);
void hmp_nmi(Monitor *mon, const QDict *qdict);
+void hmp_info_network(Monitor *mon, const QDict *qdict);
void hmp_set_link(Monitor *mon, const QDict *qdict);
void hmp_balloon(Monitor *mon, const QDict *qdict);
void hmp_loadvm(Monitor *mon, const QDict *qdict);
@@ -64,17 +66,22 @@ void hmp_migrate_continue(Monitor *mon, const QDict *qdict);
void hmp_migrate_incoming(Monitor *mon, const QDict *qdict);
void hmp_migrate_recover(Monitor *mon, const QDict *qdict);
void hmp_migrate_pause(Monitor *mon, const QDict *qdict);
-void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict);
-void hmp_migrate_set_speed(Monitor *mon, const QDict *qdict);
void hmp_migrate_set_capability(Monitor *mon, const QDict *qdict);
void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict);
-void hmp_migrate_set_cache_size(Monitor *mon, const QDict *qdict);
void hmp_client_migrate_info(Monitor *mon, const QDict *qdict);
void hmp_migrate_start_postcopy(Monitor *mon, const QDict *qdict);
void hmp_x_colo_lost_heartbeat(Monitor *mon, const QDict *qdict);
void hmp_set_password(Monitor *mon, const QDict *qdict);
void hmp_expire_password(Monitor *mon, const QDict *qdict);
void hmp_change(Monitor *mon, const QDict *qdict);
+#ifdef CONFIG_VNC
+void hmp_change_vnc(Monitor *mon, const char *device, const char *target,
+ const char *arg, const char *read_only, bool force,
+ Error **errp);
+#endif
+void hmp_change_medium(Monitor *mon, const char *device, const char *target,
+ const char *arg, const char *read_only, bool force,
+ Error **errp);
void hmp_migrate(Monitor *mon, const QDict *qdict);
void hmp_device_add(Monitor *mon, const QDict *qdict);
void hmp_device_del(Monitor *mon, const QDict *qdict);
@@ -83,13 +90,15 @@ void hmp_netdev_add(Monitor *mon, const QDict *qdict);
void hmp_netdev_del(Monitor *mon, const QDict *qdict);
void hmp_getfd(Monitor *mon, const QDict *qdict);
void hmp_closefd(Monitor *mon, const QDict *qdict);
+void hmp_mouse_move(Monitor *mon, const QDict *qdict);
+void hmp_mouse_button(Monitor *mon, const QDict *qdict);
+void hmp_mouse_set(Monitor *mon, const QDict *qdict);
void hmp_sendkey(Monitor *mon, const QDict *qdict);
-void hmp_screendump(Monitor *mon, const QDict *qdict);
+void coroutine_fn hmp_screendump(Monitor *mon, const QDict *qdict);
void hmp_chardev_add(Monitor *mon, const QDict *qdict);
void hmp_chardev_change(Monitor *mon, const QDict *qdict);
void hmp_chardev_remove(Monitor *mon, const QDict *qdict);
void hmp_chardev_send_break(Monitor *mon, const QDict *qdict);
-void hmp_cpu_add(Monitor *mon, const QDict *qdict);
void hmp_object_add(Monitor *mon, const QDict *qdict);
void hmp_object_del(Monitor *mon, const QDict *qdict);
void hmp_info_memdev(Monitor *mon, const QDict *qdict);
@@ -99,6 +108,13 @@ void hmp_qom_list(Monitor *mon, const QDict *qdict);
void hmp_qom_get(Monitor *mon, const QDict *qdict);
void hmp_qom_set(Monitor *mon, const QDict *qdict);
void hmp_info_qom_tree(Monitor *mon, const QDict *dict);
+void hmp_virtio_query(Monitor *mon, const QDict *qdict);
+void hmp_virtio_status(Monitor *mon, const QDict *qdict);
+void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict);
+void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict);
+void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict);
+void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
+void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
@@ -129,6 +145,40 @@ void hmp_info_ramblock(Monitor *mon, const QDict *qdict);
void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict);
void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict);
void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict);
-void hmp_info_sev(Monitor *mon, const QDict *qdict);
+void hmp_info_replay(Monitor *mon, const QDict *qdict);
+void hmp_replay_break(Monitor *mon, const QDict *qdict);
+void hmp_replay_delete_break(Monitor *mon, const QDict *qdict);
+void hmp_replay_seek(Monitor *mon, const QDict *qdict);
+void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict);
+void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict);
+void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_human_readable_text_helper(Monitor *mon,
+ HumanReadableText *(*qmp_handler)(Error **));
+void hmp_info_stats(Monitor *mon, const QDict *qdict);
+void hmp_one_insn_per_tb(Monitor *mon, const QDict *qdict);
+void hmp_watchdog_action(Monitor *mon, const QDict *qdict);
+void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict);
+void hmp_info_capture(Monitor *mon, const QDict *qdict);
+void hmp_stopcapture(Monitor *mon, const QDict *qdict);
+void hmp_wavcapture(Monitor *mon, const QDict *qdict);
+void hmp_trace_event(Monitor *mon, const QDict *qdict);
+void hmp_trace_file(Monitor *mon, const QDict *qdict);
+void hmp_info_trace_events(Monitor *mon, const QDict *qdict);
+void hmp_help(Monitor *mon, const QDict *qdict);
+void hmp_info_help(Monitor *mon, const QDict *qdict);
+void hmp_info_sync_profile(Monitor *mon, const QDict *qdict);
+void hmp_info_history(Monitor *mon, const QDict *qdict);
+void hmp_logfile(Monitor *mon, const QDict *qdict);
+void hmp_log(Monitor *mon, const QDict *qdict);
+void hmp_gdbserver(Monitor *mon, const QDict *qdict);
+void hmp_print(Monitor *mon, const QDict *qdict);
+void hmp_sum(Monitor *mon, const QDict *qdict);
+void hmp_ioport_read(Monitor *mon, const QDict *qdict);
+void hmp_ioport_write(Monitor *mon, const QDict *qdict);
+void hmp_boot_set(Monitor *mon, const QDict *qdict);
+void hmp_info_mtree(Monitor *mon, const QDict *qdict);
+void hmp_info_cryptodev(Monitor *mon, const QDict *qdict);
#endif
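A rough sketch (not part of this patch) of an HMP command wrapping a QMP
handler with the bool-returning hmp_handle_error(); qmp_example_op() and the
"name" argument are hypothetical.

void hmp_example_op(Monitor *mon, const QDict *qdict)
{
    Error *err = NULL;

    qmp_example_op(qdict_get_try_str(qdict, "name"), &err);
    if (hmp_handle_error(mon, err)) {
        return;    /* true means an error was already reported */
    }
    monitor_printf(mon, "done\n");
}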
diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h
index 1018d754a6..965f5d5450 100644
--- a/include/monitor/monitor.h
+++ b/include/monitor/monitor.h
@@ -4,8 +4,8 @@
#include "block/block.h"
#include "qapi/qapi-types-misc.h"
#include "qemu/readline.h"
+#include "exec/hwaddr.h"
-extern __thread Monitor *cur_mon;
typedef struct MonitorHMP MonitorHMP;
typedef struct MonitorOptions MonitorOptions;
@@ -13,6 +13,8 @@ typedef struct MonitorOptions MonitorOptions;
extern QemuOptsList qemu_mon_opts;
+Monitor *monitor_cur(void);
+Monitor *monitor_set_cur(Coroutine *co, Monitor *mon);
bool monitor_cur_is_qmp(void);
void monitor_init_globals(void);
@@ -29,23 +31,36 @@ void monitor_resume(Monitor *mon);
int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp);
int monitor_fd_param(Monitor *mon, const char *fdname, Error **errp);
+int monitor_puts(Monitor *mon, const char *str);
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
- GCC_FMT_ATTR(2, 0);
-int monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 0);
+int monitor_printf(Monitor *mon, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
+void monitor_printc(Monitor *mon, int ch);
void monitor_flush(Monitor *mon);
-int monitor_set_cpu(int cpu_index);
-int monitor_get_cpu_index(void);
+int monitor_set_cpu(Monitor *mon, int cpu_index);
+int monitor_get_cpu_index(Monitor *mon);
+
+int monitor_puts_locked(Monitor *mon, const char *str);
+void monitor_flush_locked(Monitor *mon);
+
+void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, uint64_t size, Error **errp);
void monitor_read_command(MonitorHMP *mon, int show_prompt);
int monitor_read_password(MonitorHMP *mon, ReadLineFunc *readline_func,
void *opaque);
AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
- bool has_opaque, const char *opaque,
- Error **errp);
-int monitor_fdset_get_fd(int64_t fdset_id, int flags);
-int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd);
+ const char *opaque, Error **errp);
+int monitor_fdset_dup_fd_add(int64_t fdset_id, int flags);
void monitor_fdset_dup_fd_remove(int dup_fd);
int64_t monitor_fdset_dup_fd_find(int dup_fd);
+void monitor_register_hmp(const char *name, bool info,
+ void (*cmd)(Monitor *mon, const QDict *qdict));
+void monitor_register_hmp_info_hrt(const char *name,
+ HumanReadableText *(*handler)(Error **errp));
+
+int error_vprintf_unless_qmp(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+int error_printf_unless_qmp(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
+
#endif /* MONITOR_H */
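A rough sketch (not part of this patch): code that used to dereference the
removed cur_mon thread-local now asks for the current monitor explicitly,
and must cope with there being none.

static void example_report(const char *msg)
{
    Monitor *mon = monitor_cur();   /* may be NULL outside a monitor context */

    if (mon) {
        monitor_printf(mon, "%s\n", msg);
    }
}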
diff --git a/include/monitor/qdev.h b/include/monitor/qdev.h
index eaa947d73a..1d57bf6577 100644
--- a/include/monitor/qdev.h
+++ b/include/monitor/qdev.h
@@ -9,6 +9,31 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp);
int qdev_device_help(QemuOpts *opts);
DeviceState *qdev_device_add(QemuOpts *opts, Error **errp);
-void qdev_set_id(DeviceState *dev, const char *id);
+DeviceState *qdev_device_add_from_qdict(const QDict *opts,
+ bool from_json, Error **errp);
+
+/**
+ * qdev_set_id: parent the device and set its id if provided.
+ * @dev: device to handle
+ * @id: id to be given to the device, or NULL.
+ *
+ * Returns: the id of the device in case of success; otherwise NULL.
+ *
+ * @dev must be unrealized, unparented and must not have an id.
+ *
+ * If @id is non-NULL, this function tries to set up @dev's QOM path as
+ * "/peripheral/id". If @id is already taken, it fails. If it succeeds,
+ * the id field of @dev is set to @id (@dev now owns the given @id
+ * parameter).
+ *
+ * If @id is NULL, this function generates a unique name and sets up @dev's
+ * QOM path as "/peripheral-anon/name". This name is not set as the id
+ * of @dev.
+ *
+ * Upon success, it returns the id/name (generated or provided). The
+ * returned string is owned by the corresponding child property and must
+ * not be freed by the caller.
+ */
+const char *qdev_set_id(DeviceState *dev, char *id, Error **errp);
#endif
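A rough usage sketch (not part of this patch): the wrapper name is made up.
Note that @id must be a heap-allocated string whose ownership passes to the
device on success.

static bool example_name_device(DeviceState *dev, const char *wanted_id,
                                Error **errp)
{
    const char *assigned;

    assigned = qdev_set_id(dev, wanted_id ? g_strdup(wanted_id) : NULL, errp);
    return assigned != NULL;
}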
diff --git a/include/monitor/qmp-helpers.h b/include/monitor/qmp-helpers.h
new file mode 100644
index 0000000000..318c3357a2
--- /dev/null
+++ b/include/monitor/qmp-helpers.h
@@ -0,0 +1,29 @@
+/*
+ * QMP command helpers
+ *
+ * Copyright (c) 2022 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef MONITOR_QMP_HELPERS_H
+
+bool qmp_add_client_spice(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, Error **errp);
+#ifdef CONFIG_VNC
+bool qmp_add_client_vnc(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, Error **errp);
+#endif
+#ifdef CONFIG_DBUS_DISPLAY
+bool qmp_add_client_dbus_display(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, Error **errp);
+#endif
+bool qmp_add_client_char(int fd, bool has_skipauth, bool skipauth,
+ bool has_tls, bool tls, const char *protocol,
+ Error **errp);
+
+#endif
diff --git a/include/net/can_emu.h b/include/net/can_emu.h
index fce9770928..6f9b206bb5 100644
--- a/include/net/can_emu.h
+++ b/include/net/can_emu.h
@@ -46,7 +46,8 @@ typedef uint32_t qemu_canid_t;
typedef struct qemu_can_frame {
qemu_canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
uint8_t can_dlc; /* data length code: 0 .. 8 */
- uint8_t data[8] QEMU_ALIGNED(8);
+ uint8_t flags;
+ uint8_t data[64] QEMU_ALIGNED(8);
} qemu_can_frame;
/* Keep defines for QEMU separate from Linux ones for now */
@@ -58,6 +59,10 @@ typedef struct qemu_can_frame {
#define QEMU_CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
#define QEMU_CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */
+#define QEMU_CAN_FRMF_BRS 0x01 /* bit rate switch (2nd bitrate for data) */
+#define QEMU_CAN_FRMF_ESI 0x02 /* error state ind. of transmitting node */
+#define QEMU_CAN_FRMF_TYPE_FD 0x10 /* internal bit ind. of CAN FD frame */
+
/**
* struct qemu_can_filter - CAN ID based filter in can_register().
* @can_id: relevant bits of CAN ID which are not masked out.
@@ -97,15 +102,11 @@ struct CanBusClientState {
char *model;
char *name;
void (*destructor)(CanBusClientState *);
+ bool fd_mode;
};
#define TYPE_CAN_BUS "can-bus"
-#define CAN_BUS_CLASS(klass) \
- OBJECT_CLASS_CHECK(CanBusClass, (klass), TYPE_CAN_BUS)
-#define CAN_BUS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(CanBusClass, (obj), TYPE_CAN_BUS)
-#define CAN_BUS(obj) \
- OBJECT_CHECK(CanBusState, (obj), TYPE_CAN_BUS)
+OBJECT_DECLARE_SIMPLE_TYPE(CanBusState, CAN_BUS)
int can_bus_filter_match(struct qemu_can_filter *filter, qemu_canid_t can_id);
@@ -121,4 +122,8 @@ int can_bus_client_set_filters(CanBusClientState *,
const struct qemu_can_filter *filters,
size_t filters_cnt);
+uint8_t can_dlc2len(uint8_t can_dlc);
+
+uint8_t can_len2dlc(uint8_t len);
+
#endif
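A rough sketch (not part of this patch) of building a CAN FD frame with the
widened payload and the new flags field; the CAN ID is arbitrary and len is
assumed to be a valid CAN FD payload length.

static void example_build_canfd_frame(qemu_can_frame *frame,
                                      const uint8_t *payload, uint8_t len)
{
    frame->can_id = 0x123;
    frame->flags = QEMU_CAN_FRMF_TYPE_FD | QEMU_CAN_FRMF_BRS;
    frame->can_dlc = can_len2dlc(len);   /* payloads may now exceed 8 bytes */
    memcpy(frame->data, payload, len);
}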
diff --git a/include/net/can_host.h b/include/net/can_host.h
index d79676746b..caab71bdda 100644
--- a/include/net/can_host.h
+++ b/include/net/can_host.h
@@ -29,27 +29,23 @@
#define NET_CAN_HOST_H
#include "net/can_emu.h"
+#include "qom/object.h"
#define TYPE_CAN_HOST "can-host"
-#define CAN_HOST_CLASS(klass) \
- OBJECT_CLASS_CHECK(CanHostClass, (klass), TYPE_CAN_HOST)
-#define CAN_HOST_GET_CLASS(obj) \
- OBJECT_GET_CLASS(CanHostClass, (obj), TYPE_CAN_HOST)
-#define CAN_HOST(obj) \
- OBJECT_CHECK(CanHostState, (obj), TYPE_CAN_HOST)
-
-typedef struct CanHostState {
- ObjectClass oc;
+OBJECT_DECLARE_TYPE(CanHostState, CanHostClass, CAN_HOST)
+
+struct CanHostState {
+ Object oc;
CanBusState *bus;
CanBusClientState bus_client;
-} CanHostState;
+};
-typedef struct CanHostClass {
+struct CanHostClass {
ObjectClass oc;
void (*connect)(CanHostState *ch, Error **errp);
void (*disconnect)(CanHostState *ch);
-} CanHostClass;
+};
#endif
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 05a0d273fe..7dec37e56c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -21,11 +21,16 @@
#include "qemu/bswap.h"
struct iovec;
+#define CSUM_IP 0x01
+#define CSUM_TCP 0x02
+#define CSUM_UDP 0x04
+#define CSUM_ALL (CSUM_IP | CSUM_TCP | CSUM_UDP)
+
uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq);
uint16_t net_checksum_finish(uint32_t sum);
uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto,
uint8_t *addrs, uint8_t *buf);
-void net_checksum_calculate(uint8_t *data, int length);
+void net_checksum_calculate(uint8_t *data, int length, int csum_flag);
static inline uint32_t
net_checksum_add(int len, uint8_t *buf)
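
The extra csum_flag argument lets callers restrict which checksums net_checksum_calculate() recomputes. A brief sketch, where buf/len stand for whatever frame a device model holds:

    /* Recompute only the L4 checksums after rewriting the payload,
     * leaving the IP header checksum untouched: */
    net_checksum_calculate(buf, len, CSUM_TCP | CSUM_UDP);

    /* Or recompute everything the helper knows about: */
    net_checksum_calculate(buf, len, CSUM_ALL);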
diff --git a/include/net/eth.h b/include/net/eth.h
index 0671be6916..3b80b6e07f 100644
--- a/include/net/eth.h
+++ b/include/net/eth.h
@@ -31,6 +31,9 @@
#define ETH_ALEN 6
#define ETH_HLEN 14
+#define ETH_ZLEN 60 /* Min. octets in frame without FCS */
+#define ETH_FCS_LEN 4
+#define ETH_MTU 1500
struct eth_header {
uint8_t h_dest[ETH_ALEN]; /* destination eth addr */
@@ -158,7 +161,7 @@ struct tcp_hdr {
u_short th_dport; /* destination port */
uint32_t th_seq; /* sequence number */
uint32_t th_ack; /* acknowledgment number */
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
u_char th_off : 4, /* data offset */
th_x2:4; /* (unused) */
#else
@@ -221,6 +224,7 @@ struct tcp_hdr {
#define IP_HEADER_VERSION_6 (6)
#define IP_PROTO_TCP (6)
#define IP_PROTO_UDP (17)
+#define IP_PROTO_SCTP (132)
#define IPTOS_ECN_MASK 0x03
#define IPTOS_ECN(x) ((x) & IPTOS_ECN_MASK)
#define IPTOS_ECN_CE 0x03
@@ -311,10 +315,10 @@ eth_get_l2_hdr_length(const void *p)
}
static inline uint32_t
-eth_get_l2_hdr_length_iov(const struct iovec *iov, int iovcnt)
+eth_get_l2_hdr_length_iov(const struct iovec *iov, size_t iovcnt, size_t iovoff)
{
uint8_t p[sizeof(struct eth_header) + sizeof(struct vlan_header)];
- size_t copied = iov_to_buf(iov, iovcnt, 0, p, ARRAY_SIZE(p));
+ size_t copied = iov_to_buf(iov, iovcnt, iovoff, p, ARRAY_SIZE(p));
if (copied < ARRAY_SIZE(p)) {
return copied;
@@ -339,26 +343,19 @@ eth_get_pkt_tci(const void *p)
size_t
eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff,
- uint8_t *new_ehdr_buf,
+ void *new_ehdr_buf,
uint16_t *payload_offset, uint16_t *tci);
size_t
-eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff,
- uint16_t vet, uint8_t *new_ehdr_buf,
+eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, int index,
+ uint16_t vet, uint16_t vet_ext, void *new_ehdr_buf,
uint16_t *payload_offset, uint16_t *tci);
uint16_t
eth_get_l3_proto(const struct iovec *l2hdr_iov, int iovcnt, size_t l2hdr_len);
-void eth_setup_vlan_headers_ex(struct eth_header *ehdr, uint16_t vlan_tag,
- uint16_t vlan_ethtype, bool *is_new);
-
-static inline void
-eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
- bool *is_new)
-{
- eth_setup_vlan_headers_ex(ehdr, vlan_tag, ETH_P_VLAN, is_new);
-}
+void eth_setup_vlan_headers(struct eth_header *ehdr, size_t *ehdr_size,
+ uint16_t vlan_tag, uint16_t vlan_ethtype);
uint8_t eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto);
@@ -380,18 +377,25 @@ typedef struct eth_ip4_hdr_info_st {
bool fragment;
} eth_ip4_hdr_info;
+typedef enum EthL4HdrProto {
+ ETH_L4_HDR_PROTO_INVALID,
+ ETH_L4_HDR_PROTO_TCP,
+ ETH_L4_HDR_PROTO_UDP,
+ ETH_L4_HDR_PROTO_SCTP
+} EthL4HdrProto;
+
typedef struct eth_l4_hdr_info_st {
union {
struct tcp_header tcp;
struct udp_header udp;
} hdr;
+ EthL4HdrProto proto;
bool has_tcp_data;
} eth_l4_hdr_info;
-void eth_get_protocols(const struct iovec *iov, int iovcnt,
- bool *isip4, bool *isip6,
- bool *isudp, bool *istcp,
+void eth_get_protocols(const struct iovec *iov, size_t iovcnt, size_t iovoff,
+ bool *hasip4, bool *hasip6,
size_t *l3hdr_off,
size_t *l4hdr_off,
size_t *l5hdr_off,
@@ -399,11 +403,6 @@ void eth_get_protocols(const struct iovec *iov, int iovcnt,
eth_ip4_hdr_info *ip4hdr_info,
eth_l4_hdr_info *l4hdr_info);
-void eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
- void *l3hdr, size_t l3hdr_len,
- size_t l3payload_len,
- size_t frag_offset, bool more_frags);
-
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len);
@@ -422,4 +421,20 @@ bool
eth_parse_ipv6_hdr(const struct iovec *pkt, int pkt_frags,
size_t ip6hdr_off, eth_ip6_hdr_info *info);
+/**
+ * eth_pad_short_frame - pad a short frame to the minimum Ethernet frame length
+ *
+ * If the Ethernet frame size is shorter than 60 bytes, it will be padded to
+ * 60 bytes at the address @padded_pkt.
+ *
+ * @padded_pkt: buffer address to hold the padded frame
+ * @padded_buflen: pointer holding length of @padded_pkt. If the frame is
+ * padded, the length will be updated to the padded one.
+ * @pkt: address of the original Ethernet frame
+ * @pkt_size: size of the original Ethernet frame
+ * @return true if the frame is padded, otherwise false
+ */
+bool eth_pad_short_frame(uint8_t *padded_pkt, size_t *padded_buflen,
+ const void *pkt, size_t pkt_size);
+
#endif
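
A minimal usage sketch of eth_pad_short_frame() as documented above, assuming buf/size describe an outgoing frame; in device models this is typically guarded by the net_peer_needs_padding() helper added to net.h further below:

    uint8_t padded[ETH_ZLEN];
    size_t padded_len = sizeof(padded);

    if (eth_pad_short_frame(padded, &padded_len, buf, size)) {
        buf = padded;            /* frame was shorter than 60 bytes */
        size = padded_len;
    }
    /* buf/size now describe a frame of at least ETH_ZLEN bytes. */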
diff --git a/include/net/filter.h b/include/net/filter.h
index 9393c59192..f15f7932b2 100644
--- a/include/net/filter.h
+++ b/include/net/filter.h
@@ -9,18 +9,13 @@
#ifndef QEMU_NET_FILTER_H
#define QEMU_NET_FILTER_H
-#include "qapi/qapi-types-net.h"
+#include "qapi/qapi-types-common.h"
#include "qemu/queue.h"
#include "qom/object.h"
#include "net/queue.h"
#define TYPE_NETFILTER "netfilter"
-#define NETFILTER(obj) \
- OBJECT_CHECK(NetFilterState, (obj), TYPE_NETFILTER)
-#define NETFILTER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(NetFilterClass, (obj), TYPE_NETFILTER)
-#define NETFILTER_CLASS(klass) \
- OBJECT_CLASS_CHECK(NetFilterClass, (klass), TYPE_NETFILTER)
+OBJECT_DECLARE_TYPE(NetFilterState, NetFilterClass, NETFILTER)
typedef void (FilterSetup) (NetFilterState *nf, Error **errp);
typedef void (FilterCleanup) (NetFilterState *nf);
@@ -40,7 +35,7 @@ typedef void (FilterStatusChanged) (NetFilterState *nf, Error **errp);
typedef void (FilterHandleEvent) (NetFilterState *nf, int event, Error **errp);
-typedef struct NetFilterClass {
+struct NetFilterClass {
ObjectClass parent_class;
/* optional */
@@ -50,7 +45,7 @@ typedef struct NetFilterClass {
FilterHandleEvent *handle_event;
/* mandatory */
FilterReceiveIOV *receive_iov;
-} NetFilterClass;
+};
struct NetFilterState {
diff --git a/include/net/net.h b/include/net/net.h
index e7ef42d62b..b1f9b35fcc 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -4,6 +4,7 @@
#include "qemu/queue.h"
#include "qapi/qapi-types-net.h"
#include "net/queue.h"
+#include "hw/qdev-properties-system.h"
#define MAC_FMT "%02X:%02X:%02X:%02X:%02X:%02X"
#define MAC_ARG(x) ((uint8_t *)(x))[0], ((uint8_t *)(x))[1], \
@@ -43,6 +44,9 @@ typedef struct NICConf {
typedef void (NetPoll)(NetClientState *, bool enable);
typedef bool (NetCanReceive)(NetClientState *);
+typedef int (NetStart)(NetClientState *);
+typedef int (NetLoad)(NetClientState *);
+typedef void (NetStop)(NetClientState *);
typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t);
typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
typedef void (NetCleanup) (NetClientState *);
@@ -50,16 +54,21 @@ typedef void (LinkStatusChanged)(NetClientState *);
typedef void (NetClientDestructor)(NetClientState *);
typedef RxFilterInfo *(QueryRxFilter)(NetClientState *);
typedef bool (HasUfo)(NetClientState *);
+typedef bool (HasUso)(NetClientState *);
typedef bool (HasVnetHdr)(NetClientState *);
typedef bool (HasVnetHdrLen)(NetClientState *, int);
+typedef bool (GetUsingVnetHdr)(NetClientState *);
typedef void (UsingVnetHdr)(NetClientState *, bool);
-typedef void (SetOffload)(NetClientState *, int, int, int, int, int);
+typedef void (SetOffload)(NetClientState *, int, int, int, int, int, int, int);
+typedef int (GetVnetHdrLen)(NetClientState *);
typedef void (SetVnetHdrLen)(NetClientState *, int);
typedef int (SetVnetLE)(NetClientState *, bool);
typedef int (SetVnetBE)(NetClientState *, bool);
typedef struct SocketReadState SocketReadState;
typedef void (SocketReadStateFinalize)(SocketReadState *rs);
typedef void (NetAnnounce)(NetClientState *);
+typedef bool (SetSteeringEBPF)(NetClientState *, int);
+typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **);
typedef struct NetClientInfo {
NetClientDriver type;
@@ -68,19 +77,27 @@ typedef struct NetClientInfo {
NetReceive *receive_raw;
NetReceiveIOV *receive_iov;
NetCanReceive *can_receive;
+ NetStart *start;
+ NetLoad *load;
+ NetStop *stop;
NetCleanup *cleanup;
LinkStatusChanged *link_status_changed;
QueryRxFilter *query_rx_filter;
NetPoll *poll;
HasUfo *has_ufo;
+ HasUso *has_uso;
HasVnetHdr *has_vnet_hdr;
HasVnetHdrLen *has_vnet_hdr_len;
+ GetUsingVnetHdr *get_using_vnet_hdr;
UsingVnetHdr *using_vnet_hdr;
SetOffload *set_offload;
+ GetVnetHdrLen *get_vnet_hdr_len;
SetVnetHdrLen *set_vnet_hdr_len;
SetVnetLE *set_vnet_le;
SetVnetBE *set_vnet_be;
NetAnnounce *announce;
+ SetSteeringEBPF *set_steering_ebpf;
+ NetCheckPeerType *check_peer_type;
} NetClientInfo;
struct NetClientState {
@@ -99,12 +116,17 @@ struct NetClientState {
int vring_enable;
int vnet_hdr_len;
bool is_netdev;
+ bool do_not_pad; /* do not pad to the minimum ethernet frame length */
+ bool is_datapath;
QTAILQ_HEAD(, NetFilterState) filters;
};
+typedef QTAILQ_HEAD(NetClientStateList, NetClientState) NetClientStateList;
+
typedef struct NICState {
NetClientState *ncs;
NICConf *conf;
+ MemReentrancyGuard *reentrancy_guard;
void *opaque;
bool peer_deleted;
} NICState;
@@ -130,10 +152,15 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
const char *name);
+NetClientState *qemu_new_net_control_client(NetClientInfo *info,
+ NetClientState *peer,
+ const char *model,
+ const char *name);
NICState *qemu_new_nic(NetClientInfo *info,
NICConf *conf,
const char *model,
const char *name,
+ MemReentrancyGuard *reentrancy_guard,
void *opaque);
void qemu_del_nic(NICState *nic);
NetClientState *qemu_get_subqueue(NICState *nic, int queue_index);
@@ -143,41 +170,122 @@ void *qemu_get_nic_opaque(NetClientState *nc);
void qemu_del_net_client(NetClientState *nc);
typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_receive_packet(NetClientState *nc);
int qemu_can_send_packet(NetClientState *nc);
ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
int iovcnt);
ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
int iovcnt, NetPacketSent *sent_cb);
ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_receive_packet_iov(NetClientState *nc,
+ const struct iovec *iov,
+ int iovcnt);
ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
int size, NetPacketSent *sent_cb);
void qemu_purge_queued_packets(NetClientState *nc);
void qemu_flush_queued_packets(NetClientState *nc);
void qemu_flush_or_purge_queued_packets(NetClientState *nc, bool purge);
+void qemu_set_info_str(NetClientState *nc,
+ const char *fmt, ...) G_GNUC_PRINTF(2, 3);
void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
bool qemu_has_ufo(NetClientState *nc);
+bool qemu_has_uso(NetClientState *nc);
bool qemu_has_vnet_hdr(NetClientState *nc);
bool qemu_has_vnet_hdr_len(NetClientState *nc, int len);
+bool qemu_get_using_vnet_hdr(NetClientState *nc);
void qemu_using_vnet_hdr(NetClientState *nc, bool enable);
void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
- int ecn, int ufo);
+ int ecn, int ufo, int uso4, int uso6);
+int qemu_get_vnet_hdr_len(NetClientState *nc);
void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
int qemu_set_vnet_le(NetClientState *nc, bool is_le);
int qemu_set_vnet_be(NetClientState *nc, bool is_be);
void qemu_macaddr_default_if_unset(MACAddr *macaddr);
-int qemu_show_nic_models(const char *arg, const char *const *models);
-void qemu_check_nic_model(NICInfo *nd, const char *model);
-int qemu_find_nic_model(NICInfo *nd, const char * const *models,
- const char *default_model);
+/**
+ * qemu_find_nic_info: Obtain NIC configuration information
+ * @typename: Name of device object type
+ * @match_default: Match NIC configurations with no model specified
+ * @alias: Additional model string to match (for user convenience and
+ * backward compatibility).
+ *
+ * Search for a NIC configuration matching the NIC model constraints.
+ */
+NICInfo *qemu_find_nic_info(const char *typename, bool match_default,
+ const char *alias);
+/**
+ * qemu_configure_nic_device: Apply NIC configuration to a given device
+ * @dev: Network device to be configured
+ * @match_default: Match NIC configurations with no model specified
+ * @alias: Additional model string to match
+ *
+ * Search for a NIC configuration for the provided device, using the
+ * additionally specified matching constraints. If found, apply the
+ * configuration using qdev_set_nic_properties() and return %true.
+ *
+ * This is used by platform code which creates the device anyway,
+ * regardless of whether there is a configuration for it. This tends
+ * to be platforms which ignore `--nodefaults` and create net devices
+ * anyway, for example because the Ethernet device on that board is
+ * always physically present.
+ */
+bool qemu_configure_nic_device(DeviceState *dev, bool match_default,
+ const char *alias);
+/**
+ * qemu_create_nic_device: Create a NIC device if a configuration exists for it
+ * @typename: Object typename of network device
+ * @match_default: Match NIC configurations with no model specified
+ * @alias: Additional model string to match
+ *
+ * Search for a NIC configuration for the provided device type. If found,
+ * create an object of the corresponding type and return it.
+ */
+DeviceState *qemu_create_nic_device(const char *typename, bool match_default,
+ const char *alias);
+
+/*
+ * qemu_create_nic_bus_devices: Create configured NIC devices for a given bus
+ * @bus: Bus on which to create devices
+ * @parent_type: Object type for devices to be created (e.g. TYPE_PCI_DEVICE)
+ * @default_model: Object type name for default NIC model (or %NULL)
+ * @alias: Additional model string to replace, for user convenience
+ * @alias_target: Actual object type name to be used in place of @alias
+ *
+ * Instantiate dynamic NICs on a given bus, typically a PCI bus. This scans
+ * for available NIC configurations which either specify a model which is
+ * a child type of @parent_type, or which do not specify a model when
+ * @default_model is non-NULL. Each device is instantiated on the given @bus.
+ *
+ * A single substitution is supported, e.g. "xen" → "xen-net-device" for the
+ * Xen bus, or "virtio" → "virtio-net-pci" for PCI. This allows the user to
+ * specify a more understandable "model=" parameter on the command line, not
+ * only the real object typename.
+ */
+void qemu_create_nic_bus_devices(BusState *bus, const char *parent_type,
+ const char *default_model,
+ const char *alias, const char *alias_target);
void print_net_client(Monitor *mon, NetClientState *nc);
-void hmp_info_network(Monitor *mon, const QDict *qdict);
void net_socket_rs_init(SocketReadState *rs,
SocketReadStateFinalize *finalize,
bool vnet_hdr);
NetClientState *qemu_get_peer(NetClientState *nc, int queue_index);
+/**
+ * qemu_get_nic_models:
+ * @device_type: Defines which devices should be taken into consideration
+ * (e.g. TYPE_DEVICE for all devices, or TYPE_PCI_DEVICE for PCI)
+ *
+ * Get an array of pointers to names of NIC devices that are available in
+ * the QEMU binary. The array is terminated with a NULL pointer entry.
+ * The caller is responsible for freeing the memory when it is not required
+ * anymore, e.g. with g_ptr_array_free(..., true).
+ *
+ * Returns: Pointer to the array that contains the pointers to the names.
+ */
+GPtrArray *qemu_get_nic_models(const char *device_type);
+
/* NIC info */
#define MAX_NICS 8
@@ -193,13 +301,13 @@ struct NICInfo {
int nvectors;
};
-extern int nb_nics;
-extern NICInfo nd_table[MAX_NICS];
-extern const char *host_net_devices[];
-
/* from net.c */
-int net_client_parse(QemuOptsList *opts_list, const char *str);
-int net_init_clients(Error **errp);
+extern NetClientStateList net_clients;
+bool netdev_is_modern(const char *optstr);
+void netdev_parse_modern(const char *optstr);
+void net_client_parse(QemuOptsList *opts_list, const char *optstr);
+void show_netdevs(void);
+void net_init_clients(void);
void net_check_clients(void);
void net_cleanup(void);
void hmp_host_net_add(Monitor *mon, const QDict *qdict);
@@ -209,8 +317,8 @@ void netdev_add(QemuOpts *opts, Error **errp);
int net_hub_id_for_client(NetClientState *nc, int *id);
NetClientState *net_hub_port_find(int hub_id);
-#define DEFAULT_NETWORK_SCRIPT "/etc/qemu-ifup"
-#define DEFAULT_NETWORK_DOWN_SCRIPT "/etc/qemu-ifdown"
+#define DEFAULT_NETWORK_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifup"
+#define DEFAULT_NETWORK_DOWN_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifdown"
#define DEFAULT_BRIDGE_HELPER CONFIG_QEMU_HELPERDIR "/qemu-bridge-helper"
#define DEFAULT_BRIDGE_INTERFACE "br0"
@@ -233,4 +341,9 @@ uint32_t net_crc32_le(const uint8_t *p, int len);
.offset = vmstate_offset_macaddr(_state, _field), \
}
+static inline bool net_peer_needs_padding(NetClientState *nc)
+{
+ return nc->peer && !nc->peer->do_not_pad;
+}
+
#endif
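
A hedged sketch of how board code might use the new NIC helpers in place of the removed qemu_check_nic_model()/nd_table flow; the sysbus calls, the "smc91c111" typename and the s->eth field are illustrative assumptions:

    /* Board with an always-present on-board NIC: create it regardless of
     * configuration and apply any matching -nic option. */
    qemu_configure_nic_device(DEVICE(s->eth), true, NULL);

    /* Board with an optional NIC: only instantiate it when the user
     * actually configured one. */
    DeviceState *dev = qemu_create_nic_device("smc91c111", true, NULL);
    if (dev) {
        sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_addr);
    }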
diff --git a/include/net/queue.h b/include/net/queue.h
index c0269bb1dc..9f2f289d77 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -55,6 +55,14 @@ void qemu_net_queue_append_iov(NetQueue *queue,
void qemu_del_net_queue(NetQueue *queue);
+ssize_t qemu_net_queue_receive(NetQueue *queue,
+ const uint8_t *data,
+ size_t size);
+
+ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
+ const struct iovec *iov,
+ int iovcnt);
+
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,
diff --git a/include/net/vhost-user.h b/include/net/vhost-user.h
index 5bcd8a6285..35bf619709 100644
--- a/include/net/vhost-user.h
+++ b/include/net/vhost-user.h
@@ -14,5 +14,6 @@
struct vhost_net;
struct vhost_net *vhost_user_get_vhost_net(NetClientState *nc);
uint64_t vhost_user_get_acked_features(NetClientState *nc);
+void vhost_user_save_acked_features(NetClientState *nc);
#endif /* VHOST_USER_H */
diff --git a/include/net/vhost-vdpa.h b/include/net/vhost-vdpa.h
index 45e34b7cfc..b81f9a6f2a 100644
--- a/include/net/vhost-vdpa.h
+++ b/include/net/vhost-vdpa.h
@@ -15,7 +15,6 @@
#define TYPE_VHOST_VDPA "vhost-vdpa"
struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc);
-uint64_t vhost_vdpa_get_acked_features(NetClientState *nc);
extern const int vdpa_feature_bits[];
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 172b0051d8..c6a5361a2a 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -4,9 +4,6 @@
#include "net/net.h"
#include "hw/virtio/vhost-backend.h"
-#define VHOST_NET_INIT_FAILED \
- "vhost-net requested but could not be initialized"
-
struct vhost_net;
typedef struct vhost_net VHostNetState;
@@ -14,14 +11,17 @@ typedef struct VhostNetOptions {
VhostBackendType backend_type;
NetClientState *net_backend;
uint32_t busyloop_timeout;
+ unsigned int nvqs;
void *opaque;
} VhostNetOptions;
uint64_t vhost_net_get_max_queues(VHostNetState *net);
struct vhost_net *vhost_net_init(VhostNetOptions *options);
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+ int data_queue_pairs, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+ int data_queue_pairs, int cvq);
void vhost_net_cleanup(VHostNetState *net);
@@ -36,6 +36,8 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask);
int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
VHostNetState *get_vhost_net(NetClientState *nc);
@@ -45,4 +47,10 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net);
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
+void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
+ int vq_index);
+int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
+ int vq_index);
+
+void vhost_net_save_acked_features(NetClientState *nc);
#endif
diff --git a/include/qapi/compat-policy.h b/include/qapi/compat-policy.h
new file mode 100644
index 0000000000..8b7b25c0b5
--- /dev/null
+++ b/include/qapi/compat-policy.h
@@ -0,0 +1,45 @@
+/*
+ * Policy for handling "funny" management interfaces
+ *
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef QAPI_COMPAT_POLICY_H
+#define QAPI_COMPAT_POLICY_H
+
+#include "qapi/error.h"
+#include "qapi/qapi-types-compat.h"
+
+extern CompatPolicy compat_policy;
+
+bool compat_policy_input_ok(unsigned special_features,
+ const CompatPolicy *policy,
+ ErrorClass error_class,
+ const char *kind, const char *name,
+ Error **errp);
+
+/*
+ * Create a QObject input visitor for @obj for use with QMP
+ *
+ * This is like qobject_input_visitor_new(), except it obeys the
+ * policy for handling deprecated management interfaces set with
+ * -compat.
+ */
+Visitor *qobject_input_visitor_new_qmp(QObject *obj);
+
+/*
+ * Create a QObject output visitor for @obj for use with QMP
+ *
+ * This is like qobject_output_visitor_new(), except it obeys the
+ * policy for handling deprecated management interfaces set with
+ * -compat.
+ */
+Visitor *qobject_output_visitor_new_qmp(QObject **result);
+
+#endif
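
A sketch of the intended use: QMP code paths build their visitors through these wrappers so that the -compat policy is applied to deprecated or unstable members. The QAPI type name is illustrative:

    Error *err = NULL;
    SomeQapiType *arg = NULL;                     /* any generated QAPI type */
    Visitor *v = qobject_input_visitor_new_qmp(QOBJECT(args));

    visit_type_SomeQapiType(v, NULL, &arg, &err); /* generated visitor */
    visit_free(v);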
diff --git a/include/qapi/error.h b/include/qapi/error.h
index eaa05c4837..71f8fb2c50 100644
--- a/include/qapi/error.h
+++ b/include/qapi/error.h
@@ -207,7 +207,7 @@
*
* Without ERRP_GUARD(), use of the @errp parameter is restricted:
* - It must not be dereferenced, because it may be null.
- * - It should not be passed to error_prepend() or
+ * - It should not be passed to error_prepend(), error_vprepend(), or
* error_append_hint(), because that doesn't work with &error_fatal.
* ERRP_GUARD() lifts these restrictions.
*
@@ -235,7 +235,7 @@
* error_propagate_prepend(errp, *errp, ...) by error_prepend(errp, ...)
*
* 4. Ensure @errp is valid at return: when you destroy *errp, set
- * errp = NULL.
+ * *errp = NULL.
*
* Example:
*
@@ -320,7 +320,7 @@ ErrorClass error_get_class(const Error *err);
void error_setg_internal(Error **errp,
const char *src, int line, const char *func,
const char *fmt, ...)
- GCC_FMT_ATTR(5, 6);
+ G_GNUC_PRINTF(5, 6);
/*
* Just like error_setg(), with @os_error info added to the message.
@@ -336,7 +336,7 @@ void error_setg_internal(Error **errp,
void error_setg_errno_internal(Error **errp,
const char *fname, int line, const char *func,
int os_error, const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
#ifdef _WIN32
/*
@@ -350,7 +350,7 @@ void error_setg_errno_internal(Error **errp,
void error_setg_win32_internal(Error **errp,
const char *src, int line, const char *func,
int win32_err, const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
#endif
/*
@@ -383,21 +383,21 @@ void error_propagate(Error **dst_errp, Error *local_err);
*/
void error_propagate_prepend(Error **dst_errp, Error *local_err,
const char *fmt, ...)
- GCC_FMT_ATTR(3, 4);
+ G_GNUC_PRINTF(3, 4);
/*
* Prepend some text to @errp's human-readable error message.
* The text is made by formatting @fmt, @ap like vprintf().
*/
void error_vprepend(Error *const *errp, const char *fmt, va_list ap)
- GCC_FMT_ATTR(2, 0);
+ G_GNUC_PRINTF(2, 0);
/*
* Prepend some text to @errp's human-readable error message.
* The text is made by formatting @fmt, ... like printf().
*/
void error_prepend(Error *const *errp, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/*
* Append a printf-style human-readable explanation to an existing error.
@@ -414,7 +414,7 @@ void error_prepend(Error *const *errp, const char *fmt, ...)
* newline.
*/
void error_append_hint(Error *const *errp, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/*
* Convenience function to report open() failure.
@@ -458,13 +458,13 @@ void error_report_err(Error *err);
* Convenience function to error_prepend(), warn_report() and free @err.
*/
void warn_reportf_err(Error *err, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/*
* Convenience function to error_prepend(), error_report() and free @err.
*/
void error_reportf_err(Error *err, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/*
* Just like error_setg(), except you get to specify the error class.
@@ -477,7 +477,7 @@ void error_reportf_err(Error *err, const char *fmt, ...)
void error_set_internal(Error **errp,
const char *src, int line, const char *func,
ErrorClass err_class, const char *fmt, ...)
- GCC_FMT_ATTR(6, 7);
+ G_GNUC_PRINTF(6, 7);
/*
* Make @errp parameter easier to use regardless of argument value
@@ -520,6 +520,12 @@ static inline void error_propagator_cleanup(ErrorPropagator *prop)
G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(ErrorPropagator, error_propagator_cleanup);
/*
+ * Special error destination to warn on error.
+ * See error_setg() and error_propagate() for details.
+ */
+extern Error *error_warn;
+
+/*
* Special error destination to abort on error.
* See error_setg() and error_propagate() for details.
*/
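
A minimal sketch of the pattern the updated documentation describes: with ERRP_GUARD(), @errp may safely be passed to error_prepend() or error_append_hint() even when the caller passed &error_fatal. The callee and field names are illustrative:

    int frobnicate(Thing *t, Error **errp)
    {
        ERRP_GUARD();

        if (!do_step(t, errp)) {
            error_prepend(errp, "frobnication of '%s' failed: ", t->name);
            return -1;
        }
        return 0;
    }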
diff --git a/include/qapi/forward-visitor.h b/include/qapi/forward-visitor.h
new file mode 100644
index 0000000000..50fb3e9d50
--- /dev/null
+++ b/include/qapi/forward-visitor.h
@@ -0,0 +1,27 @@
+/*
+ * Forwarding visitor
+ *
+ * Copyright Red Hat, Inc. 2021
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef FORWARD_VISITOR_H
+#define FORWARD_VISITOR_H
+
+#include "qapi/visitor.h"
+
+typedef struct ForwardFieldVisitor ForwardFieldVisitor;
+
+/*
+ * The forwarding visitor only expects a single name, @from, to be passed for
+ * toplevel fields. It is converted to @to and forwarded to the @target visitor.
+ * Calls within a struct are forwarded without changing the name.
+ */
+Visitor *visitor_forward_field(Visitor *target, const char *from, const char *to);
+
+#endif
diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h
index 5a9cf82472..f2e956813a 100644
--- a/include/qapi/qmp/dispatch.h
+++ b/include/qapi/qmp/dispatch.h
@@ -14,42 +14,49 @@
#ifndef QAPI_QMP_DISPATCH_H
#define QAPI_QMP_DISPATCH_H
+#include "monitor/monitor.h"
#include "qemu/queue.h"
typedef void (QmpCommandFunc)(QDict *, QObject **, Error **);
typedef enum QmpCommandOptions
{
- QCO_NO_OPTIONS = 0x0,
QCO_NO_SUCCESS_RESP = (1U << 0),
QCO_ALLOW_OOB = (1U << 1),
QCO_ALLOW_PRECONFIG = (1U << 2),
+ QCO_COROUTINE = (1U << 3),
} QmpCommandOptions;
typedef struct QmpCommand
{
const char *name;
+ /* Runs in coroutine context if QCO_COROUTINE is set */
QmpCommandFunc *fn;
QmpCommandOptions options;
+ unsigned special_features;
QTAILQ_ENTRY(QmpCommand) node;
bool enabled;
+ const char *disable_reason;
} QmpCommand;
typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList;
void qmp_register_command(QmpCommandList *cmds, const char *name,
- QmpCommandFunc *fn, QmpCommandOptions options);
+ QmpCommandFunc *fn, QmpCommandOptions options,
+ unsigned special_features);
const QmpCommand *qmp_find_command(const QmpCommandList *cmds,
const char *name);
-void qmp_disable_command(QmpCommandList *cmds, const char *name);
+void qmp_disable_command(QmpCommandList *cmds, const char *name,
+ const char *err_msg);
void qmp_enable_command(QmpCommandList *cmds, const char *name);
bool qmp_command_is_enabled(const QmpCommand *cmd);
+bool qmp_command_available(const QmpCommand *cmd, Error **errp);
const char *qmp_command_name(const QmpCommand *cmd);
bool qmp_has_success_response(const QmpCommand *cmd);
QDict *qmp_error_response(Error *err);
-QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request,
- bool allow_oob);
+QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request,
+ bool allow_oob, Monitor *cur_mon);
bool qmp_is_oob(const QDict *dict);
typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque);
diff --git a/include/qapi/qmp/json-writer.h b/include/qapi/qmp/json-writer.h
new file mode 100644
index 0000000000..b70ba64077
--- /dev/null
+++ b/include/qapi/qmp/json-writer.h
@@ -0,0 +1,35 @@
+/*
+ * JSON Writer
+ *
+ * Copyright (c) 2020 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef JSON_WRITER_H
+#define JSON_WRITER_H
+
+JSONWriter *json_writer_new(bool pretty);
+const char *json_writer_get(JSONWriter *);
+GString *json_writer_get_and_free(JSONWriter *);
+void json_writer_free(JSONWriter *);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(JSONWriter, json_writer_free)
+
+void json_writer_start_object(JSONWriter *, const char *name);
+void json_writer_end_object(JSONWriter *);
+void json_writer_start_array(JSONWriter *, const char *name);
+void json_writer_end_array(JSONWriter *);
+void json_writer_bool(JSONWriter *, const char *name, bool val);
+void json_writer_null(JSONWriter *, const char *name);
+void json_writer_int64(JSONWriter *, const char *name, int64_t val);
+void json_writer_uint64(JSONWriter *, const char *name, uint64_t val);
+void json_writer_double(JSONWriter *, const char *name, double val);
+void json_writer_str(JSONWriter *, const char *name, const char *str);
+
+#endif
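
A minimal sketch built from the declarations above; passing NULL as the name of the top-level container follows existing in-tree users and is an assumption:

    JSONWriter *w = json_writer_new(false);      /* compact output */

    json_writer_start_object(w, NULL);
    json_writer_str(w, "device", "virtio-net");
    json_writer_int64(w, "queues", 4);
    json_writer_end_object(w);

    puts(json_writer_get(w));                    /* roughly: {"device": "virtio-net", "queues": 4} */
    json_writer_free(w);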
diff --git a/include/qapi/qmp/qbool.h b/include/qapi/qmp/qbool.h
index 5f61e38e64..0d09726939 100644
--- a/include/qapi/qmp/qbool.h
+++ b/include/qapi/qmp/qbool.h
@@ -21,9 +21,11 @@ struct QBool {
bool value;
};
+void qbool_unref(QBool *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QBool, qbool_unref)
+
QBool *qbool_from_bool(bool value);
bool qbool_get_bool(const QBool *qb);
-bool qbool_is_equal(const QObject *x, const QObject *y);
-void qbool_destroy_obj(QObject *obj);
#endif /* QBOOL_H */
diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h
index da942347a7..82e90fc072 100644
--- a/include/qapi/qmp/qdict.h
+++ b/include/qapi/qmp/qdict.h
@@ -30,6 +30,10 @@ struct QDict {
QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX];
};
+void qdict_unref(QDict *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QDict, qdict_unref)
+
/* Object API */
QDict *qdict_new(void);
const char *qdict_entry_key(const QDictEntry *entry);
@@ -39,10 +43,8 @@ void qdict_put_obj(QDict *qdict, const char *key, QObject *value);
void qdict_del(QDict *qdict, const char *key);
int qdict_haskey(const QDict *qdict, const char *key);
QObject *qdict_get(const QDict *qdict, const char *key);
-bool qdict_is_equal(const QObject *x, const QObject *y);
const QDictEntry *qdict_first(const QDict *qdict);
const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry);
-void qdict_destroy_obj(QObject *obj);
/* Helper to qdict_put_obj(), accepts any object */
#define qdict_put(qdict, key, obj) \
diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h
index 7c76e24aa7..00b18e9082 100644
--- a/include/qapi/qmp/qerror.h
+++ b/include/qapi/qmp/qerror.h
@@ -16,78 +16,22 @@
* These macros will go away, please don't use in new code, and do not
* add new ones!
*/
-#define QERR_BASE_NOT_FOUND \
- "Base '%s' not found"
-
-#define QERR_BUS_NO_HOTPLUG \
- "Bus '%s' does not support hotplugging"
-
-#define QERR_DEVICE_HAS_NO_MEDIUM \
- "Device '%s' has no medium"
-
-#define QERR_DEVICE_INIT_FAILED \
- "Device '%s' could not be initialized"
-
-#define QERR_DEVICE_IN_USE \
- "Device '%s' is in use"
-
-#define QERR_DEVICE_NO_HOTPLUG \
- "Device '%s' does not support hotplugging"
-
-#define QERR_FD_NOT_FOUND \
- "File descriptor named '%s' not found"
-
-#define QERR_FD_NOT_SUPPLIED \
- "No file descriptor supplied via SCM_RIGHTS"
-
-#define QERR_FEATURE_DISABLED \
- "The feature '%s' is not enabled"
-
-#define QERR_INVALID_BLOCK_FORMAT \
- "Invalid block format '%s'"
-
-#define QERR_INVALID_PARAMETER \
- "Invalid parameter '%s'"
-
-#define QERR_INVALID_PARAMETER_TYPE \
- "Invalid parameter type for '%s', expected: %s"
#define QERR_INVALID_PARAMETER_VALUE \
"Parameter '%s' expects %s"
-#define QERR_INVALID_PASSWORD \
- "Password incorrect"
-
#define QERR_IO_ERROR \
"An IO error has occurred"
-#define QERR_MIGRATION_ACTIVE \
- "There's a migration process in progress"
-
#define QERR_MISSING_PARAMETER \
"Parameter '%s' is missing"
-#define QERR_PERMISSION_DENIED \
- "Insufficient permission to perform this operation"
-
-#define QERR_PROPERTY_VALUE_BAD \
- "Property '%s.%s' doesn't take value '%s'"
-
#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \
"Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")"
#define QERR_QGA_COMMAND_FAILED \
"Guest agent command failed, error was '%s'"
-#define QERR_REPLAY_NOT_SUPPORTED \
- "Record/replay feature is not supported for '%s'"
-
-#define QERR_SET_PASSWD_FAILED \
- "Could not set password"
-
-#define QERR_UNDEFINED_ERROR \
- "An undefined error has occurred"
-
#define QERR_UNSUPPORTED \
"this feature or command is not currently supported"
diff --git a/include/qapi/qmp/qjson.h b/include/qapi/qmp/qjson.h
index 5ebbe5a118..7bd8d2de1b 100644
--- a/include/qapi/qmp/qjson.h
+++ b/include/qapi/qmp/qjson.h
@@ -17,15 +17,15 @@
QObject *qobject_from_json(const char *string, Error **errp);
QObject *qobject_from_vjsonf_nofail(const char *string, va_list ap)
- GCC_FMT_ATTR(1, 0);
+ G_GNUC_PRINTF(1, 0);
QObject *qobject_from_jsonf_nofail(const char *string, ...)
- GCC_FMT_ATTR(1, 2);
+ G_GNUC_PRINTF(1, 2);
QDict *qdict_from_vjsonf_nofail(const char *string, va_list ap)
- GCC_FMT_ATTR(1, 0);
+ G_GNUC_PRINTF(1, 0);
QDict *qdict_from_jsonf_nofail(const char *string, ...)
- GCC_FMT_ATTR(1, 2);
+ G_GNUC_PRINTF(1, 2);
-QString *qobject_to_json(const QObject *obj);
-QString *qobject_to_json_pretty(const QObject *obj);
+GString *qobject_to_json(const QObject *obj);
+GString *qobject_to_json_pretty(const QObject *obj, bool pretty);
#endif /* QJSON_H */
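
qobject_to_json() now returns a GString. Combined with the qdict_unref()/G_DEFINE_AUTOPTR_CLEANUP_FUNC addition in qdict.h above, a sketch (qdict_put_str()/qdict_put_int() are the existing convenience helpers):

    g_autoptr(QDict) dict = qdict_new();
    qdict_put_str(dict, "driver", "file");
    qdict_put_int(dict, "size", 4096);

    g_autoptr(GString) json = qobject_to_json(QOBJECT(dict));
    printf("%s\n", json->str);
    /* both objects are released automatically when they go out of scope */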
diff --git a/include/qapi/qmp/qlist.h b/include/qapi/qmp/qlist.h
index 595b7943e1..e4e985d435 100644
--- a/include/qapi/qmp/qlist.h
+++ b/include/qapi/qmp/qlist.h
@@ -26,6 +26,10 @@ struct QList {
QTAILQ_HEAD(,QListEntry) head;
};
+void qlist_unref(QList *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QList, qlist_unref)
+
#define qlist_append(qlist, obj) \
qlist_append_obj(qlist, QOBJECT(obj))
@@ -51,8 +55,6 @@ QObject *qlist_pop(QList *qlist);
QObject *qlist_peek(QList *qlist);
int qlist_empty(const QList *qlist);
size_t qlist_size(const QList *qlist);
-bool qlist_is_equal(const QObject *x, const QObject *y);
-void qlist_destroy_obj(QObject *obj);
static inline const QListEntry *qlist_first(const QList *qlist)
{
diff --git a/include/qapi/qmp/qnull.h b/include/qapi/qmp/qnull.h
index c1426882c5..7feb7c7d83 100644
--- a/include/qapi/qmp/qnull.h
+++ b/include/qapi/qmp/qnull.h
@@ -26,6 +26,8 @@ static inline QNull *qnull(void)
return qobject_ref(&qnull_);
}
-bool qnull_is_equal(const QObject *x, const QObject *y);
+void qnull_unref(QNull *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNull, qnull_unref)
#endif /* QNULL_H */
diff --git a/include/qapi/qmp/qnum.h b/include/qapi/qmp/qnum.h
index bbae0a5ec8..e86788dd2e 100644
--- a/include/qapi/qmp/qnum.h
+++ b/include/qapi/qmp/qnum.h
@@ -54,6 +54,10 @@ struct QNum {
} u;
};
+void qnum_unref(QNum *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QNum, qnum_unref)
+
QNum *qnum_from_int(int64_t value);
QNum *qnum_from_uint(uint64_t value);
QNum *qnum_from_double(double value);
@@ -68,7 +72,4 @@ double qnum_get_double(QNum *qn);
char *qnum_to_string(QNum *qn);
-bool qnum_is_equal(const QObject *x, const QObject *y);
-void qnum_destroy_obj(QObject *obj);
-
#endif /* QNUM_H */
diff --git a/include/qapi/qmp/qobject.h b/include/qapi/qmp/qobject.h
index fcfd549220..89b97d88bc 100644
--- a/include/qapi/qmp/qobject.h
+++ b/include/qapi/qmp/qobject.h
@@ -45,10 +45,16 @@ struct QObject {
struct QObjectBase_ base;
};
-#define QOBJECT(obj) ({ \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define QOBJECT_INTERNAL(obj, _obj) ({ \
typeof(obj) _obj = (obj); \
- _obj ? container_of(&(_obj)->base, QObject, base) : NULL; \
+ _obj ? container_of(&_obj->base, QObject, base) : NULL; \
})
+#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTFIER(_obj))
/* Required for qobject_to() */
#define QTYPE_CAST_TO_QNull QTYPE_QNULL
@@ -64,14 +70,6 @@ QEMU_BUILD_BUG_MSG(QTYPE__MAX != 7,
#define qobject_to(type, obj) \
((type *)qobject_check_type(obj, glue(QTYPE_CAST_TO_, type)))
-/* Initialize an object to default values */
-static inline void qobject_init(QObject *obj, QType type)
-{
- assert(QTYPE_NONE < type && type < QTYPE__MAX);
- obj->base.refcnt = 1;
- obj->base.type = type;
-}
-
static inline void qobject_ref_impl(QObject *obj)
{
if (obj) {
@@ -90,6 +88,7 @@ bool qobject_is_equal(const QObject *x, const QObject *y);
/**
* qobject_destroy(): Free resources used by the object
+ * For use via qobject_unref() only!
*/
void qobject_destroy(QObject *obj);
diff --git a/include/qapi/qmp/qstring.h b/include/qapi/qmp/qstring.h
index e2e356e5e7..318d815d6a 100644
--- a/include/qapi/qmp/qstring.h
+++ b/include/qapi/qmp/qstring.h
@@ -17,23 +17,17 @@
struct QString {
struct QObjectBase_ base;
- char *string;
- size_t length;
- size_t capacity;
+ const char *string;
};
+void qstring_unref(QString *q);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QString, qstring_unref)
+
QString *qstring_new(void);
QString *qstring_from_str(const char *str);
QString *qstring_from_substr(const char *str, size_t start, size_t end);
-size_t qstring_get_length(const QString *qstring);
+QString *qstring_from_gstring(GString *gstr);
const char *qstring_get_str(const QString *qstring);
-const char *qstring_get_try_str(const QString *qstring);
-const char *qobject_get_try_str(const QObject *qstring);
-void qstring_append_int(QString *qstring, int64_t value);
-void qstring_append(QString *qstring, const char *str);
-void qstring_append_chr(QString *qstring, int c);
-bool qstring_is_equal(const QObject *x, const QObject *y);
-char *qstring_free(QString *qstring, bool return_str);
-void qstring_destroy_obj(QObject *obj);
#endif /* QSTRING_H */
diff --git a/include/qapi/string-output-visitor.h b/include/qapi/string-output-visitor.h
index 268dfe9986..b1ee473b30 100644
--- a/include/qapi/string-output-visitor.h
+++ b/include/qapi/string-output-visitor.h
@@ -26,9 +26,9 @@ typedef struct StringOutputVisitor StringOutputVisitor;
* If everything else succeeds, pass @result to visit_complete() to
* collect the result of the visit.
*
- * The string output visitor does not implement support for visiting
- * QAPI structs, alternates, null, or arbitrary QTypes. It also
- * requires a non-null list argument to visit_start_list().
+ * The string output visitor does not implement support for alternates, null,
+ * or arbitrary QTypes. Struct fields are not shown. It also requires a
+ * non-null list argument to visit_start_list().
*/
Visitor *string_output_visitor_new(bool human, char **result);
diff --git a/include/qapi/type-helpers.h b/include/qapi/type-helpers.h
new file mode 100644
index 0000000000..fc8352cdec
--- /dev/null
+++ b/include/qapi/type-helpers.h
@@ -0,0 +1,22 @@
+/*
+ * QAPI common helper functions
+ *
+ * This file provides helper functions related to types defined
+ * in the QAPI schema.
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include "qapi/qapi-types-common.h"
+
+HumanReadableText *human_readable_text_from_str(GString *str);
+
+/*
+ * Produce and return a NULL-terminated array of strings from @list.
+ * The result is g_malloc()'d and all strings are g_strdup()'d. It
+ * can be freed with g_strfreev(), or by g_auto(GStrv) automatic
+ * cleanup.
+ */
+char **strv_from_str_list(const strList *list);
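
A short usage sketch per the comment above, where 'list' is any strList:

    g_auto(GStrv) argv = strv_from_str_list(list);

    for (char **p = argv; *p; p++) {
        printf("%s\n", *p);
    }
    /* freed automatically via g_auto(GStrv) when argv goes out of scope */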
diff --git a/include/qapi/util.h b/include/qapi/util.h
index a7c3c64148..20dfea8a54 100644
--- a/include/qapi/util.h
+++ b/include/qapi/util.h
@@ -11,15 +11,62 @@
#ifndef QAPI_UTIL_H
#define QAPI_UTIL_H
+typedef enum {
+ QAPI_DEPRECATED,
+ QAPI_UNSTABLE,
+} QapiSpecialFeature;
+
typedef struct QEnumLookup {
const char *const *array;
- int size;
+ const unsigned char *const special_features;
+ const int size;
} QEnumLookup;
const char *qapi_enum_lookup(const QEnumLookup *lookup, int val);
int qapi_enum_parse(const QEnumLookup *lookup, const char *buf,
int def, Error **errp);
+bool qapi_bool_parse(const char *name, const char *value, bool *obj,
+ Error **errp);
int parse_qapi_name(const char *name, bool complete);
+/*
+ * For any GenericList @list, insert @element at the front.
+ *
+ * Note that this macro evaluates @element exactly once, so it is safe
+ * to have side-effects with that argument.
+ */
+#define QAPI_LIST_PREPEND(list, element) do { \
+ typeof(list) _tmp = g_malloc(sizeof(*(list))); \
+ _tmp->value = (element); \
+ _tmp->next = (list); \
+ (list) = _tmp; \
+} while (0)
+
+/*
+ * For any pointer to a GenericList @tail (usually the 'next' member of a
+ * list element), insert @element at the back and update the tail.
+ *
+ * Note that this macro evaluates @element exactly once, so it is safe
+ * to have side-effects with that argument.
+ */
+#define QAPI_LIST_APPEND(tail, element) do { \
+ *(tail) = g_malloc0(sizeof(**(tail))); \
+ (*(tail))->value = (element); \
+ (tail) = &(*(tail))->next; \
+} while (0)
+
+/*
+ * For any GenericList @list, return its length.
+ */
+#define QAPI_LIST_LENGTH(list) \
+ ({ \
+ size_t _len = 0; \
+ typeof(list) _tail; \
+ for (_tail = list; _tail != NULL; _tail = _tail->next) { \
+ _len++; \
+ } \
+ _len; \
+ })
+
#endif
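
A usage sketch of the new list-building macros, using the builtin strList type:

    strList *list = NULL;
    strList **tail = &list;

    QAPI_LIST_APPEND(tail, g_strdup("eth0"));
    QAPI_LIST_APPEND(tail, g_strdup("eth1"));
    QAPI_LIST_PREPEND(list, g_strdup("lo"));     /* ends up first */

    assert(QAPI_LIST_LENGTH(list) == 3);
    qapi_free_strList(list);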
diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h
index 7362c043be..2badec5ba4 100644
--- a/include/qapi/visitor-impl.h
+++ b/include/qapi/visitor-impl.h
@@ -113,9 +113,20 @@ struct Visitor
The core takes care of the return type in the public interface. */
void (*optional)(Visitor *v, const char *name, bool *present);
+ /* Optional */
+ bool (*policy_reject)(Visitor *v, const char *name,
+ unsigned special_features, Error **errp);
+
+ /* Optional */
+ bool (*policy_skip)(Visitor *v, const char *name,
+ unsigned special_features);
+
/* Must be set */
VisitorType type;
+ /* Optional */
+ struct CompatPolicy compat_policy;
+
/* Must be set for output visitors, optional otherwise. */
void (*complete)(Visitor *v, void *opaque);
diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h
index ebc19ede7f..27b85d4700 100644
--- a/include/qapi/visitor.h
+++ b/include/qapi/visitor.h
@@ -16,6 +16,7 @@
#define QAPI_VISITOR_H
#include "qapi/qapi-builtin-types.h"
+#include "qapi/qapi-types-compat.h"
/*
* The QAPI schema defines both a set of C data types, and a QMP wire
@@ -38,7 +39,7 @@
* limitations; see the documentation for each visitor for more
* details on what it supports. Also, see visitor-impl.h for the
* callback contracts implemented by each visitor, and
- * docs/devel/qapi-code-gen.txt for more about the QAPI code
+ * docs/devel/qapi-code-gen.rst for more about the QAPI code
* generator.
*
* All of the visitors are created via:
@@ -460,6 +461,41 @@ void visit_end_alternate(Visitor *v, void **obj);
bool visit_optional(Visitor *v, const char *name, bool *present);
/*
+ * Should we reject member @name due to policy?
+ *
+ * @special_features is the member's special features encoded as a
+ * bitset of QapiSpecialFeature.
+ *
+ * @name must not be NULL. This function is only useful between
+ * visit_start_struct() and visit_end_struct(), since only objects
+ * have deprecated members.
+ */
+bool visit_policy_reject(Visitor *v, const char *name,
+ unsigned special_features, Error **errp);
+
+/*
+ *
+ * Should we skip member @name due to policy?
+ *
+ * @special_features is the member's special features encoded as a
+ * bitset of QapiSpecialFeature.
+ *
+ * @name must not be NULL. This function is only useful between
+ * visit_start_struct() and visit_end_struct(), since only objects
+ * have deprecated members.
+ */
+bool visit_policy_skip(Visitor *v, const char *name,
+ unsigned special_features);
+
+/*
+ * Set policy for handling deprecated management interfaces.
+ *
+ * Intended use: call visit_set_policy(v, &compat_policy) when
+ * visiting management interface input or output.
+ */
+void visit_set_policy(Visitor *v, CompatPolicy *policy);
+
+/*
* Visit an enum value.
*
* @name expresses the relationship of this enum to its parent
diff --git a/include/qemu-common.h b/include/qemu-common.h
deleted file mode 100644
index bb9496bd80..0000000000
--- a/include/qemu-common.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * This file is supposed to be included only by .c files. No header file should
- * depend on qemu-common.h, as this would easily lead to circular header
- * dependencies.
- *
- * If a header file uses a definition from qemu-common.h, that definition
- * must be moved to a separate header file, and the header that uses it
- * must include that header.
- */
-#ifndef QEMU_COMMON_H
-#define QEMU_COMMON_H
-
-#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR)
-
-/* Copyright string for -version arguments, About dialogs, etc */
-#define QEMU_COPYRIGHT "Copyright (c) 2003-2020 " \
- "Fabrice Bellard and the QEMU Project developers"
-
-/* Bug reporting information for --help arguments, About dialogs, etc */
-#define QEMU_HELP_BOTTOM \
- "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \
- "More information on the QEMU project at <https://qemu.org>."
-
-/* main function, renamed */
-#if defined(CONFIG_COCOA)
-int qemu_main(int argc, char **argv, char **envp);
-#endif
-
-void qemu_get_timedate(struct tm *tm, int offset);
-int qemu_timedate_diff(struct tm *tm);
-
-void *qemu_oom_check(void *ptr);
-
-ssize_t qemu_write_full(int fd, const void *buf, size_t count)
- QEMU_WARN_UNUSED_RESULT;
-
-#ifndef _WIN32
-int qemu_pipe(int pipefd[2]);
-/* like openpty() but also makes it raw; return master fd */
-int qemu_openpty_raw(int *aslave, char *pty_name);
-#endif
-
-#ifdef _WIN32
-/* MinGW needs type casts for the 'buf' and 'optval' arguments. */
-#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
- getsockopt(sockfd, level, optname, (void *)optval, optlen)
-#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
- setsockopt(sockfd, level, optname, (const void *)optval, optlen)
-#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags)
-#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
- sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen)
-#else
-#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
- getsockopt(sockfd, level, optname, optval, optlen)
-#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
- setsockopt(sockfd, level, optname, optval, optlen)
-#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags)
-#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
- sendto(sockfd, buf, len, flags, destaddr, addrlen)
-#endif
-
-void cpu_exec_init_all(void);
-void cpu_exec_step_atomic(CPUState *cpu);
-
-/**
- * set_preferred_target_page_bits:
- * @bits: number of bits needed to represent an address within the page
- *
- * Set the preferred target page size (the actual target page
- * size may be smaller than any given CPU's preference).
- * Returns true on success, false on failure (which can only happen
- * if this is called after the system has already finalized its
- * choice of page size and the requested page size is smaller than that).
- */
-bool set_preferred_target_page_bits(int bits);
-
-/**
- * finalize_target_page_bits:
- * Commit the final value set by set_preferred_target_page_bits.
- */
-void finalize_target_page_bits(void);
-
-/**
- * Sends a (part of) iovec down a socket, yielding when the socket is full, or
- * Receives data into a (part of) iovec from a socket,
- * yielding when there is no data in the socket.
- * The same interface as qemu_sendv_recvv(), with added yielding.
- * XXX should mark these as coroutine_fn
- */
-ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
- size_t offset, size_t bytes, bool do_send);
-#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
- qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
-#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
- qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)
-
-/**
- * The same as above, but with just a single buffer
- */
-ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send);
-#define qemu_co_recv(sockfd, buf, bytes) \
- qemu_co_send_recv(sockfd, buf, bytes, false)
-#define qemu_co_send(sockfd, buf, bytes) \
- qemu_co_send_recv(sockfd, buf, bytes, true)
-
-void qemu_progress_init(int enabled, float min_skip);
-void qemu_progress_end(void);
-void qemu_progress_print(float delta, int max);
-const char *qemu_get_vm_name(void);
-
-#define QEMU_FILE_TYPE_BIOS 0
-#define QEMU_FILE_TYPE_KEYMAP 1
-/**
- * qemu_find_file:
- * @type: QEMU_FILE_TYPE_BIOS (for BIOS, VGA BIOS)
- * or QEMU_FILE_TYPE_KEYMAP (for keymaps).
- * @name: Relative or absolute file name
- *
- * If @name exists on disk as an absolute path, or a path relative
- * to the current directory, then returns @name unchanged.
- * Otherwise searches for @name file in the data directories, either
- * configured at build time (DATADIR) or registered with the -L command
- * line option.
- *
- * The caller must use g_free() to free the returned data when it is
- * no longer required.
- *
- * Returns: a path that can access @name, or NULL if no matching file exists.
- */
-char *qemu_find_file(int type, const char *name);
-
-/* OS specific functions */
-void os_setup_early_signal_handling(void);
-char *os_find_datadir(void);
-int os_parse_cmd_args(int index, const char *optarg);
-
-/*
- * Hexdump a buffer to a file. An optional string prefix is added to every line
- */
-
-void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size);
-
-/*
- * helper to parse debug environment variables
- */
-int parse_debug_env(const char *name, int max, int initial);
-
-const char *qemu_ether_ntoa(const MACAddr *mac);
-void page_size_init(void);
-
-/* returns non-zero if dump is in progress, otherwise zero is
- * returned. */
-bool dump_in_progress(void);
-
-#endif
diff --git a/include/qemu-main.h b/include/qemu-main.h
new file mode 100644
index 0000000000..940960a7db
--- /dev/null
+++ b/include/qemu-main.h
@@ -0,0 +1,11 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_MAIN_H
+#define QEMU_MAIN_H
+
+int qemu_default_main(void);
+extern int (*qemu_main)(void);
+
+#endif /* QEMU_MAIN_H */
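
One plausible use of the indirection, assuming the generic main() dispatches through the qemu_main pointer: a UI front end that must own the process main thread installs its own entry point early on. All names below are illustrative:

    static int my_ui_main(void)
    {
        /* ...run the UI event loop here, drive QEMU from another thread,
         * or fall back to the default behaviour... */
        return qemu_default_main();
    }

    static void my_display_early_init(void)
    {
        qemu_main = my_ui_main;   /* called instead of qemu_default_main() */
    }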
diff --git a/include/sysemu/accel.h b/include/qemu/accel.h
index e08b8ab8fa..972a849a2b 100644
--- a/include/sysemu/accel.h
+++ b/include/qemu/accel.h
@@ -20,16 +20,16 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
-#ifndef HW_ACCEL_H
-#define HW_ACCEL_H
+#ifndef QEMU_ACCEL_H
+#define QEMU_ACCEL_H
#include "qom/object.h"
#include "exec/hwaddr.h"
-typedef struct AccelState {
+struct AccelState {
/*< private >*/
Object parent_obj;
-} AccelState;
+};
typedef struct AccelClass {
/*< private >*/
@@ -37,12 +37,18 @@ typedef struct AccelClass {
/*< public >*/
const char *name;
-#ifndef CONFIG_USER_ONLY
int (*init_machine)(MachineState *ms);
+#ifndef CONFIG_USER_ONLY
void (*setup_post)(MachineState *ms, AccelState *accel);
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
#endif
+ bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
+ void (*cpu_common_unrealize)(CPUState *cpu);
+
+ /* gdbstub related hooks */
+ int (*gdbstub_supported_sstep_flags)(void);
+
bool *allowed;
/*
* Array of global properties that would be applied when specific
@@ -67,11 +73,43 @@ typedef struct AccelClass {
OBJECT_GET_CLASS(AccelClass, (obj), TYPE_ACCEL)
AccelClass *accel_find(const char *opt_name);
+AccelState *current_accel(void);
+const char *current_accel_name(void);
+
+void accel_init_interfaces(AccelClass *ac);
+
+#ifndef CONFIG_USER_ONLY
int accel_init_machine(AccelState *accel, MachineState *ms);
/* Called just before os_setup_post (ie just before drop OS privs) */
void accel_setup_post(MachineState *ms);
+#endif /* !CONFIG_USER_ONLY */
-AccelState *current_accel(void);
+/**
+ * accel_cpu_instance_init:
+ * @cpu: The CPU that needs to do accel-specific object initializations.
+ */
+void accel_cpu_instance_init(CPUState *cpu);
-#endif
+/**
+ * accel_cpu_common_realize:
+ * @cpu: The CPU that needs to call accel-specific cpu realization.
+ * @errp: currently unused.
+ */
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp);
+
+/**
+ * accel_cpu_common_unrealize:
+ * @cpu: The CPU that needs to call accel-specific cpu unrealization.
+ */
+void accel_cpu_common_unrealize(CPUState *cpu);
+
+/**
+ * accel_supported_gdbstub_sstep_flags:
+ *
+ * Returns the supported single step modes for the configured
+ * accelerator.
+ */
+int accel_supported_gdbstub_sstep_flags(void);
+
+#endif /* QEMU_ACCEL_H */
diff --git a/include/qemu/async-teardown.h b/include/qemu/async-teardown.h
new file mode 100644
index 0000000000..b281da005b
--- /dev/null
+++ b/include/qemu/async-teardown.h
@@ -0,0 +1,20 @@
+/*
+ * Asynchronous teardown
+ *
+ * Copyright IBM, Corp. 2022
+ *
+ * Authors:
+ * Claudio Imbrenda <imbrenda@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_ASYNC_TEARDOWN_H
+#define QEMU_ASYNC_TEARDOWN_H
+
+#ifdef CONFIG_LINUX
+void init_async_teardown(void);
+#endif
+
+#endif
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index ff72db5115..99110abefb 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -8,13 +8,15 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
- * See docs/devel/atomics.txt for discussion about the guarantees each
+ * See docs/devel/atomics.rst for discussion about the guarantees each
* atomic primitive is meant to provide.
*/
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H
+#include "compiler.h"
+
/* Compiler barrier */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
@@ -60,8 +62,9 @@
(unsigned short)1, \
(expr)+0))))))
-#ifdef __ATOMIC_RELAXED
-/* For C11 atomic ops */
+#ifndef __ATOMIC_RELAXED
+#error "Expecting C11 atomic ops"
+#endif
/* Manual memory barriers
*
@@ -80,7 +83,7 @@
* no processors except Alpha need a barrier here. Leave it in if
* using Thread Sanitizer to avoid warnings, otherwise optimize it away.
*/
-#if defined(__SANITIZE_THREAD__)
+#ifdef QEMU_SANITIZE_THREAD
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
@@ -125,381 +128,199 @@
* no effect on the generated code but not using the atomic primitives
* will get flagged by sanitizers as a violation.
*/
-#define atomic_read__nocheck(ptr) \
+#define qatomic_read__nocheck(ptr) \
__atomic_load_n(ptr, __ATOMIC_RELAXED)
-#define atomic_read(ptr) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_read__nocheck(ptr); \
+#define qatomic_read(ptr) \
+ ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_read__nocheck(ptr); \
})
-#define atomic_set__nocheck(ptr, i) \
+#define qatomic_set__nocheck(ptr, i) \
__atomic_store_n(ptr, i, __ATOMIC_RELAXED)
-#define atomic_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_set__nocheck(ptr, i); \
+#define qatomic_set(ptr, i) do { \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_set__nocheck(ptr, i); \
} while(0)
/* See above: most compilers currently treat consume and acquire the
- * same, but this slows down atomic_rcu_read unnecessarily.
+ * same, but this slows down qatomic_rcu_read unnecessarily.
*/
-#ifdef __SANITIZE_THREAD__
-#define atomic_rcu_read__nocheck(ptr, valptr) \
+#ifdef QEMU_SANITIZE_THREAD
+#define qatomic_rcu_read__nocheck(ptr, valptr) \
__atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
-#define atomic_rcu_read__nocheck(ptr, valptr) \
- __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
+#define qatomic_rcu_read__nocheck(ptr, valptr) \
+ __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
smp_read_barrier_depends();
#endif
-#define atomic_rcu_read(ptr) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- typeof_strip_qual(*ptr) _val; \
- atomic_rcu_read__nocheck(ptr, &_val); \
- _val; \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define qatomic_rcu_read_internal(ptr, _val) \
+ ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ typeof_strip_qual(*ptr) _val; \
+ qatomic_rcu_read__nocheck(ptr, &_val); \
+ _val; \
})
+#define qatomic_rcu_read(ptr) \
+ qatomic_rcu_read_internal((ptr), MAKE_IDENTFIER(_val))
-#define atomic_rcu_set(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
+#define qatomic_rcu_set(ptr, i) do { \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
-#define atomic_load_acquire(ptr) \
+#define qatomic_load_acquire(ptr) \
({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
__atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \
_val; \
})
-#define atomic_store_release(ptr, i) do { \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
+#define qatomic_store_release(ptr, i) do { \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
/* All the remaining operations are fully sequentially consistent */
-#define atomic_xchg__nocheck(ptr, i) ({ \
+#define qatomic_xchg__nocheck(ptr, i) ({ \
__atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \
})
-#define atomic_xchg(ptr, i) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_xchg__nocheck(ptr, i); \
+#define qatomic_xchg(ptr, i) ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_xchg__nocheck(ptr, i); \
})
-/* Returns the eventual value, failed or not */
-#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \
+/* Returns the old value of '*ptr' (whether the cmpxchg failed or not) */
+#define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \
typeof_strip_qual(*ptr) _old = (old); \
(void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
_old; \
})
-#define atomic_cmpxchg(ptr, old, new) ({ \
- QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_cmpxchg__nocheck(ptr, old, new); \
+#define qatomic_cmpxchg(ptr, old, new) ({ \
+ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
+ qatomic_cmpxchg__nocheck(ptr, old, new); \
})
/* Provide shorter names for GCC atomic builtins, return old value */
-#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
-#endif
-
-#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+
+#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
+
+#define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
/* And even shorter names that return void. */
-#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_inc(ptr) \
+ ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_dec(ptr) \
+ ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_add(ptr, n) \
+ ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_sub(ptr, n) \
+ ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_and(ptr, n) \
+ ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_or(ptr, n) \
+ ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_xor(ptr, n) \
+ ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
-#else /* __ATOMIC_RELAXED */
+#define smp_wmb() smp_mb_release()
+#define smp_rmb() smp_mb_acquire()
/*
- * We use GCC builtin if it's available, as that can use mfence on
- * 32-bit as well, e.g. if built with -march=pentium-m. However, on
- * i386 the spec is buggy, and the implementation followed it until
- * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
+ * SEQ_CST is weaker than the older __sync_* builtins and Linux
+ * kernel read-modify-write atomics. Provide a macro to obtain
+ * the same semantics.
*/
-#if defined(__i386__) || defined(__x86_64__)
-#if !QEMU_GNUC_PREREQ(4, 4)
-#if defined __x86_64__
-#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
+#if !defined(QEMU_SANITIZE_THREAD) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+# define smp_mb__before_rmw() signal_barrier()
+# define smp_mb__after_rmw() signal_barrier()
#else
-#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
-#endif
+# define smp_mb__before_rmw() smp_mb()
+# define smp_mb__after_rmw() smp_mb()
#endif
-#endif
-
-
-#ifdef __alpha__
-#define smp_read_barrier_depends() asm volatile("mb":::"memory")
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/*
- * Because of the strongly ordered storage model, wmb() and rmb() are nops
- * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
- * qemu memory or non-temporal load/stores from C code.
+ * On some architectures, qatomic_set_mb is more efficient than a store
+ * plus a fence.
*/
-#define smp_mb_release() barrier()
-#define smp_mb_acquire() barrier()
-/*
- * __sync_lock_test_and_set() is documented to be an acquire barrier only,
- * but it is a full barrier at the hardware level. Add a compiler barrier
- * to make it a full barrier also at the compiler level.
- */
-#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
-
-#elif defined(_ARCH_PPC)
-
-/*
- * We use an eieio() for wmb() on powerpc. This assumes we don't
- * need to order cacheable and non-cacheable stores with respect to
- * each other.
- *
- * smp_mb has the same problem as on x86 for not-very-new GCC
- * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
- */
-#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
-#if defined(__powerpc64__)
-#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
-#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
+#if !defined(QEMU_SANITIZE_THREAD) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+# define qatomic_set_mb(ptr, i) \
+ ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
#else
-#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; })
-#endif
-#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })
-
-#endif /* _ARCH_PPC */
-
-/*
- * For (host) platforms we don't have explicit barrier definitions
- * for, we use the gcc __sync_synchronize() primitive to generate a
- * full barrier. This should be safe on all platforms, though it may
- * be overkill for smp_mb_acquire() and smp_mb_release().
- */
-#ifndef smp_mb
-#define smp_mb() __sync_synchronize()
+# define qatomic_set_mb(ptr, i) \
+ ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
-#ifndef smp_mb_acquire
-#define smp_mb_acquire() __sync_synchronize()
-#endif
-
-#ifndef smp_mb_release
-#define smp_mb_release() __sync_synchronize()
-#endif
-
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() barrier()
-#endif
-
-#ifndef signal_barrier
-#define signal_barrier() barrier()
-#endif
-
-/* These will only be atomic if the processor does the fetch or store
- * in a single issue memory operation
- */
-#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
-#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
-
-#define atomic_read(ptr) atomic_read__nocheck(ptr)
-#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i)
-
-/**
- * atomic_rcu_read - reads a RCU-protected pointer to a local variable
- * into a RCU read-side critical section. The pointer can later be safely
- * dereferenced within the critical section.
- *
- * This ensures that the pointer copy is invariant thorough the whole critical
- * section.
- *
- * Inserts memory barriers on architectures that require them (currently only
- * Alpha) and documents which pointers are protected by RCU.
- *
- * atomic_rcu_read also includes a compiler barrier to ensure that
- * value-speculative optimizations (e.g. VSS: Value Speculation
- * Scheduling) does not perform the data read before the pointer read
- * by speculating the value of the pointer.
- *
- * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
- */
-#define atomic_rcu_read(ptr) ({ \
- typeof(*ptr) _val = atomic_read(ptr); \
- smp_read_barrier_depends(); \
- _val; \
+#define qatomic_fetch_inc_nonzero(ptr) ({ \
+ typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr); \
+ while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
+ _oldn = qatomic_read(ptr); \
+ } \
+ _oldn; \
})
-/**
- * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
- * meant to be read by RCU read-side critical sections.
+/*
+ * Abstractions to access atomically (i.e. "once") i64/u64 variables.
*
- * Documents which pointers will be dereferenced by RCU read-side critical
- * sections and adds the required memory barriers on architectures requiring
- * them. It also makes sure the compiler does not reorder code initializing the
- * data structure before its publication.
+ * The i386 abi is odd in that by default members are only aligned to
+ * 4 bytes, which means that 8-byte types can wind up mis-aligned.
+ * Clang will then warn about this, and emit a call into libatomic.
*
- * Should match atomic_rcu_read().
+ * Use of these types in structures when they will be used with atomic
+ * operations can avoid this.
*/
-#define atomic_rcu_set(ptr, i) do { \
- smp_wmb(); \
- atomic_set(ptr, i); \
-} while (0)
-
-#define atomic_load_acquire(ptr) ({ \
- typeof(*ptr) _val = atomic_read(ptr); \
- smp_mb_acquire(); \
- _val; \
-})
-
-#define atomic_store_release(ptr, i) do { \
- smp_mb_release(); \
- atomic_set(ptr, i); \
-} while (0)
-
-#ifndef atomic_xchg
-#if defined(__clang__)
-#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
-#else
-/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
-#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
-#endif
-#endif
-#define atomic_xchg__nocheck atomic_xchg
-
-/* Provide shorter names for GCC atomic builtins. */
-#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
-#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
-#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
-#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
-#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
-#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
-#endif
-
-#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
-#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
-#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
-#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
-#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
-#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
-#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
-
-#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
-#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)
+typedef int64_t aligned_int64_t __attribute__((aligned(8)));
+typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
-/* And even shorter names that return void. */
-#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
-#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
-#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
-#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
-#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
-#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
-#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
-
-#endif /* __ATOMIC_RELAXED */
-
-#ifndef smp_wmb
-#define smp_wmb() smp_mb_release()
-#endif
-#ifndef smp_rmb
-#define smp_rmb() smp_mb_acquire()
-#endif
-
-/* This is more efficient than a store plus a fence. */
-#if !defined(__SANITIZE_THREAD__)
-#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))
-#endif
-#endif
-
-/* atomic_mb_read/set semantics map Java volatile variables. They are
- * less expensive on some platforms (notably POWER) than fully
- * sequentially consistent operations.
- *
- * As long as they are used as paired operations they are safe to
- * use. See docs/devel/atomics.txt for more discussion.
- */
-
-#ifndef atomic_mb_read
-#define atomic_mb_read(ptr) \
- atomic_load_acquire(ptr)
-#endif
-
-#ifndef atomic_mb_set
-#define atomic_mb_set(ptr, i) do { \
- atomic_store_release(ptr, i); \
- smp_mb(); \
-} while(0)
-#endif
-
-#define atomic_fetch_inc_nonzero(ptr) ({ \
- typeof_strip_qual(*ptr) _oldn = atomic_read(ptr); \
- while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
- _oldn = atomic_read(ptr); \
- } \
- _oldn; \
-})
-
-/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
#ifdef CONFIG_ATOMIC64
-static inline int64_t atomic_read_i64(const int64_t *ptr)
-{
- /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
- return atomic_read__nocheck(ptr);
-}
-
-static inline uint64_t atomic_read_u64(const uint64_t *ptr)
-{
- return atomic_read__nocheck(ptr);
-}
-
-static inline void atomic_set_i64(int64_t *ptr, int64_t val)
-{
- atomic_set__nocheck(ptr, val);
-}
-
-static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
-{
- atomic_set__nocheck(ptr, val);
-}
-
-static inline void atomic64_init(void)
+/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
+#define qatomic_read_i64(P) \
+ _Generic(*(P), int64_t: qatomic_read__nocheck(P))
+#define qatomic_read_u64(P) \
+ _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
+#define qatomic_set_i64(P, V) \
+ _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
+#define qatomic_set_u64(P, V) \
+ _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))
+
+static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
-int64_t atomic_read_i64(const int64_t *ptr);
-uint64_t atomic_read_u64(const uint64_t *ptr);
-void atomic_set_i64(int64_t *ptr, int64_t val);
-void atomic_set_u64(uint64_t *ptr, uint64_t val);
-void atomic64_init(void);
+int64_t qatomic_read_i64(const int64_t *ptr);
+uint64_t qatomic_read_u64(const uint64_t *ptr);
+void qatomic_set_i64(int64_t *ptr, int64_t val);
+void qatomic_set_u64(uint64_t *ptr, uint64_t val);
+void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
#endif /* QEMU_ATOMIC_H */
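A minimal sketch of the renamed qatomic_* API in use; the Node type, the
published pointer and the reference count below are invented for
illustration.

#include "qemu/osdep.h"
#include "qemu/atomic.h"

typedef struct Node {
    int value;
} Node;

static Node *head;          /* pointer published to readers */
static unsigned refcount;

static void publish(Node *n)
{
    /* Writer side: make the fully initialized node visible. */
    qatomic_rcu_set(&head, n);
}

static int peek(void)
{
    /* Reader side: consume/acquire ordering is handled by the macro. */
    Node *n = qatomic_rcu_read(&head);
    return n ? n->value : -1;
}

static bool try_ref(void)
{
    /* cmpxchg retry loop; qatomic_cmpxchg() returns the value it saw. */
    unsigned old = qatomic_read(&refcount);

    while (old != 0) {
        unsigned seen = qatomic_cmpxchg(&refcount, old, old + 1);
        if (seen == old) {
            return true;
        }
        old = seen;
    }
    return false;
}

try_ref() is the open-coded equivalent of the qatomic_fetch_inc_nonzero()
macro defined above.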
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index 6b34484e15..88af6d4ea3 100644
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -6,7 +6,7 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
- * See docs/devel/atomics.txt for discussion about the guarantees each
+ * See docs/devel/atomics.rst for discussion about the guarantees each
* atomic primitive is meant to provide.
*/
@@ -16,6 +16,23 @@
#include "qemu/int128.h"
/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x. We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled. See
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
+/*
* GCC is a house divided about supporting large atomic operations.
*
* For hosts that only have large compare-and-swap, a legalistic reading
@@ -26,8 +43,8 @@
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
*
* This interpretation is not especially helpful for QEMU.
- * For softmmu, all RAM is always read/write from the hypervisor.
- * For user-only, if the guest doesn't implement such an __atomic_read
+ * For system-mode, all RAM is always read/write from the hypervisor.
+ * For user-mode, if the guest doesn't implement such an __atomic_read
* then the host need not worry about it either.
*
* Moreover, using libatomic is not an option, because its interface is
@@ -41,115 +58,7 @@
* Therefore, special case each platform.
*/
-#if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- return atomic_cmpxchg__nocheck(ptr, cmp, new);
-}
-# define HAVE_CMPXCHG128 1
-#elif defined(CONFIG_CMPXCHG128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- return __sync_val_compare_and_swap_16(ptr, cmp, new);
-}
-# define HAVE_CMPXCHG128 1
-#elif defined(__aarch64__)
-/* Through gcc 8, aarch64 has no support for 128-bit at all. */
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
-{
- uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
- uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
- uint64_t oldl, oldh;
- uint32_t tmp;
-
- asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
- "cmp %[oldl], %[cmpl]\n\t"
- "ccmp %[oldh], %[cmph], #0, eq\n\t"
- "b.ne 1f\n\t"
- "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
- "cbnz %w[tmp], 0b\n"
- "1:"
- : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
- [oldl] "=&r"(oldl), [oldh] "=&r"(oldh)
- : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
- [newl] "r"(newl), [newh] "r"(newh)
- : "memory", "cc");
-
- return int128_make128(oldl, oldh);
-}
-# define HAVE_CMPXCHG128 1
-#else
-/* Fallback definition that must be optimized away, or error. */
-Int128 QEMU_ERROR("unsupported atomic")
- atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
-# define HAVE_CMPXCHG128 0
-#endif /* Some definition for HAVE_CMPXCHG128 */
-
-
-#if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_read(Int128 *ptr)
-{
- return atomic_read__nocheck(ptr);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- atomic_set__nocheck(ptr, val);
-}
-
-# define HAVE_ATOMIC128 1
-#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
-/* We can do better than cmpxchg for AArch64. */
-static inline Int128 atomic16_read(Int128 *ptr)
-{
- uint64_t l, h;
- uint32_t tmp;
-
- /* The load must be paired with the store to guarantee not tearing. */
- asm("0: ldxp %[l], %[h], %[mem]\n\t"
- "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
- "cbnz %w[tmp], 0b"
- : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
-
- return int128_make128(l, h);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- uint64_t l = int128_getlo(val), h = int128_gethi(val);
- uint64_t t1, t2;
-
- /* Load into temporaries to acquire the exclusive access lock. */
- asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
- "stxp %w[t1], %[l], %[h], %[mem]\n\t"
- "cbnz %w[t1], 0b"
- : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
- : [l] "r"(l), [h] "r"(h));
-}
-
-# define HAVE_ATOMIC128 1
-#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
-static inline Int128 atomic16_read(Int128 *ptr)
-{
- /* Maybe replace 0 with 0, returning the old value. */
- return atomic16_cmpxchg(ptr, 0, 0);
-}
-
-static inline void atomic16_set(Int128 *ptr, Int128 val)
-{
- Int128 old = *ptr, cmp;
- do {
- cmp = old;
- old = atomic16_cmpxchg(ptr, cmp, val);
- } while (old != cmp);
-}
-
-# define HAVE_ATOMIC128 1
-#else
-/* Fallback definitions that must be optimized away, or error. */
-Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr);
-void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
-# define HAVE_ATOMIC128 0
-#endif /* Some definition for HAVE_ATOMIC128 */
+#include "host/atomic128-cas.h"
+#include "host/atomic128-ldst.h"
#endif /* QEMU_ATOMIC128_H */
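Sketch of a caller built on the relocated helpers; atomic16_cmpxchg() is
assumed to keep the signature it had in the code removed above.

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"

/* Atomically add @n to a 16-byte counter using a compare-and-swap loop. */
static void add_to_counter16(Int128 *ptr, uint64_t n)
{
    Int128 old = *ptr;    /* a stale first guess is fine, the loop corrects it */

    for (;;) {
        Int128 new = int128_add(old, int128_make64(n));
        Int128 seen = atomic16_cmpxchg(ptr, old, new);

        if (int128_eq(seen, old)) {
            break;
        }
        old = seen;
    }
}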
diff --git a/include/qemu/base64.h b/include/qemu/base64.h
index 0a3c5c9c53..46a75fbbeb 100644
--- a/include/qemu/base64.h
+++ b/include/qemu/base64.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 82a1d2f41f..97806811ee 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -22,23 +22,23 @@
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_to_le(dst, src, nbits) Convert bitmap to little endian
* bitmap_from_le(dst, src, nbits) Convert bitmap from little endian
* bitmap_copy_with_src_offset(dst, src, offset, nbits)
@@ -50,17 +50,17 @@
/*
* Also the following operations apply to bitmaps.
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
@@ -253,6 +253,7 @@ void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,
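A short usage sketch of the operations tabulated above, including the newly
declared bitmap_test_and_clear(); bitmap_new() is the zero-initializing
allocator provided elsewhere in this header.

#include "qemu/osdep.h"
#include "qemu/bitmap.h"

#define EXAMPLE_NBITS 1024

static void bitmap_example(void)
{
    unsigned long *map = bitmap_new(EXAMPLE_NBITS);

    bitmap_set(map, 10, 4);                    /* set bits 10..13 */
    g_assert(test_bit(12, map));

    /* Walk every set bit. */
    for (unsigned long b = find_first_bit(map, EXAMPLE_NBITS);
         b < EXAMPLE_NBITS;
         b = find_next_bit(map, EXAMPLE_NBITS, b + 1)) {
        /* bit b is set */
    }

    /* New in this series: non-atomic test-and-clear over a range. */
    g_assert(bitmap_test_and_clear(map, 10, 4));
    g_assert(bitmap_empty(map, EXAMPLE_NBITS));

    g_free(map);
}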
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index f55ce8b320..2c0a2fe751 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -51,7 +51,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
unsigned long mask = BIT_MASK(nr);
unsigned long *p = addr + BIT_WORD(nr);
- atomic_or(p, mask);
+ qatomic_or(p, mask);
}
/**
@@ -68,6 +68,19 @@ static inline void clear_bit(long nr, unsigned long *addr)
}
/**
+ * clear_bit_atomic - Clears a bit in memory atomically
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit_atomic(long nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+
+ return qatomic_and(p, ~mask);
+}
+
+/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
@@ -140,7 +153,8 @@ static inline int test_bit(long nr, const unsigned long *addr)
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first set bit, or size.
+ * Returns the bit number of the last set bit,
+ * or @size if there is no set bit in the bitmap.
*/
unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
@@ -150,6 +164,9 @@ unsigned long find_last_bit(const unsigned long *addr,
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next set bit,
+ * or @size if there are no further set bits in the bitmap.
*/
unsigned long find_next_bit(const unsigned long *addr,
unsigned long size,
@@ -160,6 +177,9 @@ unsigned long find_next_bit(const unsigned long *addr,
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next cleared bit,
+ * or @size if there are no further clear bits in the bitmap.
*/
unsigned long find_next_zero_bit(const unsigned long *addr,
@@ -171,7 +191,8 @@ unsigned long find_next_zero_bit(const unsigned long *addr,
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first set bit.
+ * Returns the bit number of the first set bit,
+ * or @size if there is no set bit in the bitmap.
*/
static inline unsigned long find_first_bit(const unsigned long *addr,
unsigned long size)
@@ -194,7 +215,8 @@ static inline unsigned long find_first_bit(const unsigned long *addr,
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first cleared bit.
+ * Returns the bit number of the first cleared bit,
+ * or @size if there is no clear bit in the bitmap.
*/
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size)
@@ -209,7 +231,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
*/
static inline uint8_t rol8(uint8_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((8 - shift) & 7));
+ return (word << (shift & 7)) | (word >> (-shift & 7));
}
/**
@@ -219,7 +241,7 @@ static inline uint8_t rol8(uint8_t word, unsigned int shift)
*/
static inline uint8_t ror8(uint8_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((8 - shift) & 7));
+ return (word >> (shift & 7)) | (word << (-shift & 7));
}
/**
@@ -229,7 +251,7 @@ static inline uint8_t ror8(uint8_t word, unsigned int shift)
*/
static inline uint16_t rol16(uint16_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((16 - shift) & 15));
+ return (word << (shift & 15)) | (word >> (-shift & 15));
}
/**
@@ -239,7 +261,7 @@ static inline uint16_t rol16(uint16_t word, unsigned int shift)
*/
static inline uint16_t ror16(uint16_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((16 - shift) & 15));
+ return (word >> (shift & 15)) | (word << (-shift & 15));
}
/**
@@ -249,7 +271,7 @@ static inline uint16_t ror16(uint16_t word, unsigned int shift)
*/
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((32 - shift) & 31));
+ return (word << (shift & 31)) | (word >> (-shift & 31));
}
/**
@@ -259,7 +281,7 @@ static inline uint32_t rol32(uint32_t word, unsigned int shift)
*/
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((32 - shift) & 31));
+ return (word >> (shift & 31)) | (word << (-shift & 31));
}
/**
@@ -269,7 +291,7 @@ static inline uint32_t ror32(uint32_t word, unsigned int shift)
*/
static inline uint64_t rol64(uint64_t word, unsigned int shift)
{
- return (word << shift) | (word >> ((64 - shift) & 63));
+ return (word << (shift & 63)) | (word >> (-shift & 63));
}
/**
@@ -279,7 +301,36 @@ static inline uint64_t rol64(uint64_t word, unsigned int shift)
*/
static inline uint64_t ror64(uint64_t word, unsigned int shift)
{
- return (word >> shift) | (word << ((64 - shift) & 63));
+ return (word >> (shift & 63)) | (word << (-shift & 63));
+}
+
+/**
+ * hswap32 - swap 16-bit halfwords within a 32-bit value
+ * @h: value to swap
+ */
+static inline uint32_t hswap32(uint32_t h)
+{
+ return rol32(h, 16);
+}
+
+/**
+ * hswap64 - swap 16-bit halfwords within a 64-bit value
+ * @h: value to swap
+ */
+static inline uint64_t hswap64(uint64_t h)
+{
+ uint64_t m = 0x0000ffff0000ffffull;
+ h = rol64(h, 32);
+ return ((h & m) << 16) | ((h >> 16) & m);
+}
+
+/**
+ * wswap64 - swap 32-bit words within a 64-bit value
+ * @h: value to swap
+ */
+static inline uint64_t wswap64(uint64_t h)
+{
+ return rol64(h, 32);
}
/**
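The reworked rotates reduce the shift count modulo the word width, so a
full-width rotate is now well defined; a few sanity checks of the rotates
and of the new halfword/word swaps, with values worked out from the
definitions above:

#include "qemu/osdep.h"
#include "qemu/bitops.h"

static void rot_swap_example(void)
{
    uint32_t x = 0x12345678;

    g_assert(rol32(x, 8) == 0x34567812);
    g_assert(ror32(x, 8) == 0x78123456);
    g_assert(rol32(x, 32) == x);    /* previously undefined behaviour */

    g_assert(hswap32(0x11223344) == 0x33441122);
    g_assert(hswap64(0x1122334455667788ULL) == 0x7788556633441122ULL);
    g_assert(wswap64(0x1122334455667788ULL) == 0x5566778811223344ULL);
}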
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index 1d3e4c24e4..bd67468e5e 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -1,85 +1,54 @@
#ifndef BSWAP_H
#define BSWAP_H
-#include "fpu/softfloat-types.h"
+#undef bswap16
+#define bswap16(_x) __builtin_bswap16(_x)
+#undef bswap32
+#define bswap32(_x) __builtin_bswap32(_x)
+#undef bswap64
+#define bswap64(_x) __builtin_bswap64(_x)
-#ifdef CONFIG_MACHINE_BSWAP_H
-# include <sys/endian.h>
-# include <machine/bswap.h>
-#elif defined(__FreeBSD__)
-# include <sys/endian.h>
-#elif defined(__HAIKU__)
-# include <endian.h>
-#elif defined(CONFIG_BYTESWAP_H)
-# include <byteswap.h>
-
-static inline uint16_t bswap16(uint16_t x)
-{
- return bswap_16(x);
-}
-
-static inline uint32_t bswap32(uint32_t x)
-{
- return bswap_32(x);
-}
-
-static inline uint64_t bswap64(uint64_t x)
+static inline uint32_t bswap24(uint32_t x)
{
- return bswap_64(x);
-}
-# else
-static inline uint16_t bswap16(uint16_t x)
-{
- return (((x & 0x00ff) << 8) |
- ((x & 0xff00) >> 8));
+ return (((x & 0x000000ffU) << 16) |
+ ((x & 0x0000ff00U) << 0) |
+ ((x & 0x00ff0000U) >> 16));
}
-static inline uint32_t bswap32(uint32_t x)
-{
- return (((x & 0x000000ffU) << 24) |
- ((x & 0x0000ff00U) << 8) |
- ((x & 0x00ff0000U) >> 8) |
- ((x & 0xff000000U) >> 24));
-}
-
-static inline uint64_t bswap64(uint64_t x)
+static inline void bswap16s(uint16_t *s)
{
- return (((x & 0x00000000000000ffULL) << 56) |
- ((x & 0x000000000000ff00ULL) << 40) |
- ((x & 0x0000000000ff0000ULL) << 24) |
- ((x & 0x00000000ff000000ULL) << 8) |
- ((x & 0x000000ff00000000ULL) >> 8) |
- ((x & 0x0000ff0000000000ULL) >> 24) |
- ((x & 0x00ff000000000000ULL) >> 40) |
- ((x & 0xff00000000000000ULL) >> 56));
+ *s = __builtin_bswap16(*s);
}
-#endif /* ! CONFIG_MACHINE_BSWAP_H */
-static inline void bswap16s(uint16_t *s)
+static inline void bswap24s(uint32_t *s)
{
- *s = bswap16(*s);
+ *s = bswap24(*s & 0x00ffffffU);
}
static inline void bswap32s(uint32_t *s)
{
- *s = bswap32(*s);
+ *s = __builtin_bswap32(*s);
}
static inline void bswap64s(uint64_t *s)
{
- *s = bswap64(*s);
+ *s = __builtin_bswap64(*s);
}
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
-#define le_bswap(v, size) glue(bswap, size)(v)
+#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
+#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
-#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
+#define le_bswaps(p, size) \
+ do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
-#define be_bswap(v, size) glue(bswap, size)(v)
+#define le_bswap24(v) (v)
+#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
-#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
+#define be_bswaps(p, size) \
+ do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#endif
/**
@@ -169,18 +138,21 @@ CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
-/* len must be one of 1, 2, 4 */
-static inline uint32_t qemu_bswap_len(uint32_t value, int len)
-{
- return bswap32(value) >> (32 - 8 * len);
-}
-
/*
- * Same as cpu_to_le{16,32}, except that gcc will figure the result is
+ * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
* a compile-time constant if you pass in a constant. So this can be
* used to initialize static variables.
*/
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
+# define const_le64(_x) \
+ ((((_x) & 0x00000000000000ffULL) << 56) | \
+ (((_x) & 0x000000000000ff00ULL) << 40) | \
+ (((_x) & 0x0000000000ff0000ULL) << 24) | \
+ (((_x) & 0x00000000ff000000ULL) << 8) | \
+ (((_x) & 0x000000ff00000000ULL) >> 8) | \
+ (((_x) & 0x0000ff0000000000ULL) >> 24) | \
+ (((_x) & 0x00ff000000000000ULL) >> 40) | \
+ (((_x) & 0xff00000000000000ULL) >> 56))
# define const_le32(_x) \
((((_x) & 0x000000ffU) << 24) | \
(((_x) & 0x0000ff00U) << 8) | \
@@ -190,68 +162,11 @@ static inline uint32_t qemu_bswap_len(uint32_t value, int len)
((((_x) & 0x00ff) << 8) | \
(((_x) & 0xff00) >> 8))
#else
+# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
-/* Unions for reinterpreting between floats and integers. */
-
-typedef union {
- float32 f;
- uint32_t l;
-} CPU_FloatU;
-
-typedef union {
- float64 d;
-#if defined(HOST_WORDS_BIGENDIAN)
- struct {
- uint32_t upper;
- uint32_t lower;
- } l;
-#else
- struct {
- uint32_t lower;
- uint32_t upper;
- } l;
-#endif
- uint64_t ll;
-} CPU_DoubleU;
-
-typedef union {
- floatx80 d;
- struct {
- uint64_t lower;
- uint16_t upper;
- } l;
-} CPU_LDoubleU;
-
-typedef union {
- float128 q;
-#if defined(HOST_WORDS_BIGENDIAN)
- struct {
- uint32_t upmost;
- uint32_t upper;
- uint32_t lower;
- uint32_t lowest;
- } l;
- struct {
- uint64_t upper;
- uint64_t lower;
- } ll;
-#else
- struct {
- uint32_t lowest;
- uint32_t lower;
- uint32_t upper;
- uint32_t upmost;
- } l;
- struct {
- uint64_t lower;
- uint64_t upper;
- } ll;
-#endif
-} CPU_QuadU;
-
/* unaligned/endian-independent pointer access */
/*
@@ -275,6 +190,7 @@ typedef union {
* size is:
* b: 8 bits
* w: 16 bits
+ * 24: 24 bits
* l: 32 bits
* q: 64 bits
*
@@ -347,6 +263,11 @@ static inline void stw_he_p(void *ptr, uint16_t v)
__builtin_memcpy(ptr, &v, sizeof(v));
}
+static inline void st24_he_p(void *ptr, uint32_t v)
+{
+ __builtin_memcpy(ptr, &v, 3);
+}
+
static inline int ldl_he_p(const void *ptr)
{
int32_t r;
@@ -396,6 +317,11 @@ static inline void stw_le_p(void *ptr, uint16_t v)
stw_he_p(ptr, le_bswap(v, 16));
}
+static inline void st24_le_p(void *ptr, uint32_t v)
+{
+ st24_he_p(ptr, le_bswap24(v));
+}
+
static inline void stl_le_p(void *ptr, uint32_t v)
{
stl_he_p(ptr, le_bswap(v, 32));
@@ -406,36 +332,6 @@ static inline void stq_le_p(void *ptr, uint64_t v)
stq_he_p(ptr, le_bswap(v, 64));
}
-/* float access */
-
-static inline float32 ldfl_le_p(const void *ptr)
-{
- CPU_FloatU u;
- u.l = ldl_le_p(ptr);
- return u.f;
-}
-
-static inline void stfl_le_p(void *ptr, float32 v)
-{
- CPU_FloatU u;
- u.f = v;
- stl_le_p(ptr, u.l);
-}
-
-static inline float64 ldfq_le_p(const void *ptr)
-{
- CPU_DoubleU u;
- u.ll = ldq_le_p(ptr);
- return u.d;
-}
-
-static inline void stfq_le_p(void *ptr, float64 v)
-{
- CPU_DoubleU u;
- u.d = v;
- stq_le_p(ptr, u.ll);
-}
-
static inline int lduw_be_p(const void *ptr)
{
return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
@@ -471,36 +367,6 @@ static inline void stq_be_p(void *ptr, uint64_t v)
stq_he_p(ptr, be_bswap(v, 64));
}
-/* float access */
-
-static inline float32 ldfl_be_p(const void *ptr)
-{
- CPU_FloatU u;
- u.l = ldl_be_p(ptr);
- return u.f;
-}
-
-static inline void stfl_be_p(void *ptr, float32 v)
-{
- CPU_FloatU u;
- u.f = v;
- stl_be_p(ptr, u.l);
-}
-
-static inline float64 ldfq_be_p(const void *ptr)
-{
- CPU_DoubleU u;
- u.ll = ldq_be_p(ptr);
- return u.d;
-}
-
-static inline void stfq_be_p(void *ptr, float64 v)
-{
- CPU_DoubleU u;
- u.d = v;
- stq_be_p(ptr, u.ll);
-}
-
static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
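A small sketch of the new pieces: const_le64() folding in a static
initializer, the endian-independent *_le_p accessors, and bswap24().

#include "qemu/osdep.h"
#include "qemu/bswap.h"

/* const_le64() is a compile-time constant, so static initializers work. */
static const uint64_t magic_le = const_le64(0x1122334455667788ULL);

static void bswap_example(void)
{
    uint8_t buf[8] = { 0 };

    stl_le_p(buf, 0x11223344);      /* always stores 44 33 22 11 */
    g_assert(buf[0] == 0x44 && buf[3] == 0x11);
    g_assert(ldl_le_p(buf) == 0x11223344);

    g_assert(bswap24(0x112233) == 0x332211);
    (void)magic_le;
}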
diff --git a/include/qemu/buffer.h b/include/qemu/buffer.h
index 3a909aeca4..e95dfd696c 100644
--- a/include/qemu/buffer.h
+++ b/include/qemu/buffer.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -49,7 +49,7 @@ struct Buffer {
* to identify in debug traces.
*/
void buffer_init(Buffer *buffer, const char *name, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
/**
* buffer_shrink:
diff --git a/include/qemu/cacheflush.h b/include/qemu/cacheflush.h
new file mode 100644
index 0000000000..ae20bcda73
--- /dev/null
+++ b/include/qemu/cacheflush.h
@@ -0,0 +1,35 @@
+/*
+ * Flush the host cpu caches.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CACHEFLUSH_H
+#define QEMU_CACHEFLUSH_H
+
+/**
+ * flush_idcache_range:
+ * @rx: instruction address
+ * @rw: data address
+ * @len: length to flush
+ *
+ * Flush @len bytes of the data cache at @rw and the icache at @rx
+ * to bring them in sync. The two addresses may be different virtual
+ * mappings of the same physical page(s).
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
+
+static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
+{
+ /* icache is coherent and does not require flushing. */
+}
+
+#else
+
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len);
+
+#endif
+
+#endif /* QEMU_CACHEFLUSH_H */
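Typical use, as in a JIT: emit instructions through the writable mapping,
then bring the executable mapping's icache in sync before jumping to it.

#include "qemu/osdep.h"
#include "qemu/cacheflush.h"

static void finish_codegen(void *rw_buf, void *rx_buf, size_t len)
{
    /* rw_buf and rx_buf may be two views of the same physical pages. */
    flush_idcache_range((uintptr_t)rx_buf, (uintptr_t)rw_buf, len);
}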
diff --git a/include/qemu/cacheinfo.h b/include/qemu/cacheinfo.h
new file mode 100644
index 0000000000..019a157ea0
--- /dev/null
+++ b/include/qemu/cacheinfo.h
@@ -0,0 +1,21 @@
+/*
+ * QEMU host cacheinfo information
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_CACHEINFO_H
+#define QEMU_CACHEINFO_H
+
+/*
+ * These variables represent our best guess at the host icache and
+ * dcache sizes, expressed both as the size in bytes and as the
+ * base-2 log of the size in bytes. They are initialized at startup
+ * (via an attribute 'constructor' function).
+ */
+extern int qemu_icache_linesize;
+extern int qemu_icache_linesize_log;
+extern int qemu_dcache_linesize;
+extern int qemu_dcache_linesize_log;
+
+#endif
diff --git a/include/qemu/chardev_open.h b/include/qemu/chardev_open.h
new file mode 100644
index 0000000000..64e8fcfdcb
--- /dev/null
+++ b/include/qemu/chardev_open.h
@@ -0,0 +1,16 @@
+/*
+ * QEMU Chardev Helper
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CHARDEV_OPEN_H
+#define QEMU_CHARDEV_OPEN_H
+
+int open_cdev(const char *devpath, dev_t cdev);
+#endif
diff --git a/include/qemu/clang-tsa.h b/include/qemu/clang-tsa.h
new file mode 100644
index 0000000000..ba06fb8c92
--- /dev/null
+++ b/include/qemu/clang-tsa.h
@@ -0,0 +1,114 @@
+#ifndef CLANG_TSA_H
+#define CLANG_TSA_H
+
+/*
+ * Copyright 2018 Jarkko Hietaniemi <jhi@iki.fi>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without
+ * limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+ *
+ * TSA is available since clang 3.6-ish.
+ */
+#ifdef __clang__
+# define TSA(x) __attribute__((x))
+#else
+# define TSA(x) /* No TSA, make TSA attributes no-ops. */
+#endif
+
+/* TSA_CAPABILITY() is used to annotate typedefs:
+ *
+ * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex;
+ */
+#define TSA_CAPABILITY(x) TSA(capability(x))
+
+/* TSA_GUARDED_BY() is used to annotate global variables,
+ * the data is guarded:
+ *
+ * Foo foo TSA_GUARDED_BY(mutex);
+ */
+#define TSA_GUARDED_BY(x) TSA(guarded_by(x))
+
+/* TSA_PT_GUARDED_BY() is used to annotate global pointers, the data
+ * behind the pointer is guarded.
+ *
+ * Foo* ptr TSA_PT_GUARDED_BY(mutex);
+ */
+#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x))
+
+/* The TSA_REQUIRES() is used to annotate functions: the caller of the
+ * function MUST hold the resource, the function will NOT release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_REQUIRES(mutex);
+ */
+#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__))
+#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__))
+
+/* TSA_EXCLUDES() is used to annotate functions: the caller of the
+ * function MUST NOT hold the resource; the function first acquires the
+ * resource, and then releases it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_EXCLUDES(mutex);
+ */
+#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__))
+
+/* TSA_ACQUIRE() is used to annotate functions: the caller of the
+ * function MUST NOT hold the resource, the function will acquire the
+ * resource, but NOT release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_ACQUIRE(mutex);
+ */
+#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__))
+#define TSA_ACQUIRE_SHARED(...) TSA(acquire_shared_capability(__VA_ARGS__))
+
+/* TSA_RELEASE() is used to annotate functions: the caller of the
+ * function MUST hold the resource, but the function will then release it.
+ *
+ * More than one mutex may be specified, comma-separated.
+ *
+ * void Foo(void) TSA_RELEASE(mutex);
+ */
+#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__))
+#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__))
+
+/* TSA_NO_TSA is used to annotate functions. Use only when you need to.
+ *
+ * void Foo(void) TSA_NO_TSA;
+ */
+#define TSA_NO_TSA TSA(no_thread_safety_analysis)
+
+/*
+ * TSA_ASSERT() is used to annotate functions: This function will assert that
+ * the lock is held. When it returns, the caller of the function is assumed to
+ * already hold the resource.
+ *
+ * More than one mutex may be specified, comma-separated.
+ */
+#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__))
+#define TSA_ASSERT_SHARED(...) TSA(assert_shared_capability(__VA_ARGS__))
+
+#endif /* #ifndef CLANG_TSA_H */
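A self-contained sketch of the annotations applied to a plain pthread mutex;
the lock, counter and helper functions are invented for illustration.
Compile with clang and -Wthread-safety to see the analysis in action.

#include <pthread.h>
#include "qemu/clang-tsa.h"

typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex;

static tsa_mutex lock = PTHREAD_MUTEX_INITIALIZER;
static int counter TSA_GUARDED_BY(lock);

static void take_lock(void) TSA_ACQUIRE(lock)
{
    pthread_mutex_lock(&lock);
}

static void drop_lock(void) TSA_RELEASE(lock)
{
    pthread_mutex_unlock(&lock);
}

/* The caller must already hold the lock. */
static void bump(void) TSA_REQUIRES(lock)
{
    counter++;
}

/* The caller must not hold the lock; it is taken and released here. */
static void bump_unlocked(void) TSA_EXCLUDES(lock)
{
    take_lock();
    bump();
    drop_lock();
}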
diff --git a/include/qemu/co-shared-resource.h b/include/qemu/co-shared-resource.h
index 4e4503004c..78ca5850f8 100644
--- a/include/qemu/co-shared-resource.h
+++ b/include/qemu/co-shared-resource.h
@@ -26,15 +26,13 @@
#ifndef QEMU_CO_SHARED_RESOURCE_H
#define QEMU_CO_SHARED_RESOURCE_H
-
+/* Accesses to the co-shared-resource API are thread-safe */
typedef struct SharedResource SharedResource;
/*
* Create SharedResource structure
*
* @total: total amount of some resource to be shared between clients
- *
- * Note: this API is not thread-safe.
*/
SharedResource *shres_create(uint64_t total);
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index c76281f354..c797f0d457 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -7,27 +7,21 @@
#ifndef COMPILER_H
#define COMPILER_H
+#define HOST_BIG_ENDIAN (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+
+/* HOST_LONG_BITS is the size of a native pointer in bits. */
+#define HOST_LONG_BITS (__SIZEOF_POINTER__ * 8)
+
#if defined __clang_analyzer__ || defined __COVERITY__
#define QEMU_STATIC_ANALYSIS 1
#endif
-/*----------------------------------------------------------------------------
-| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler.
-| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h.
-*----------------------------------------------------------------------------*/
-#if defined(__GNUC__) && defined(__GNUC_MINOR__)
-# define QEMU_GNUC_PREREQ(maj, min) \
- ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#ifdef __cplusplus
+#define QEMU_EXTERN_C extern "C"
#else
-# define QEMU_GNUC_PREREQ(maj, min) 0
+#define QEMU_EXTERN_C extern
#endif
-#define QEMU_NORETURN __attribute__ ((__noreturn__))
-
-#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-
-#define QEMU_SENTINEL __attribute__((sentinel))
-
#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define QEMU_PACKED __attribute__((gcc_struct, packed))
#else
@@ -39,15 +33,14 @@
#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
-#define stringify(s) tostring(s)
-#define tostring(s) #s
+#define stringify(s) tostring(s)
+#define tostring(s) #s
#endif
-#ifndef likely
-#if __GNUC__ < 3
-#define __builtin_expect(x, n) (x)
-#endif
+/* Expands into an identifier stemN, where N is another number each time */
+#define MAKE_IDENTFIER(stem) glue(stem, __COUNTER__)
+#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
@@ -68,14 +61,10 @@
(offsetof(container, field) + sizeof_field(container, field))
/* Convert from a base type to a parent type, with compile time checking. */
-#ifdef __GNUC__
#define DO_UPCAST(type, field, dev) ( __extension__ ( { \
char __attribute__((unused)) offset_must_be_zero[ \
-offsetof(type, field)]; \
container_of(dev, type, field);}))
-#else
-#define DO_UPCAST(type, field, dev) container_of(dev, type, field)
-#endif
#define typeof_field(type, field) typeof(((type *)0)->field)
#define type_check(t1,t2) ((t1*)0 - (t2*)0)
@@ -85,39 +74,19 @@
int:(x) ? -1 : 1; \
}
-/* QEMU_BUILD_BUG_MSG() emits the message given if _Static_assert is
- * supported; otherwise, it will be omitted from the compiler error
- * message (but as it remains present in the source code, it can still
- * be useful when debugging). */
-#if defined(CONFIG_STATIC_ASSERT)
#define QEMU_BUILD_BUG_MSG(x, msg) _Static_assert(!(x), msg)
-#elif defined(__COUNTER__)
-#define QEMU_BUILD_BUG_MSG(x, msg) typedef QEMU_BUILD_BUG_ON_STRUCT(x) \
- glue(qemu_build_bug_on__, __COUNTER__) __attribute__((unused))
-#else
-#define QEMU_BUILD_BUG_MSG(x, msg)
-#endif
#define QEMU_BUILD_BUG_ON(x) QEMU_BUILD_BUG_MSG(x, "not expecting: " #x)
#define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \
sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)))
-#if defined __GNUC__
-# if !QEMU_GNUC_PREREQ(4, 4)
- /* gcc versions before 4.4.x don't support gnu_printf, so use printf. */
-# define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m)))
-# else
- /* Use gnu_printf when supported (qemu uses standard format strings). */
-# define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m)))
-# if defined(_WIN32)
- /* Map __printf__ to __gnu_printf__ because we want standard format strings
- * even when MinGW or GLib include files use __printf__. */
-# define __printf__ __gnu_printf__
-# endif
-# endif
-#else
-#define GCC_FMT_ATTR(n, m)
+#if !defined(__clang__) && defined(_WIN32)
+/*
+ * Map __printf__ to __gnu_printf__ because we want standard format strings even
+ * when MinGW or GLib include files use __printf__.
+ */
+# define __printf__ __gnu_printf__
#endif
#ifndef __has_warning
@@ -140,6 +109,14 @@
#define __has_attribute(x) 0 /* compatibility with older GCC */
#endif
+#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)
+# define QEMU_SANITIZE_ADDRESS 1
+#endif
+
+#if defined(__SANITIZE_THREAD__) || __has_feature(thread_sanitizer)
+# define QEMU_SANITIZE_THREAD 1
+#endif
+
/*
* GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC
* versions we support have the "flatten" attribute. Clang may not have the
@@ -188,59 +165,66 @@
#define QEMU_ALWAYS_INLINE
#endif
-/* Implement C11 _Generic via GCC builtins. Example:
- *
- * QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
- *
- * The first argument is the discriminator. The last is the default value.
- * The middle ones are tuples in "(type, expansion)" format.
+/**
+ * In most cases, normal "fallthrough" comments are good enough for
+ * switch-case statements, but sometimes the compiler has problems
+ * with those. In that case you can use QEMU_FALLTHROUGH instead.
*/
+#if __has_attribute(fallthrough)
+# define QEMU_FALLTHROUGH __attribute__((fallthrough))
+#else
+# define QEMU_FALLTHROUGH do {} while (0) /* fallthrough */
+#endif
-/* First, find out the number of generic cases. */
-#define QEMU_GENERIC(x, ...) \
- QEMU_GENERIC_(typeof(x), __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-
-/* There will be extra arguments, but they are not used. */
-#define QEMU_GENERIC_(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, count, ...) \
- QEMU_GENERIC##count(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
-
-/* Two more helper macros, this time to extract items from a parenthesized
- * list.
+#ifdef CONFIG_CFI
+/*
+ * If CFI is enabled, use an attribute to disable cfi-icall on the following
+ * function
*/
-#define QEMU_FIRST_(a, b) a
-#define QEMU_SECOND_(a, b) b
-
-/* ... and a final one for the common part of the "recursion". */
-#define QEMU_GENERIC_IF(x, type_then, else_) \
- __builtin_choose_expr(__builtin_types_compatible_p(x, \
- QEMU_FIRST_ type_then), \
- QEMU_SECOND_ type_then, else_)
-
-/* CPP poor man's "recursion". */
-#define QEMU_GENERIC1(x, a0, ...) (a0)
-#define QEMU_GENERIC2(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC1(x, __VA_ARGS__))
-#define QEMU_GENERIC3(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC2(x, __VA_ARGS__))
-#define QEMU_GENERIC4(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC3(x, __VA_ARGS__))
-#define QEMU_GENERIC5(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC4(x, __VA_ARGS__))
-#define QEMU_GENERIC6(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC5(x, __VA_ARGS__))
-#define QEMU_GENERIC7(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC6(x, __VA_ARGS__))
-#define QEMU_GENERIC8(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC7(x, __VA_ARGS__))
-#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
-#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
+#define QEMU_DISABLE_CFI __attribute__((no_sanitize("cfi-icall")))
+#else
+/* If CFI is not enabled, use an empty define to not change the behavior */
+#define QEMU_DISABLE_CFI
+#endif
-/**
- * qemu_build_not_reached()
- *
- * The compiler, during optimization, is expected to prove that a call
- * to this function cannot be reached and remove it. If the compiler
- * supports QEMU_ERROR, this will be reported at compile time; otherwise
- * this will be reported at link time due to the missing symbol.
+/*
+ * Apple clang version 14 has a bug in its __builtin_subcll(); define
+ * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it.
+ * When a version of Apple clang which has this bug fixed is released
+ * we can add an upper bound to this check.
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1631
+ * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details.
+ * The bug never made it into any upstream LLVM releases, only Apple ones.
*/
-#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
-extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
- qemu_build_not_reached(void);
+#if defined(__apple_build_version__) && __clang_major__ >= 14
+#define BUILTIN_SUBCLL_BROKEN
+#endif
+
+#if __has_attribute(annotate)
+#define QEMU_ANNOTATE(x) __attribute__((annotate(x)))
+#else
+#define QEMU_ANNOTATE(x)
+#endif
+
+#if __has_attribute(used)
+# define QEMU_USED __attribute__((used))
#else
-#define qemu_build_not_reached() g_assert_not_reached()
+# define QEMU_USED
#endif
+/*
+ * Ugly CPP trick that is like "defined FOO", but also works in C
+ * code. Useful to replace #ifdef with "if" statements; assumes
+ * the symbol was defined with Meson's "config.set()", so it is empty
+ * if defined.
+ */
+#define IS_ENABLED(x) IS_EMPTY(x)
+
+#define IS_EMPTY_JUNK_ junk,
+#define IS_EMPTY(value) IS_EMPTY_(IS_EMPTY_JUNK_##value)
+
+/* Expands to either SECOND_ARG(junk, 1, 0) or SECOND_ARG(IS_EMPTY_JUNK_CONFIG_FOO 1, 0) */
+#define SECOND_ARG(first, second, ...) second
+#define IS_EMPTY_(junk_maybecomma) SECOND_ARG(junk_maybecomma 1, 0)
+
#endif /* COMPILER_H */
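A sketch of how the IS_ENABLED() trick behaves at both ends, assuming the
symbol comes from a generated config header (CONFIG_FOO and setup_foo() are
hypothetical names):

    /*
     * With "#define CONFIG_FOO" present (empty value, as Meson's config.set()
     * emits):
     *   IS_ENABLED(CONFIG_FOO) -> IS_EMPTY_(IS_EMPTY_JUNK_)
     *                          -> SECOND_ARG(junk, 1, 0) -> 1
     * With CONFIG_FOO not defined at all:
     *   IS_ENABLED(CONFIG_FOO) -> SECOND_ARG(IS_EMPTY_JUNK_CONFIG_FOO 1, 0) -> 0
     */
    if (IS_ENABLED(CONFIG_FOO)) {
        setup_foo();    /* compiled either way, only runs when enabled */
    }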
diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h
index d74f920152..51b310fa3b 100644
--- a/include/qemu/config-file.h
+++ b/include/qemu/config-file.h
@@ -1,24 +1,31 @@
#ifndef QEMU_CONFIG_FILE_H
#define QEMU_CONFIG_FILE_H
+typedef void QEMUConfigCB(const char *group, QDict *qdict, void *opaque, Error **errp);
+void qemu_load_module_for_opts(const char *group);
QemuOptsList *qemu_find_opts(const char *group);
QemuOptsList *qemu_find_opts_err(const char *group, Error **errp);
QemuOpts *qemu_find_opts_singleton(const char *group);
+extern QemuOptsList *vm_config_groups[];
+extern QemuOptsList *drive_config_groups[];
+
void qemu_add_opts(QemuOptsList *list);
void qemu_add_drive_opts(QemuOptsList *list);
-int qemu_set_option(const char *str);
int qemu_global_option(const char *str);
-void qemu_config_write(FILE *fp);
-int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname);
+int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname,
+ Error **errp);
+
+/* A default callback for qemu_read_config_file(). */
+void qemu_config_do_parse(const char *group, QDict *qdict, void *opaque, Error **errp);
-int qemu_read_config_file(const char *filename);
+int qemu_read_config_file(const char *filename, QEMUConfigCB *f, Error **errp);
/* Parse QDict options as a replacement for a config file (allowing multiple
enumerated (0..(n-1)) configuration "sections") */
-void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists,
+bool qemu_config_parse_qdict(QDict *options, QemuOptsList **lists,
Error **errp);
#endif /* QEMU_CONFIG_FILE_H */
diff --git a/include/qemu/coroutine-core.h b/include/qemu/coroutine-core.h
new file mode 100644
index 0000000000..503bad6e0e
--- /dev/null
+++ b/include/qemu/coroutine-core.h
@@ -0,0 +1,154 @@
+/*
+ * QEMU coroutine implementation
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Kevin Wolf <kwolf@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_COROUTINE_CORE_H
+#define QEMU_COROUTINE_CORE_H
+
+/**
+ * Coroutines are a mechanism for stack switching and can be used for
+ * cooperative userspace threading. These functions provide a simple but
+ * useful flavor of coroutines that is suitable for writing sequential code,
+ * rather than callbacks, for operations that need to give up control while
+ * waiting for events to complete.
+ *
+ * These functions are re-entrant and may be used outside the BQL.
+ *
+ * Functions that execute in coroutine context cannot be called
+ * directly from normal functions. Use @coroutine_fn to mark such
+ * functions. For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ *
+ * In the future it would be nice to have the compiler or a static
+ * checker catch misuse of such functions. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ */
+
+/**
+ * Mark a function that executes in coroutine context
+ *
+ * Functions that execute in coroutine context cannot be called
+ * directly from normal functions. Use @coroutine_fn to mark such
+ * functions. For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ *
+ * In the future it would be nice to have the compiler or a static
+ * checker catch misuse of such functions. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ */
+
+typedef struct Coroutine Coroutine;
+typedef struct CoMutex CoMutex;
+
+/**
+ * Coroutine entry point
+ *
+ * When the coroutine is entered for the first time, opaque is passed in as an
+ * argument.
+ *
+ * When this function returns, the coroutine is destroyed automatically and
+ * execution continues in the caller who last entered the coroutine.
+ */
+typedef void coroutine_fn CoroutineEntry(void *opaque);
+
+/**
+ * Create a new coroutine
+ *
+ * Use qemu_coroutine_enter() to actually transfer control to the coroutine.
+ * The opaque argument is passed as the argument to the entry point.
+ */
+Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque);
+
+/**
+ * Transfer control to a coroutine
+ */
+void qemu_coroutine_enter(Coroutine *coroutine);
+
+/**
+ * Transfer control to a coroutine if it's not active (i.e. part of the call
+ * stack of the running coroutine). Otherwise, do nothing.
+ */
+void qemu_coroutine_enter_if_inactive(Coroutine *co);
+
+/**
+ * Transfer control to a coroutine and associate it with ctx
+ */
+void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);
+
+/**
+ * Transfer control back to a coroutine's caller
+ *
+ * This function does not return until the coroutine is re-entered using
+ * qemu_coroutine_enter().
+ */
+void coroutine_fn qemu_coroutine_yield(void);
+
+/**
+ * Get the AioContext of the given coroutine
+ */
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co);
+
+/**
+ * Get the currently executing coroutine
+ */
+Coroutine *qemu_coroutine_self(void);
+
+/**
+ * Return whether or not currently inside a coroutine
+ *
+ * This can be used to write functions that work both when in coroutine context
+ * and when not in coroutine context. Note that such functions cannot use the
+ * coroutine_fn annotation since they work outside coroutine context.
+ */
+bool qemu_in_coroutine(void);
+
+/**
+ * Return true if the coroutine is currently entered
+ *
+ * A coroutine is "entered" if it has not yielded from the current
+ * qemu_coroutine_enter() call used to run it. This does not mean that the
+ * coroutine is currently executing code since it may have transferred control
+ * to another coroutine using qemu_coroutine_enter().
+ *
+ * When several coroutines enter each other there may be no way to know which
+ * ones have already been entered. In such situations this function can be
+ * used to avoid recursively entering coroutines.
+ */
+bool qemu_coroutine_entered(Coroutine *co);
+
+/**
+ * Initialises a CoMutex. This must be called before any other operation is used
+ * on the CoMutex.
+ */
+void qemu_co_mutex_init(CoMutex *mutex);
+
+/**
+ * Locks the mutex. If the lock cannot be taken immediately, control is
+ * transferred to the caller of the current coroutine.
+ */
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
+
+/**
+ * Unlocks the mutex and schedules the next coroutine that was waiting for this
+ * lock to be run.
+ */
+void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
+
+#endif
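A minimal sketch of the lifecycle this header exposes; my_entry and run_twice
are hypothetical names, but the calls match the prototypes declared above:

    static void coroutine_fn my_entry(void *opaque)
    {
        int *step = opaque;

        *step = 1;
        qemu_coroutine_yield();     /* hand control back to the caller */
        *step = 2;                  /* runs on the second enter */
    }

    static void run_twice(void)
    {
        int step = 0;
        Coroutine *co = qemu_coroutine_create(my_entry, &step);

        qemu_coroutine_enter(co);   /* returns once my_entry yields; step == 1 */
        qemu_coroutine_enter(co);   /* my_entry returns and the coroutine is
                                     * destroyed automatically; step == 2 */
    }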
diff --git a/include/qemu/coroutine-tls.h b/include/qemu/coroutine-tls.h
new file mode 100644
index 0000000000..1558a826aa
--- /dev/null
+++ b/include/qemu/coroutine-tls.h
@@ -0,0 +1,165 @@
+/*
+ * QEMU Thread Local Storage for coroutines
+ *
+ * Copyright Red Hat
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ * It is forbidden to access Thread Local Storage in coroutines because
+ * compiler optimizations may cause values to be cached across coroutine
+ * re-entry. Coroutines can run in more than one thread through the course of
+ * their life, leading to bugs when stale TLS values from the wrong thread are
+ * used as a result of compiler optimization.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ * :caption: A coroutine that may see the wrong TLS value
+ *
+ * static __thread AioContext *current_aio_context;
+ * ...
+ * static void coroutine_fn foo(void)
+ * {
+ * aio_notify(current_aio_context);
+ * qemu_coroutine_yield();
+ * aio_notify(current_aio_context); // <-- may be stale after yielding!
+ * }
+ *
+ * This header provides macros for safely defining variables in Thread Local
+ * Storage:
+ *
+ * .. code-block:: c
+ * :caption: A coroutine that safely uses TLS
+ *
+ * QEMU_DEFINE_STATIC_CO_TLS(AioContext *, current_aio_context)
+ * ...
+ * static void coroutine_fn foo(void)
+ * {
+ * aio_notify(get_current_aio_context());
+ * qemu_coroutine_yield();
+ * aio_notify(get_current_aio_context()); // <-- safe
+ * }
+ */
+
+#ifndef QEMU_COROUTINE_TLS_H
+#define QEMU_COROUTINE_TLS_H
+
+/*
+ * To stop the compiler from caching TLS values we define accessor functions
+ * with __attribute__((noinline)) plus asm volatile("") to prevent
+ * optimizations that override noinline.
+ *
+ * The compiler can still analyze noinline code and make optimizations based on
+ * that knowledge, so an inline asm output operand is used to prevent
+ * optimizations that make assumptions about the address of the TLS variable.
+ *
+ * This is fragile and ultimately needs to be solved by a mechanism that is
+ * guaranteed to work by the compiler (e.g. stackless coroutines), but for now
+ * we use this approach to prevent issues.
+ */
+
+/**
+ * QEMU_DECLARE_CO_TLS:
+ * @type: the variable's C type
+ * @var: the variable name
+ *
+ * Declare an extern variable in Thread Local Storage from a header file:
+ *
+ * .. code-block:: c
+ * :caption: Declaring an extern variable in Thread Local Storage
+ *
+ * QEMU_DECLARE_CO_TLS(int, my_count)
+ * ...
+ * int c = get_my_count();
+ * set_my_count(c + 1);
+ * *get_ptr_my_count() = 0;
+ *
+ * This is a coroutine-safe replacement for the __thread keyword and is
+ * equivalent to the following code:
+ *
+ * .. code-block:: c
+ * :caption: Declaring a TLS variable using __thread
+ *
+ * extern __thread int my_count;
+ * ...
+ * int c = my_count;
+ * my_count = c + 1;
+ * *(&my_count) = 0;
+ */
+#define QEMU_DECLARE_CO_TLS(type, var) \
+ __attribute__((noinline)) type get_##var(void); \
+ __attribute__((noinline)) void set_##var(type v); \
+ __attribute__((noinline)) type *get_ptr_##var(void);
+
+/**
+ * QEMU_DEFINE_CO_TLS:
+ * @type: the variable's C type
+ * @var: the variable name
+ *
+ * Define a variable in Thread Local Storage that was previously declared from
+ * a header file with QEMU_DECLARE_CO_TLS():
+ *
+ * .. code-block:: c
+ * :caption: Defining a variable in Thread Local Storage
+ *
+ * QEMU_DEFINE_CO_TLS(int, my_count)
+ *
+ * This is a coroutine-safe replacement for the __thread keyword and is
+ * equivalent to the following code:
+ *
+ * .. code-block:: c
+ * :caption: Defining a TLS variable using __thread
+ *
+ * __thread int my_count;
+ */
+#define QEMU_DEFINE_CO_TLS(type, var) \
+ static __thread type co_tls_##var; \
+ type get_##var(void) { asm volatile(""); return co_tls_##var; } \
+ void set_##var(type v) { asm volatile(""); co_tls_##var = v; } \
+ type *get_ptr_##var(void) \
+ { type *ptr = &co_tls_##var; asm volatile("" : "+rm" (ptr)); return ptr; }
+
+/**
+ * QEMU_DEFINE_STATIC_CO_TLS:
+ * @type: the variable's C type
+ * @var: the variable name
+ *
+ * Define a static variable in Thread Local Storage:
+ *
+ * .. code-block:: c
+ * :caption: Defining a static variable in Thread Local Storage
+ *
+ * QEMU_DEFINE_STATIC_CO_TLS(int, my_count)
+ * ...
+ * int c = get_my_count();
+ * set_my_count(c + 1);
+ * *get_ptr_my_count() = 0;
+ *
+ * This is a coroutine-safe replacement for the __thread keyword and is
+ * equivalent to the following code:
+ *
+ * .. code-block:: c
+ * :caption: Defining a static TLS variable using __thread
+ *
+ * static __thread int my_count;
+ * ...
+ * int c = my_count;
+ * my_count = c + 1;
+ * *(&my_count) = 0;
+ */
+#define QEMU_DEFINE_STATIC_CO_TLS(type, var) \
+ static __thread type co_tls_##var; \
+ static __attribute__((noinline, unused)) \
+ type get_##var(void) \
+ { asm volatile(""); return co_tls_##var; } \
+ static __attribute__((noinline, unused)) \
+ void set_##var(type v) \
+ { asm volatile(""); co_tls_##var = v; } \
+ static __attribute__((noinline, unused)) \
+ type *get_ptr_##var(void) \
+ { type *ptr = &co_tls_##var; asm volatile("" : "+rm" (ptr)); return ptr; }
+
+#endif /* QEMU_COROUTINE_TLS_H */
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index dfd261c5b1..e6aff45301 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -15,6 +15,7 @@
#ifndef QEMU_COROUTINE_H
#define QEMU_COROUTINE_H
+#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
@@ -25,102 +26,20 @@
* rather than callbacks, for operations that need to give up control while
* waiting for events to complete.
*
- * These functions are re-entrant and may be used outside the global mutex.
- */
-
-/**
- * Mark a function that executes in coroutine context
+ * These functions are re-entrant and may be used outside the BQL.
*
- * Functions that execute in coroutine context cannot be called directly from
- * normal functions. In the future it would be nice to enable compiler or
- * static checker support for catching such errors. This annotation might make
- * it possible and in the meantime it serves as documentation.
- *
- * For example:
+ * Functions that execute in coroutine context cannot be called
+ * directly from normal functions. Use @coroutine_fn to mark such
+ * functions. For example:
*
* static void coroutine_fn foo(void) {
* ....
* }
- */
-#define coroutine_fn
-
-typedef struct Coroutine Coroutine;
-
-/**
- * Coroutine entry point
- *
- * When the coroutine is entered for the first time, opaque is passed in as an
- * argument.
- *
- * When this function returns, the coroutine is destroyed automatically and
- * execution continues in the caller who last entered the coroutine.
- */
-typedef void coroutine_fn CoroutineEntry(void *opaque);
-
-/**
- * Create a new coroutine
- *
- * Use qemu_coroutine_enter() to actually transfer control to the coroutine.
- * The opaque argument is passed as the argument to the entry point.
- */
-Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque);
-
-/**
- * Transfer control to a coroutine
- */
-void qemu_coroutine_enter(Coroutine *coroutine);
-
-/**
- * Transfer control to a coroutine if it's not active (i.e. part of the call
- * stack of the running coroutine). Otherwise, do nothing.
- */
-void qemu_coroutine_enter_if_inactive(Coroutine *co);
-
-/**
- * Transfer control to a coroutine and associate it with ctx
- */
-void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);
-
-/**
- * Transfer control back to a coroutine's caller
- *
- * This function does not return until the coroutine is re-entered using
- * qemu_coroutine_enter().
- */
-void coroutine_fn qemu_coroutine_yield(void);
-
-/**
- * Get the AioContext of the given coroutine
- */
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
-
-/**
- * Get the currently executing coroutine
- */
-Coroutine *coroutine_fn qemu_coroutine_self(void);
-
-/**
- * Return whether or not currently inside a coroutine
- *
- * This can be used to write functions that work both when in coroutine context
- * and when not in coroutine context. Note that such functions cannot use the
- * coroutine_fn annotation since they work outside coroutine context.
- */
-bool qemu_in_coroutine(void);
-
-/**
- * Return true if the coroutine is currently entered
*
- * A coroutine is "entered" if it has not yielded from the current
- * qemu_coroutine_enter() call used to run it. This does not mean that the
- * coroutine is currently executing code since it may have transferred control
- * to another coroutine using qemu_coroutine_enter().
- *
- * When several coroutines enter each other there may be no way to know which
- * ones have already been entered. In such situations this function can be
- * used to avoid recursively entering coroutines.
+ * In the future it would be nice to have the compiler or a static
+ * checker catch misuse of such functions. This annotation might make
+ * it possible and in the meantime it serves as documentation.
*/
-bool qemu_coroutine_entered(Coroutine *co);
/**
* Provides a mutex that can be used to synchronise coroutines
@@ -150,24 +69,6 @@ struct CoMutex {
};
/**
- * Initialises a CoMutex. This must be called before any other operation is used
- * on the CoMutex.
- */
-void qemu_co_mutex_init(CoMutex *mutex);
-
-/**
- * Locks the mutex. If the lock cannot be taken immediately, control is
- * transferred to the caller of the current coroutine.
- */
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
-
-/**
- * Unlocks the mutex and schedules the next coroutine that was waiting for this
- * lock to be run.
- */
-void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
-
-/**
* Assert that the current coroutine holds @mutex.
*/
static inline coroutine_fn void qemu_co_mutex_assert_locked(CoMutex *mutex)
@@ -179,7 +80,7 @@ static inline coroutine_fn void qemu_co_mutex_assert_locked(CoMutex *mutex)
* because the condition will be false no matter whether we read NULL or
* the pointer for any other coroutine.
*/
- assert(atomic_read(&mutex->locked) &&
+ assert(qatomic_read(&mutex->locked) &&
mutex->holder == qemu_coroutine_self());
}
@@ -198,23 +99,38 @@ typedef struct CoQueue {
*/
void qemu_co_queue_init(CoQueue *queue);
+typedef enum {
+ /*
+ * Enqueue at front instead of back. Use this to re-queue a request when
+ * its wait condition is not satisfied after being woken up.
+ */
+ CO_QUEUE_WAIT_FRONT = 0x1,
+} CoQueueWaitFlags;
+
/**
* Adds the current coroutine to the CoQueue and transfers control to the
* caller of the coroutine. The mutex is unlocked during the wait and
* locked again afterwards.
*/
#define qemu_co_queue_wait(queue, lock) \
- qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock))
-void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock);
+ qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), 0)
+#define qemu_co_queue_wait_flags(queue, lock, flags) \
+ qemu_co_queue_wait_impl(queue, QEMU_MAKE_LOCKABLE(lock), (flags))
+void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock,
+ CoQueueWaitFlags flags);
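A sketch of how the new flags argument is meant to be used when a woken
coroutine finds its condition still unsatisfied; the State structure with its
CoMutex, CoQueue and ready flag is hypothetical:

    static void coroutine_fn wait_until_ready(State *s)
    {
        qemu_co_mutex_lock(&s->lock);
        if (!s->ready) {
            /* the first wait joins the back of the queue */
            qemu_co_queue_wait(&s->waiters, &s->lock);
            while (!s->ready) {
                /* woken up too early: re-queue at the front to keep our turn */
                qemu_co_queue_wait_flags(&s->waiters, &s->lock,
                                         CO_QUEUE_WAIT_FRONT);
            }
        }
        qemu_co_mutex_unlock(&s->lock);
    }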
/**
- * Removes the next coroutine from the CoQueue, and wake it up.
+ * Removes the next coroutine from the CoQueue, and queue it to run after
+ * the currently-running coroutine yields.
* Returns true if a coroutine was removed, false if the queue is empty.
+ * Used from coroutine context, use qemu_co_enter_next outside.
*/
bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
/**
- * Empties the CoQueue; all coroutines are woken up.
+ * Empties the CoQueue and queues the coroutine to run after
+ * the currently-running coroutine yields.
+ * Used from coroutine context, use qemu_co_enter_all outside.
*/
void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
@@ -232,16 +148,33 @@ void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock);
/**
+ * Empties the CoQueue, waking the waiting coroutines one at a time. Unlike
+ * qemu_co_queue_restart_all, this function releases the lock during aio_co_wake
+ * because it is meant to be used outside coroutine context; in that case, the
+ * coroutine is entered immediately, before qemu_co_enter_all returns.
+ *
+ * If used in coroutine context, qemu_co_enter_all is equivalent to
+ * qemu_co_queue_restart_all.
+ */
+#define qemu_co_enter_all(queue, lock) \
+ qemu_co_enter_all_impl(queue, QEMU_MAKE_LOCKABLE(lock))
+void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock);
+
+/**
* Checks if the CoQueue is empty.
*/
bool qemu_co_queue_empty(CoQueue *queue);
+typedef struct CoRwTicket CoRwTicket;
typedef struct CoRwlock {
- int pending_writer;
- int reader;
CoMutex mutex;
- CoQueue queue;
+
+ /* Number of readers, or -1 if owned for writing. */
+ int owners;
+
+ /* Waiting coroutines. */
+ QSIMPLEQ_HEAD(, CoRwTicket) tickets;
} CoRwlock;
/**
@@ -255,17 +188,16 @@ void qemu_co_rwlock_init(CoRwlock *lock);
* of a parallel writer, control is transferred to the caller of the current
* coroutine.
*/
-void qemu_co_rwlock_rdlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock);
/**
* Write Locks the CoRwlock from a reader. This is a bit more efficient than
* @qemu_co_rwlock_unlock followed by a separate @qemu_co_rwlock_wrlock.
- * However, if the lock cannot be upgraded immediately, control is transferred
- * to the caller of the current coroutine. Also, @qemu_co_rwlock_upgrade
- * only overrides CoRwlock fairness if there are no concurrent readers, so
- * another writer might run while @qemu_co_rwlock_upgrade blocks.
+ * Note that if the lock cannot be upgraded immediately, control is transferred
+ * to the caller of the current coroutine; another writer might run while
+ * @qemu_co_rwlock_upgrade blocks.
*/
-void qemu_co_rwlock_upgrade(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock);
/**
* Downgrades a write-side critical section to a reader. Downgrading with
@@ -273,44 +205,64 @@ void qemu_co_rwlock_upgrade(CoRwlock *lock);
* followed by @qemu_co_rwlock_rdlock. This makes it more efficient, but
* may also sometimes be necessary for correctness.
*/
-void qemu_co_rwlock_downgrade(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock);
/**
* Write Locks the mutex. If the lock cannot be taken immediately because
* of a parallel reader, control is transferred to the caller of the current
* coroutine.
*/
-void qemu_co_rwlock_wrlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock);
/**
* Unlocks the read/write lock and schedules the next coroutine that was
* waiting for this lock to be run.
*/
-void qemu_co_rwlock_unlock(CoRwlock *lock);
+void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock);
-typedef struct QemuCoSleepState QemuCoSleepState;
+typedef struct QemuCoSleep {
+ Coroutine *to_wake;
+} QemuCoSleep;
/**
- * Yield the coroutine for a given duration. During this yield, @sleep_state
- * (if not NULL) is set to an opaque pointer, which may be used for
- * qemu_co_sleep_wake(). Be careful, the pointer is set back to zero when the
- * timer fires. Don't save the obtained value to other variables and don't call
- * qemu_co_sleep_wake from another aio context.
+ * Yield the coroutine for a given duration. Initializes @w so that,
+ * during this yield, it can be passed to qemu_co_sleep_wake() to
+ * terminate the sleep.
*/
-void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
- QemuCoSleepState **sleep_state);
+void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
+ QEMUClockType type, int64_t ns);
+
+/**
+ * Yield the coroutine until the next call to qemu_co_sleep_wake.
+ */
+void coroutine_fn qemu_co_sleep(QemuCoSleep *w);
+
static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
- qemu_co_sleep_ns_wakeable(type, ns, NULL);
+ QemuCoSleep w = { 0 };
+ qemu_co_sleep_ns_wakeable(&w, type, ns);
}
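A sketch of the new QemuCoSleep convention: the sleeping side and the waking
side share the same struct (MyState and its sleep field are hypothetical;
QEMU_CLOCK_REALTIME and SCALE_MS are the usual qemu/timer.h symbols):

    /* In the coroutine: sleep for up to 100 ms unless woken earlier. */
    static void coroutine_fn wait_for_event(MyState *s)
    {
        qemu_co_sleep_ns_wakeable(&s->sleep, QEMU_CLOCK_REALTIME,
                                  100 * SCALE_MS);
    }

    /* From the main loop: cut the sleep short. */
    static void kick(MyState *s)
    {
        qemu_co_sleep_wake(&s->sleep);
    }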
+typedef void CleanupFunc(void *opaque);
+/**
+ * Run @entry in a coroutine and start a timer. Wait for @entry to finish or
+ * for the timer to elapse, whichever happens first. Return 0 if @entry
+ * finished, or -ETIMEDOUT if the timer elapsed first.
+ *
+ * Be careful: execution of @entry is not cancelled on timeout; the caller
+ * must handle that somehow. If @clean is provided, it is called after the
+ * coroutine finishes when a timeout has occurred.
+ */
+int coroutine_fn qemu_co_timeout(CoroutineEntry *entry, void *opaque,
+ uint64_t timeout_ns, CleanupFunc clean);
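A sketch of the intended calling pattern for qemu_co_timeout(); Request,
slow_connect() and free_request() are hypothetical, NANOSECONDS_PER_SECOND is
the usual qemu/timer.h constant:

    static void coroutine_fn slow_connect(void *opaque)
    {
        Request *req = opaque;
        /* ... potentially long-running work on req ... */
    }

    static void free_request(void *opaque)
    {
        g_free(opaque);
    }

    static int coroutine_fn connect_with_timeout(Request *req)
    {
        int ret = qemu_co_timeout(slow_connect, req, NANOSECONDS_PER_SECOND,
                                  free_request);
        if (ret == -ETIMEDOUT) {
            /* slow_connect() keeps running; req is now owned by
             * free_request() and must not be touched here. */
        }
        return ret;
    }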
+
/**
* Wake a coroutine if it is sleeping in qemu_co_sleep_ns. The timer will be
* deleted. @sleep_state must be the variable whose address was given to
* qemu_co_sleep_ns() and should be checked to be non-NULL before calling
* qemu_co_sleep_wake().
*/
-void qemu_co_sleep_wake(QemuCoSleepState *sleep_state);
+void qemu_co_sleep_wake(QemuCoSleep *w);
/**
* Yield until a file descriptor becomes readable
@@ -319,6 +271,41 @@ void qemu_co_sleep_wake(QemuCoSleepState *sleep_state);
*/
void coroutine_fn yield_until_fd_readable(int fd);
+/**
+ * Increase coroutine pool size
+ */
+void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size);
+
+/**
+ * Decrease coroutine pool size
+ */
+void qemu_coroutine_dec_pool_size(unsigned int additional_pool_size);
+
#include "qemu/lockable.h"
+/**
+ * Sends (part of) an iovec down a socket, yielding when the socket is full, or
+ * receives data into (part of) an iovec from a socket, yielding when there is
+ * no data in the socket.
+ * The same interface as qemu_sendv_recvv(), with added yielding.
+ * XXX should mark these as coroutine_fn
+ */
+ssize_t coroutine_fn qemu_co_sendv_recvv(int sockfd, struct iovec *iov,
+ unsigned iov_cnt, size_t offset,
+ size_t bytes, bool do_send);
+#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
+#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)
+
+/**
+ * The same as above, but with just a single buffer
+ */
+ssize_t coroutine_fn qemu_co_send_recv(int sockfd, void *buf, size_t bytes,
+ bool do_send);
+#define qemu_co_recv(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, false)
+#define qemu_co_send(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, true)
+
#endif /* QEMU_COROUTINE_H */
diff --git a/include/qemu/cpu-float.h b/include/qemu/cpu-float.h
new file mode 100644
index 0000000000..a26b9faf68
--- /dev/null
+++ b/include/qemu/cpu-float.h
@@ -0,0 +1,64 @@
+#ifndef QEMU_CPU_FLOAT_H
+#define QEMU_CPU_FLOAT_H
+
+#include "fpu/softfloat-types.h"
+
+/* Unions for reinterpreting between floats and integers. */
+
+typedef union {
+ float32 f;
+ uint32_t l;
+} CPU_FloatU;
+
+typedef union {
+ float64 d;
+#if HOST_BIG_ENDIAN
+ struct {
+ uint32_t upper;
+ uint32_t lower;
+ } l;
+#else
+ struct {
+ uint32_t lower;
+ uint32_t upper;
+ } l;
+#endif
+ uint64_t ll;
+} CPU_DoubleU;
+
+typedef union {
+ floatx80 d;
+ struct {
+ uint64_t lower;
+ uint16_t upper;
+ } l;
+} CPU_LDoubleU;
+
+typedef union {
+ float128 q;
+#if HOST_BIG_ENDIAN
+ struct {
+ uint32_t upmost;
+ uint32_t upper;
+ uint32_t lower;
+ uint32_t lowest;
+ } l;
+ struct {
+ uint64_t upper;
+ uint64_t lower;
+ } ll;
+#else
+ struct {
+ uint32_t lowest;
+ uint32_t lower;
+ uint32_t upper;
+ uint32_t upmost;
+ } l;
+ struct {
+ uint64_t lower;
+ uint64_t upper;
+ } ll;
+#endif
+} CPU_QuadU;
+
+#endif /* QEMU_CPU_FLOAT_H */
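A sketch of the typical use of these unions: pulling the endian-correct 32-bit
halves out of a raw float64 value (split_float64 is a hypothetical helper):

    static void split_float64(float64 in, uint32_t *hi, uint32_t *lo)
    {
        CPU_DoubleU u;

        u.d = in;
        *hi = u.l.upper;    /* most significant half on either host endianness */
        *lo = u.l.lower;
    }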
diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h
index 09fc245b91..b11161555b 100644
--- a/include/qemu/cpuid.h
+++ b/include/qemu/cpuid.h
@@ -25,6 +25,9 @@
#endif
/* Leaf 1, %ecx */
+#ifndef bit_PCLMUL
+#define bit_PCLMUL (1 << 1)
+#endif
#ifndef bit_SSE4_1
#define bit_SSE4_1 (1 << 19)
#endif
@@ -45,16 +48,55 @@
#ifndef bit_AVX2
#define bit_AVX2 (1 << 5)
#endif
-#ifndef bit_AVX512F
-#define bit_AVX512F (1 << 16)
-#endif
#ifndef bit_BMI2
#define bit_BMI2 (1 << 8)
#endif
+#ifndef bit_AVX512F
+#define bit_AVX512F (1 << 16)
+#endif
+#ifndef bit_AVX512DQ
+#define bit_AVX512DQ (1 << 17)
+#endif
+#ifndef bit_AVX512BW
+#define bit_AVX512BW (1 << 30)
+#endif
+#ifndef bit_AVX512VL
+#define bit_AVX512VL (1u << 31)
+#endif
+
+/* Leaf 7, %ecx */
+#ifndef bit_AVX512VBMI2
+#define bit_AVX512VBMI2 (1 << 6)
+#endif
/* Leaf 0x80000001, %ecx */
#ifndef bit_LZCNT
#define bit_LZCNT (1 << 5)
#endif
+/*
+ * Signatures for different CPU implementations as returned from Leaf 0.
+ */
+
+#ifndef signature_INTEL_ecx
+/* "Genu" "ineI" "ntel" */
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_edx 0x49656e69
+#define signature_INTEL_ecx 0x6c65746e
+#endif
+
+#ifndef signature_AMD_ecx
+/* "Auth" "enti" "cAMD" */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_edx 0x69746e65
+#define signature_AMD_ecx 0x444d4163
+#endif
+
+static inline unsigned xgetbv_low(unsigned c)
+{
+ unsigned a, d;
+ asm("xgetbv" : "=a"(a), "=d"(d) : "c"(c));
+ return a;
+}
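A sketch of how xgetbv_low() is normally combined with the CPUID bits above:
first check OSXSAVE, then verify that the OS enabled the SSE and AVX state
components (bits 1 and 2 of XCR0). bit_OSXSAVE and __cpuid come from the
system <cpuid.h>:

    static bool os_enabled_avx(void)
    {
        unsigned a, b, c, d;

        __cpuid(1, a, b, c, d);
        if (!(c & bit_OSXSAVE)) {
            return false;       /* XGETBV would fault without OSXSAVE */
        }
        return (xgetbv_low(0) & 6) == 6;
    }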
+
#endif /* QEMU_CPUID_H */
diff --git a/include/qemu/crc-ccitt.h b/include/qemu/crc-ccitt.h
new file mode 100644
index 0000000000..8918dafe07
--- /dev/null
+++ b/include/qemu/crc-ccitt.h
@@ -0,0 +1,33 @@
+/*
+ * CRC16 (CCITT) Checksum Algorithm
+ *
+ * Copyright (c) 2021 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * From Linux kernel v5.10 include/linux/crc-ccitt.h
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef CRC_CCITT_H
+#define CRC_CCITT_H
+
+extern uint16_t const crc_ccitt_table[256];
+extern uint16_t const crc_ccitt_false_table[256];
+
+uint16_t crc_ccitt(uint16_t crc, const uint8_t *buffer, size_t len);
+uint16_t crc_ccitt_false(uint16_t crc, const uint8_t *buffer, size_t len);
+
+static inline uint16_t crc_ccitt_byte(uint16_t crc, const uint8_t c)
+{
+ return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
+}
+
+static inline uint16_t crc_ccitt_false_byte(uint16_t crc, const uint8_t c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
+#endif /* CRC_CCITT_H */
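A sketch of checksumming a whole buffer with the table-driven helper; whether
0x0000 or 0xffff is the right seed depends on the protocol being implemented:

    static uint16_t frame_crc(const uint8_t *buf, size_t len)
    {
        return crc_ccitt(0xffff, buf, len);
    }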
diff --git a/include/qemu/crc32c.h b/include/qemu/crc32c.h
index 5b78884c38..88b4d2b3b3 100644
--- a/include/qemu/crc32c.h
+++ b/include/qemu/crc32c.h
@@ -30,5 +30,6 @@
uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length);
+uint32_t iov_crc32c(uint32_t crc, const struct iovec *iov, size_t iov_cnt);
#endif
diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h
index eb59852dfd..92c927a6a3 100644
--- a/include/qemu/cutils.h
+++ b/include/qemu/cutils.h
@@ -1,6 +1,24 @@
#ifndef QEMU_CUTILS_H
#define QEMU_CUTILS_H
+/*
+ * si_prefix:
+ * @exp10: exponent of 10, a multiple of 3 between -18 and 18 inclusive.
+ *
+ * Return a SI prefix (n, u, m, K, M, etc.) corresponding
+ * to the given exponent of 10.
+ */
+const char *si_prefix(unsigned int exp10);
+
+/*
+ * iec_binary_prefix:
+ * @exp2: exponent of 2, a multiple of 10 between 0 and 60 inclusive.
+ *
+ * Return an IEC binary prefix (Ki, Mi, etc.) corresponding
+ * to the given exponent of 2.
+ */
+const char *iec_binary_prefix(unsigned int exp2);
+
/**
* pstrcpy:
* @buf: buffer to copy string into
@@ -129,9 +147,6 @@ static inline const char *qemu_strchrnul(const char *s, int c)
const char *qemu_strchrnul(const char *s, int c);
#endif
time_t mktimegm(struct tm *tm);
-int qemu_fdatasync(int fd);
-int qemu_msync(void *addr, size_t length, int fd);
-int fcntl_setfl(int fd, int flag);
int qemu_parse_fd(const char *param);
int qemu_strtoi(const char *nptr, const char **endptr, int base,
int *result);
@@ -148,9 +163,8 @@ int qemu_strtou64(const char *nptr, const char **endptr, int base,
int qemu_strtod(const char *nptr, const char **endptr, double *result);
int qemu_strtod_finite(const char *nptr, const char **endptr, double *result);
-int parse_uint(const char *s, unsigned long long *value, char **endptr,
- int base);
-int parse_uint_full(const char *s, unsigned long long *value, int base);
+int parse_uint(const char *s, const char **endptr, int base, uint64_t *value);
+int parse_uint_full(const char *s, int base, uint64_t *value);
int qemu_strtosz(const char *nptr, const char **end, uint64_t *result);
int qemu_strtosz_MiB(const char *nptr, const char **end, uint64_t *result);
@@ -158,6 +172,18 @@ int qemu_strtosz_metric(const char *nptr, const char **end, uint64_t *result);
char *size_to_str(uint64_t val);
+/**
+ * freq_to_str:
+ * @freq_hz: frequency to stringify
+ *
+ * Return human readable string for frequency @freq_hz.
+ * Use SI units like KHz, MHz, and so forth.
+ *
+ * The caller is responsible for releasing the value returned
+ * with g_free() after use.
+ */
+char *freq_to_str(uint64_t freq_hz);
+
/* used to print char* safely */
#define STR_OR_NULL(str) ((str) ? (str) : "null")
@@ -184,4 +210,61 @@ int uleb128_decode_small(const uint8_t *in, uint32_t *n);
*/
int qemu_pstrcmp0(const char **str1, const char **str2);
+/* Find the program directory and save it for later use with
+ * qemu_get_exec_dir().
+ * Try the OS-specific API first; if that fails, parse it from argv0. */
+void qemu_init_exec_dir(const char *argv0);
+
+/* Get the saved exec dir. */
+const char *qemu_get_exec_dir(void);
+
+/**
+ * get_relocated_path:
+ * @dir: the directory (typically a `CONFIG_*DIR` variable) to be relocated.
+ *
+ * Returns a path for @dir that uses the directory of the running executable
+ * as the prefix.
+ *
+ * When a directory named `qemu-bundle` exists in the directory of the running
+ * executable, the path to the directory will be prepended to @dir. For
+ * example, if the directory of the running executable is `/qemu/build` and
+ * @dir is `/usr/share/qemu`, the result will be
+ * `/qemu/build/qemu-bundle/usr/share/qemu`. The directory is expected to exist
+ * in the build tree.
+ *
+ * Otherwise, the directory of the running executable will be used as the
+ * prefix and it appends the relative path from `bindir` to @dir. For example,
+ * if the directory of the running executable is `/opt/qemu/bin`, `bindir` is
+ * `/usr/bin` and @dir is `/usr/share/qemu`, the result will be
+ * `/opt/qemu/bin/../share/qemu`.
+ *
+ * The returned string should be freed by the caller.
+ */
+char *get_relocated_path(const char *dir);
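A sketch of the usual pattern: relocate a build-time directory and look up a
file beneath it (CONFIG_QEMU_DATADIR is assumed to be the build-time data
directory macro; the helper name is hypothetical):

    static char *find_data_file(const char *name)
    {
        g_autofree char *dir = get_relocated_path(CONFIG_QEMU_DATADIR);

        return g_build_filename(dir, name, NULL);   /* caller must g_free() */
    }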
+
+static inline const char *yes_no(bool b)
+{
+ return b ? "yes" : "no";
+}
+
+/*
+ * helper to parse debug environment variables
+ */
+int parse_debug_env(const char *name, int max, int initial);
+
+/*
+ * Hexdump a line of a byte buffer into a hexadecimal/ASCII buffer
+ */
+#define QEMU_HEXDUMP_LINE_BYTES 16 /* Number of bytes to dump */
+#define QEMU_HEXDUMP_LINE_LEN 75 /* Number of characters in line */
+void qemu_hexdump_line(char *line, unsigned int b, const void *bufptr,
+ unsigned int len, bool ascii);
+
+/*
+ * Hexdump a buffer to a file. An optional string prefix is added to every line
+ */
+
+void qemu_hexdump(FILE *fp, const char *prefix,
+ const void *bufptr, size_t size);
+
#endif
diff --git a/include/qemu/datadir.h b/include/qemu/datadir.h
new file mode 100644
index 0000000000..21f9097f58
--- /dev/null
+++ b/include/qemu/datadir.h
@@ -0,0 +1,28 @@
+#ifndef QEMU_DATADIR_H
+#define QEMU_DATADIR_H
+
+#define QEMU_FILE_TYPE_BIOS 0
+#define QEMU_FILE_TYPE_KEYMAP 1
+/**
+ * qemu_find_file:
+ * @type: QEMU_FILE_TYPE_BIOS (for BIOS, VGA BIOS)
+ * or QEMU_FILE_TYPE_KEYMAP (for keymaps).
+ * @name: Relative or absolute file name
+ *
+ * If @name exists on disk as an absolute path, or a path relative
+ * to the current directory, then returns @name unchanged.
+ * Otherwise searches for @name file in the data directories, either
+ * configured at build time (DATADIR) or registered with the -L command
+ * line option.
+ *
+ * The caller must use g_free() to free the returned data when it is
+ * no longer required.
+ *
+ * Returns: a path that can access @name, or NULL if no matching file exists.
+ */
+char *qemu_find_file(int type, const char *name);
+void qemu_add_default_firmwarepath(void);
+void qemu_add_data_dir(char *path);
+void qemu_list_data_dirs(void);
+
+#endif
diff --git a/include/qemu/dbus.h b/include/qemu/dbus.h
index 9d591f9ee4..81d3de8a5a 100644
--- a/include/qemu/dbus.h
+++ b/include/qemu/dbus.h
@@ -12,6 +12,29 @@
#include <gio/gio.h>
+#include "qom/object.h"
+#include "chardev/char.h"
+#include "qemu/notify.h"
+
+/* glib/gio 2.68 */
+#define DBUS_METHOD_INVOCATION_HANDLED TRUE
+#define DBUS_METHOD_INVOCATION_UNHANDLED FALSE
+
+/* in msec */
+#define DBUS_DEFAULT_TIMEOUT 1000
+
+#define DBUS_DISPLAY1_ROOT "/org/qemu/Display1"
+
+#define DBUS_DISPLAY_ERROR (dbus_display_error_quark())
+GQuark dbus_display_error_quark(void);
+
+typedef enum {
+ DBUS_DISPLAY_ERROR_FAILED,
+ DBUS_DISPLAY_ERROR_INVALID,
+ DBUS_DISPLAY_ERROR_UNSUPPORTED,
+ DBUS_DISPLAY_N_ERRORS,
+} DBusDisplayError;
+
GStrv qemu_dbus_get_queued_owners(GDBusConnection *connection,
const char *name,
Error **errp);
diff --git a/include/qemu/defer-call.h b/include/qemu/defer-call.h
new file mode 100644
index 0000000000..e2c1d24572
--- /dev/null
+++ b/include/qemu/defer-call.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Deferred calls
+ *
+ * Copyright Red Hat.
+ */
+
+#ifndef QEMU_DEFER_CALL_H
+#define QEMU_DEFER_CALL_H
+
+/* See documentation in util/defer-call.c */
+void defer_call_begin(void);
+void defer_call_end(void);
+void defer_call(void (*fn)(void *), void *opaque);
+
+#endif /* QEMU_DEFER_CALL_H */
diff --git a/include/qemu/envlist.h b/include/qemu/envlist.h
index b9addcc11f..6006dfae44 100644
--- a/include/qemu/envlist.h
+++ b/include/qemu/envlist.h
@@ -1,10 +1,6 @@
#ifndef ENVLIST_H
#define ENVLIST_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
typedef struct envlist envlist_t;
envlist_t *envlist_create(void);
@@ -15,8 +11,4 @@ int envlist_parse_set(envlist_t *, const char *);
int envlist_parse_unset(envlist_t *, const char *);
char **envlist_to_environ(const envlist_t *, size_t *);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* ENVLIST_H */
diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h
index a5ad95ff1b..3ae2357fda 100644
--- a/include/qemu/error-report.h
+++ b/include/qemu/error-report.h
@@ -30,23 +30,21 @@ void loc_set_none(void);
void loc_set_cmdline(char **argv, int idx, int cnt);
void loc_set_file(const char *fname, int lno);
-int error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-int error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-int error_vprintf_unless_qmp(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-int error_printf_unless_qmp(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+int error_vprintf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+int error_printf(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
-void error_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-void warn_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-void info_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
+void error_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+void warn_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+void info_vreport(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
-void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void warn_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void info_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void error_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
+void warn_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
+void info_report(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
bool error_report_once_cond(bool *printed, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
bool warn_report_once_cond(bool *printed, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 3);
void error_init(const char *argv0);
@@ -72,9 +70,7 @@ void error_init(const char *argv0);
fmt, ##__VA_ARGS__); \
})
-const char *error_get_progname(void);
-
-extern bool error_with_timestamp;
+extern bool message_with_timestamp;
extern bool error_with_guestname;
extern const char *error_guest_name;
diff --git a/include/qemu/event_notifier.h b/include/qemu/event_notifier.h
index 3380b662f3..8a4ff308e1 100644
--- a/include/qemu/event_notifier.h
+++ b/include/qemu/event_notifier.h
@@ -24,6 +24,7 @@ struct EventNotifier {
#else
int rfd;
int wfd;
+ bool initialized;
#endif
};
@@ -37,6 +38,7 @@ int event_notifier_test_and_clear(EventNotifier *);
#ifdef CONFIG_POSIX
void event_notifier_init_fd(EventNotifier *, int fd);
int event_notifier_get_fd(const EventNotifier *);
+int event_notifier_get_wfd(const EventNotifier *);
#else
HANDLE event_notifier_get_handle(EventNotifier *);
#endif
diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h
index 489c354291..c6295c6ff0 100644
--- a/include/qemu/fifo8.h
+++ b/include/qemu/fifo8.h
@@ -46,7 +46,7 @@ void fifo8_push(Fifo8 *fifo, uint8_t data);
* fifo8_push_all:
* @fifo: FIFO to push to
* @data: data to push
- * @size: number of bytes to push
+ * @num: number of bytes to push
*
* Push a byte array to the FIFO. Behaviour is undefined if the FIFO is full.
* Clients are responsible for checking the space left in the FIFO using
@@ -71,7 +71,7 @@ uint8_t fifo8_pop(Fifo8 *fifo);
* fifo8_pop_buf:
* @fifo: FIFO to pop from
* @max: maximum number of bytes to pop
- * @num: actual number of returned bytes
+ * @numptr: pointer filled with number of bytes returned (can be NULL)
*
* Pop a number of elements from the FIFO up to a maximum of max. The buffer
* containing the popped data is returned. This buffer points directly into
@@ -82,16 +82,43 @@ uint8_t fifo8_pop(Fifo8 *fifo);
* around in the ring buffer; in this case only a contiguous part of the data
* is returned.
*
- * The number of valid bytes returned is populated in *num; will always return
- * at least 1 byte. max must not be 0 or greater than the number of bytes in
- * the FIFO.
+ * The number of valid bytes returned is populated in *numptr; will always
+ * return at least 1 byte. max must not be 0 or greater than the number of
+ * bytes in the FIFO.
*
* Clients are responsible for checking the availability of requested data
* using fifo8_num_used().
*
* Returns: A pointer to popped data.
*/
-const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num);
+const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
+
+/**
+ * fifo8_peek_buf: read up to max bytes from the fifo
+ * @fifo: FIFO to read from
+ * @max: maximum number of bytes to peek
+ * @numptr: pointer filled with number of bytes returned (can be NULL)
+ *
+ * Peek at a number of elements from the FIFO, up to a maximum of max.
+ * The buffer containing the peeked data is returned. This buffer points
+ * directly into the FIFO backing store. Since the data is invalidated once
+ * any of the fifo8_* APIs are called on the FIFO, it is the caller's
+ * responsibility to access it before doing further API calls.
+ *
+ * The function may return fewer bytes than requested when the data wraps
+ * around in the ring buffer; in this case only a contiguous part of the data
+ * is returned.
+ *
+ * The number of valid bytes returned is populated in *numptr; will always
+ * return at least 1 byte. max must not be 0 or greater than the number of
+ * bytes in the FIFO.
+ *
+ * Clients are responsible for checking the availability of requested data
+ * using fifo8_num_used().
+ *
+ * Returns: A pointer to peekable data.
+ */
+const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
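A sketch of peeking at the next contiguous chunk without consuming it; because
of wrap-around the chunk may be shorter than fifo8_num_used() (the helper name
is hypothetical):

    static uint32_t peek_next_chunk(Fifo8 *fifo, const uint8_t **buf)
    {
        uint32_t used = fifo8_num_used(fifo);
        uint32_t n;

        if (used == 0) {        /* max must not be 0 */
            *buf = NULL;
            return 0;
        }
        *buf = fifo8_peek_buf(fifo, used, &n);
        return n;               /* data stays in the FIFO until it is popped */
    }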
/**
* fifo8_reset:
@@ -148,12 +175,16 @@ uint32_t fifo8_num_used(Fifo8 *fifo);
extern const VMStateDescription vmstate_fifo8;
-#define VMSTATE_FIFO8(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(Fifo8), \
- .vmsd = &vmstate_fifo8, \
- .flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, Fifo8), \
+#define VMSTATE_FIFO8_TEST(_field, _state, _test) { \
+ .name = (stringify(_field)), \
+ .field_exists = (_test), \
+ .size = sizeof(Fifo8), \
+ .vmsd = &vmstate_fifo8, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, Fifo8), \
}
+#define VMSTATE_FIFO8(_field, _state) \
+ VMSTATE_FIFO8_TEST(_field, _state, NULL)
+
#endif /* QEMU_FIFO8_H */
diff --git a/include/qemu/filemonitor.h b/include/qemu/filemonitor.h
index a41ceb0244..8715d5c048 100644
--- a/include/qemu/filemonitor.h
+++ b/include/qemu/filemonitor.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/qemu/guest-random.h b/include/qemu/guest-random.h
index 09ff9c2236..5060d49d60 100644
--- a/include/qemu/guest-random.h
+++ b/include/qemu/guest-random.h
@@ -13,16 +13,16 @@
#define QEMU_GUEST_RANDOM_H
/**
- * qemu_guest_random_seed_main(const char *optarg, Error **errp)
- * @optarg: a non-NULL pointer to a C string
+ * qemu_guest_random_seed_main(const char *seedstr, Error **errp)
+ * @seedstr: a non-NULL pointer to a C string
* @errp: an error indicator
*
- * The @optarg value is that which accompanies the -seed argument.
+ * The @seedstr value is that which accompanies the -seed argument.
* This forces qemu_guest_getrandom into deterministic mode.
*
* Returns 0 on success, < 0 on failure while setting *errp.
*/
-int qemu_guest_random_seed_main(const char *optarg, Error **errp);
+int qemu_guest_random_seed_main(const char *seedstr, Error **errp);
/**
* qemu_guest_random_seed_thread_part1(void)
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index 5e71b6d6f7..8136e33674 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -76,20 +76,9 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size);
*
* Store result of merging @a and @b into @result.
* @result is allowed to be equal to @a or @b.
- *
- * Return true if the merge was successful,
- * false if it was not attempted.
- */
-bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result);
-
-/**
- * hbitmap_can_merge:
- *
- * hbitmap_can_merge(a, b) && hbitmap_can_merge(a, result) is sufficient and
- * necessary for hbitmap_merge will not fail.
- *
+ * All bitmaps must have the same size.
*/
-bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b);
+void hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result);
/**
* hbitmap_empty:
@@ -340,6 +329,18 @@ bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
int64_t max_dirty_count,
int64_t *dirty_start, int64_t *dirty_count);
+/*
+ * hbitmap_status:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from
+ * @count: Number of bits to process
+ * @pnum: Out-parameter. How many bits have the same value starting from @start
+ *
+ * Returns true if bitmap is dirty at @start, false otherwise.
+ */
+bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
+ int64_t *pnum);
+
/**
* hbitmap_iter_next:
* @hbi: HBitmapIter to operate on.
diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h
new file mode 100644
index 0000000000..353ab2ad8b
--- /dev/null
+++ b/include/qemu/help-texts.h
@@ -0,0 +1,13 @@
+#ifndef QEMU_HELP_TEXTS_H
+#define QEMU_HELP_TEXTS_H
+
+/* Copyright string for -version arguments, About dialogs, etc */
+#define QEMU_COPYRIGHT "Copyright (c) 2003-2024 " \
+ "Fabrice Bellard and the QEMU Project developers"
+
+/* Bug reporting information for --help arguments, About dialogs, etc */
+#define QEMU_HELP_BOTTOM \
+ "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \
+ "More information on the QEMU project at <https://qemu.org>."
+
+#endif
diff --git a/include/qemu/help_option.h b/include/qemu/help_option.h
index 328d2a89fd..ca6389a154 100644
--- a/include/qemu/help_option.h
+++ b/include/qemu/help_option.h
@@ -19,4 +19,15 @@ static inline bool is_help_option(const char *s)
return !strcmp(s, "?") || !strcmp(s, "help");
}
+static inline int starts_with_help_option(const char *s)
+{
+ if (*s == '?') {
+ return 1;
+ }
+ if (g_str_has_prefix(s, "help")) {
+ return 4;
+ }
+ return 0;
+}
+
#endif
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index cdca2991d8..ead97d354d 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -23,10 +23,15 @@
* THE SOFTWARE.
*/
+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
#ifndef HOST_UTILS_H
#define HOST_UTILS_H
#include "qemu/bswap.h"
+#include "qemu/int128.h"
#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
@@ -51,43 +56,45 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
return (__int128_t)a * b / c;
}
-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
+static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
- if (divisor == 0) {
- return 1;
- } else {
- __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
- __uint128_t result = dividend / divisor;
- *plow = result;
- *phigh = dividend % divisor;
- return result > UINT64_MAX;
- }
+ return ((__int128_t)a * b + c - 1) / c;
}
-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
+ uint64_t divisor)
{
- if (divisor == 0) {
- return 1;
- } else {
- __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
- __int128_t result = dividend / divisor;
- *plow = result;
- *phigh = dividend % divisor;
- return result != *plow;
- }
+ __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
+ __uint128_t result = dividend / divisor;
+
+ *plow = result;
+ *phigh = result >> 64;
+ return dividend % divisor;
+}
+
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
+ int64_t divisor)
+{
+ __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
+ __int128_t result = dividend / divisor;
+
+ *plow = result;
+ *phigh = result >> 64;
+ return dividend % divisor;
}
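A sketch of the new calling convention: the 128-bit quotient comes back through
the same pointers and the remainder becomes the return value (the numbers are
just an illustration):

    static uint64_t divu128_demo(void)
    {
        uint64_t lo = 0, hi = 1;                /* dividend = 2^64 */
        uint64_t rem = divu128(&lo, &hi, 3);

        /* now hi == 0 and lo == 0x5555555555555555 */
        return rem;                             /* 1 */
    }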
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
-static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+static inline uint64_t muldiv64_rounding(uint64_t a, uint32_t b, uint32_t c,
+ bool round_up)
{
union {
uint64_t ll;
struct {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
uint32_t high, low;
#else
uint32_t low, high;
@@ -98,15 +105,58 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
u.ll = a;
rl = (uint64_t)u.l.low * (uint64_t)b;
+ if (round_up) {
+ rl += c - 1;
+ }
rh = (uint64_t)u.l.high * (uint64_t)b;
rh += (rl >> 32);
res.l.high = rh / c;
res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
return res.ll;
}
+
+static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+{
+ return muldiv64_rounding(a, b, c, false);
+}
+
+static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
+{
+ return muldiv64_rounding(a, b, c, true);
+}
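A sketch of the classic use of muldiv64(): rescaling a tick count between clock
frequencies without overflowing the intermediate 64-bit product
(NANOSECONDS_PER_SECOND fits the 32-bit multiplier):

    static uint64_t ticks_to_ns(uint64_t ticks, uint32_t freq_hz)
    {
        /* ticks * 1e9 / freq, rounded down; muldiv64_round_up() rounds up */
        return muldiv64(ticks, NANOSECONDS_PER_SECOND, freq_hz);
    }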
#endif
/**
+ * clz8 - count leading zeros in a 8-bit value.
+ * @val: The value to search
+ *
+ * Returns 8 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ *
+ * Note that the GCC builtin will upcast its argument to an `unsigned int`
+ * so this function subtracts off the number of prepended zeroes.
+ */
+static inline int clz8(uint8_t val)
+{
+ return val ? __builtin_clz(val) - 24 : 8;
+}
+
+/**
+ * clz16 - count leading zeros in a 16-bit value.
+ * @val: The value to search
+ *
+ * Returns 16 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ *
+ * Note that the GCC builtin will upcast its argument to an `unsigned int`
+ * so this function subtracts off the number of prepended zeroes.
+ */
+static inline int clz16(uint16_t val)
+{
+ return val ? __builtin_clz(val) - 16 : 16;
+}
+
+/**
* clz32 - count leading zeros in a 32-bit value.
* @val: The value to search
*
@@ -153,6 +203,30 @@ static inline int clo64(uint64_t val)
}
/**
+ * ctz8 - count trailing zeros in a 8-bit value.
+ * @val: The value to search
+ *
+ * Returns 8 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ */
+static inline int ctz8(uint8_t val)
+{
+ return val ? __builtin_ctz(val) : 8;
+}
+
+/**
+ * ctz16 - count trailing zeros in a 16-bit value.
+ * @val: The value to search
+ *
+ * Returns 16 if the value is zero. Note that the GCC builtin is
+ * undefined if the value is zero.
+ */
+static inline int ctz16(uint16_t val)
+{
+ return val ? __builtin_ctz(val) : 16;
+}
+
+/**
* ctz32 - count trailing zeros in a 32-bit value.
* @val: The value to search
*
@@ -272,6 +346,9 @@ static inline int ctpop64(uint64_t val)
*/
static inline uint8_t revbit8(uint8_t x)
{
+#if __has_builtin(__builtin_bitreverse8)
+ return __builtin_bitreverse8(x);
+#else
/* Assign the correct nibble position. */
x = ((x & 0xf0) >> 4)
| ((x & 0x0f) << 4);
@@ -281,6 +358,7 @@ static inline uint8_t revbit8(uint8_t x)
| ((x & 0x22) << 1)
| ((x & 0x11) << 3);
return x;
+#endif
}
/**
@@ -289,6 +367,9 @@ static inline uint8_t revbit8(uint8_t x)
*/
static inline uint16_t revbit16(uint16_t x)
{
+#if __has_builtin(__builtin_bitreverse16)
+ return __builtin_bitreverse16(x);
+#else
/* Assign the correct byte position. */
x = bswap16(x);
/* Assign the correct nibble position. */
@@ -300,6 +381,7 @@ static inline uint16_t revbit16(uint16_t x)
| ((x & 0x2222) << 1)
| ((x & 0x1111) << 3);
return x;
+#endif
}
/**
@@ -308,6 +390,9 @@ static inline uint16_t revbit16(uint16_t x)
*/
static inline uint32_t revbit32(uint32_t x)
{
+#if __has_builtin(__builtin_bitreverse32)
+ return __builtin_bitreverse32(x);
+#else
/* Assign the correct byte position. */
x = bswap32(x);
/* Assign the correct nibble position. */
@@ -319,6 +404,7 @@ static inline uint32_t revbit32(uint32_t x)
| ((x & 0x22222222u) << 1)
| ((x & 0x11111111u) << 3);
return x;
+#endif
}
/**
@@ -327,6 +413,9 @@ static inline uint32_t revbit32(uint32_t x)
*/
static inline uint64_t revbit64(uint64_t x)
{
+#if __has_builtin(__builtin_bitreverse64)
+ return __builtin_bitreverse64(x);
+#else
/* Assign the correct byte position. */
x = bswap64(x);
/* Assign the correct nibble position. */
@@ -338,6 +427,259 @@ static inline uint64_t revbit64(uint64_t x)
| ((x & 0x2222222222222222ull) << 1)
| ((x & 0x1111111111111111ull) << 3);
return x;
+#endif
+}
+
+/**
+ * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
+ */
+static inline uint64_t uabs64(int64_t v)
+{
+ return v < 0 ? -v : v;
+}
+
+/**
+ * sadd32_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * sadd64_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * uadd32_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * uadd64_overflow - addition with overflow indication
+ * @x, @y: addends
+ * @ret: Output for sum
+ *
+ * Computes *@ret = @x + @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
+{
+ return __builtin_add_overflow(x, y, ret);
+}
+
+/**
+ * ssub32_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * ssub64_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * usub32_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * usub64_overflow - subtraction with overflow indication
+ * @x: Minuend
+ * @y: Subtrahend
+ * @ret: Output for difference
+ *
+ * Computes *@ret = @x - @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
+{
+ return __builtin_sub_overflow(x, y, ret);
+}
+
+/**
+ * smul32_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/**
+ * smul64_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/**
+ * umul32_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
+
+/**
+ * umul64_overflow - multiplication with overflow indication
+ * @x, @y: Input multipliers
+ * @ret: Output for product
+ *
+ * Computes *@ret = @x * @y, and returns true if and only if that
+ * value has been truncated.
+ */
+static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
+{
+ return __builtin_mul_overflow(x, y, ret);
+}
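
As a minimal sketch of how these wrappers are typically combined (not part of this patch; the helper name and the fixed 64-byte header are illustrative, and the header being patched here, presumably qemu/host-utils.h, is assumed to be included):

/* Illustrative only: refuse a request whose total byte count would wrap. */
static bool request_len_ok(uint64_t nelem, uint64_t elem_size, uint64_t *total)
{
    uint64_t payload;

    if (umul64_overflow(nelem, elem_size, &payload)) {
        return false;                        /* product truncated */
    }
    /* add a fixed 64-byte header, again checking for wrap-around */
    return !uadd64_overflow(payload, 64, total);
}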
+
+/*
+ * Unsigned 128x64 multiplication.
+ * Returns true if the full product did not fit in 128 bits, i.e. the
+ * stored result was truncated.
+ * Otherwise, returns false and returns the multiplication result via
+ * plow and phigh.
+ */
+static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
+{
+#if defined(CONFIG_INT128)
+ bool res;
+ __uint128_t r;
+ __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
+ res = __builtin_mul_overflow(f, factor, &r);
+
+ *plow = r;
+ *phigh = r >> 64;
+
+ return res;
+#else
+ uint64_t dhi = *phigh;
+ uint64_t dlo = *plow;
+ uint64_t ahi;
+ uint64_t blo, bhi;
+
+ if (dhi == 0) {
+ mulu64(plow, phigh, dlo, factor);
+ return false;
+ }
+
+ mulu64(plow, &ahi, dlo, factor);
+ mulu64(&blo, &bhi, dhi, factor);
+
+ return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
+#endif
+}
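
A sketch of mulu128() in use, scaling a 128-bit value kept as a low/high pair (the wrapper function and values are illustrative, not part of this patch):

/* Illustrative: multiply the 128-bit value in *lo/*hi by 1000 in place. */
static bool scale_by_1000(uint64_t *lo, uint64_t *hi)
{
    if (mulu128(lo, hi, 1000)) {
        return false;            /* full product needed more than 128 bits */
    }
    return true;                 /* *lo/*hi now hold the exact product */
}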
+
+/**
+ * uadd64_carry - addition with carry-in and carry-out
+ * @x, @y: addends
+ * @pcarry: in-out carry value
+ *
+ * Computes @x + @y + *@pcarry, placing the carry-out back
+ * into *@pcarry and returning the 64-bit sum.
+ */
+static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
+{
+#if __has_builtin(__builtin_addcll)
+ unsigned long long c = *pcarry;
+ x = __builtin_addcll(x, y, c, &c);
+ *pcarry = c & 1;
+ return x;
+#else
+ bool c = *pcarry;
+ /* This is clang's internal expansion of __builtin_addc. */
+ c = uadd64_overflow(x, c, &x);
+ c |= uadd64_overflow(x, y, &x);
+ *pcarry = c;
+ return x;
+#endif
+}
+
+/**
+ * usub64_borrow - subtraction with borrow-in and borrow-out
+ * @x: Minuend
+ * @y: Subtrahend
+ * @pborrow: in-out borrow value
+ *
+ * Computes @x - @y - *@pborrow, placing the borrow-out back
+ * into *@pborrow and returning the 64-bit difference.
+ */
+static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
+{
+#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
+ unsigned long long b = *pborrow;
+ x = __builtin_subcll(x, y, b, &b);
+ *pborrow = b & 1;
+ return x;
+#else
+ bool b = *pborrow;
+ b = usub64_overflow(x, b, &x);
+ b |= usub64_overflow(x, y, &x);
+ *pborrow = b;
+ return x;
+#endif
}
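
A sketch of the intended carry-chaining pattern, here adding two 256-bit numbers stored as four 64-bit limbs (the helper and limb layout are illustrative, not part of this patch):

/* Illustrative: dst += src, with limb 0 being the least significant. */
static void add256(uint64_t dst[4], const uint64_t src[4])
{
    bool carry = false;

    for (int i = 0; i < 4; i++) {
        dst[i] = uadd64_carry(dst[i], src[i], &carry);
    }
    /* 'carry' now holds the carry out of the most significant limb */
}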
/* Host type specific sizes of these routines. */
@@ -437,4 +779,83 @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
*/
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
+ *
+ * Licensed under the GPLv2/LGPLv3
+ */
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
+ uint64_t n0, uint64_t d)
+{
+#if defined(__x86_64__)
+ uint64_t q;
+ asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
+ return q;
+#elif defined(__s390x__) && !defined(__clang__)
+ /* Need to use a TImode type to get an even register pair for DLGR. */
+ unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
+ asm("dlgr %0, %1" : "+r"(n) : "r"(d));
+ *r = n >> 64;
+ return n;
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
+ /* From Power ISA 2.06, programming note for divdeu. */
+ uint64_t q1, q2, Q, r1, r2, R;
+ asm("divdeu %0,%2,%4; divdu %1,%3,%4"
+ : "=&r"(q1), "=r"(q2)
+ : "r"(n1), "r"(n0), "r"(d));
+ r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
+ r2 = n0 - (q2 * d);
+ Q = q1 + q2;
+ R = r1 + r2;
+ if (R >= d || R < r2) { /* overflow implies R > d */
+ Q += 1;
+ R -= d;
+ }
+ *r = R;
+ return Q;
+#else
+ uint64_t d0, d1, q0, q1, r1, r0, m;
+
+ d0 = (uint32_t)d;
+ d1 = d >> 32;
+
+ r1 = n1 % d1;
+ q1 = n1 / d1;
+ m = q1 * d0;
+ r1 = (r1 << 32) | (n0 >> 32);
+ if (r1 < m) {
+ q1 -= 1;
+ r1 += d;
+ if (r1 >= d) {
+ if (r1 < m) {
+ q1 -= 1;
+ r1 += d;
+ }
+ }
+ }
+ r1 -= m;
+
+ r0 = r1 % d1;
+ q0 = r1 / d1;
+ m = q0 * d0;
+ r0 = (r0 << 32) | (uint32_t)n0;
+ if (r0 < m) {
+ q0 -= 1;
+ r0 += d;
+ if (r0 >= d) {
+ if (r0 < m) {
+ q0 -= 1;
+ r0 += d;
+ }
+ }
+ }
+ r0 -= m;
+
+ *r = r0;
+ return (q1 << 32) | q0;
+#endif
+}
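
A sketch of dividing a two-word (128-bit) numerator by a 64-bit divisor with this helper. Note the assumptions (inherited from the GMP routine rather than stated above): @n1 must be below the divisor so the quotient fits in 64 bits, and for the generic fallback the divisor should be normalized (most-significant bit set):

/* Illustrative: (5 * 2^64 + 7) / 2^63, divisor msb set and high word < divisor. */
static uint64_t div128_example(void)
{
    uint64_t rem;
    uint64_t quot = udiv_qrnnd(&rem, 5, 7, 0x8000000000000000ull);

    /* quot == 10, rem == 7 */
    return quot + rem;
}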
+
+Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
+Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
#endif
diff --git a/include/qemu/hw-version.h b/include/qemu/hw-version.h
new file mode 100644
index 0000000000..730a8c904d
--- /dev/null
+++ b/include/qemu/hw-version.h
@@ -0,0 +1,27 @@
+/*
+ * QEMU "hardware version" machinery
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_HW_VERSION_H
+#define QEMU_HW_VERSION_H
+
+/*
+ * Starting with QEMU 2.5, qemu_hw_version() returns "2.5+" by default
+ * instead of QEMU_VERSION, so setting hw_version on MachineClass
+ * is no longer mandatory.
+ *
+ * Do NOT change this string, or it will break compatibility on all
+ * machine classes that don't set hw_version.
+ */
+#define QEMU_HW_VERSION "2.5+"
+
+/* QEMU "hardware version" setting. Used to replace code that exposed
+ * QEMU_VERSION to guests in the past and needs to keep compatibility.
+ * Do not use qemu_hw_version() in new code.
+ */
+void qemu_set_hw_version(const char *);
+const char *qemu_hw_version(void);
+
+#endif
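
For illustration only (the hook below is hypothetical and not part of this patch), a versioned machine type might pin the guest-visible string during machine initialization:

/* Hypothetical compat hook for an older machine type. */
static void pc_machine_2_4_options(void)
{
    qemu_set_hw_version("2.4.0");
    /* qemu_hw_version() now reports "2.4.0" instead of the default "2.5+" */
}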
diff --git a/include/qemu/id.h b/include/qemu/id.h
index b55c406e69..46b759b284 100644
--- a/include/qemu/id.h
+++ b/include/qemu/id.h
@@ -5,6 +5,7 @@ typedef enum IdSubSystems {
ID_QDEV,
ID_BLOCK,
ID_CHR,
+ ID_NET,
ID_MAX /* last element, used as array size */
} IdSubSystems;
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index 5c9890db8b..174bd7dafb 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -1,16 +1,27 @@
#ifndef INT128_H
#define INT128_H
-#ifdef CONFIG_INT128
#include "qemu/bswap.h"
+/*
+ * With TCI, we need to use libffi for interfacing with TCG helpers.
+ * But libffi does not support __int128_t, and therefore cannot pass
+ * or return values of this type; force use of the Int128 struct instead.
+ */
+#if defined(CONFIG_INT128) && !defined(CONFIG_TCG_INTERPRETER)
typedef __int128_t Int128;
+typedef __int128_t __attribute__((aligned(16))) Int128Aligned;
static inline Int128 int128_make64(uint64_t a)
{
return a;
}
+static inline Int128 int128_makes64(int64_t a)
+{
+ return a;
+}
+
static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
return (__uint128_t)hi << 64 | lo;
@@ -53,16 +64,41 @@ static inline Int128 int128_exts64(int64_t a)
return a;
}
+static inline Int128 int128_not(Int128 a)
+{
+ return ~a;
+}
+
static inline Int128 int128_and(Int128 a, Int128 b)
{
return a & b;
}
+static inline Int128 int128_or(Int128 a, Int128 b)
+{
+ return a | b;
+}
+
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+ return a ^ b;
+}
+
static inline Int128 int128_rshift(Int128 a, int n)
{
return a >> n;
}
+static inline Int128 int128_urshift(Int128 a, int n)
+{
+ return (__uint128_t)a >> n;
+}
+
+static inline Int128 int128_lshift(Int128 a, int n)
+{
+ return a << n;
+}
+
static inline Int128 int128_add(Int128 a, Int128 b)
{
return a + b;
@@ -98,11 +134,21 @@ static inline bool int128_ge(Int128 a, Int128 b)
return a >= b;
}
+static inline bool int128_uge(Int128 a, Int128 b)
+{
+ return ((__uint128_t)a) >= ((__uint128_t)b);
+}
+
static inline bool int128_lt(Int128 a, Int128 b)
{
return a < b;
}
+static inline bool int128_ult(Int128 a, Int128 b)
+{
+ return (__uint128_t)a < (__uint128_t)b;
+}
+
static inline bool int128_le(Int128 a, Int128 b)
{
return a <= b;
@@ -140,26 +186,78 @@ static inline void int128_subfrom(Int128 *a, Int128 b)
static inline Int128 bswap128(Int128 a)
{
+#if __has_builtin(__builtin_bswap128)
+ return __builtin_bswap128(a);
+#else
return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a)));
+#endif
+}
+
+static inline int clz128(Int128 a)
+{
+ if (a >> 64) {
+ return __builtin_clzll(a >> 64);
+ } else {
+ return (a) ? __builtin_clzll((uint64_t)a) + 64 : 128;
+ }
+}
+
+static inline Int128 int128_divu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a / (__uint128_t)b;
+}
+
+static inline Int128 int128_remu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a % (__uint128_t)b;
+}
+
+static inline Int128 int128_divs(Int128 a, Int128 b)
+{
+ return a / b;
+}
+
+static inline Int128 int128_rems(Int128 a, Int128 b)
+{
+ return a % b;
}
#else /* !CONFIG_INT128 */
typedef struct Int128 Int128;
-
+typedef struct Int128 __attribute__((aligned(16))) Int128Aligned;
+
+/*
+ * We guarantee that the in-memory byte representation of an
+ * Int128 is that of a host-endian-order 128-bit integer
+ * (whether using this struct or the __int128_t version of the type).
+ * Some code using this type relies on this (e.g. when copying it into
+ * guest memory or a gdb protocol buffer, or by using Int128 in
+ * a union with other integer types).
+ */
struct Int128 {
+#if HOST_BIG_ENDIAN
+ int64_t hi;
+ uint64_t lo;
+#else
uint64_t lo;
int64_t hi;
+#endif
};
static inline Int128 int128_make64(uint64_t a)
{
- return (Int128) { a, 0 };
+ return (Int128) { .lo = a, .hi = 0 };
+}
+
+static inline Int128 int128_makes64(int64_t a)
+{
+ return (Int128) { .lo = a, .hi = a >> 63 };
}
static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
- return (Int128) { lo, hi };
+ return (Int128) { .lo = lo, .hi = hi };
}
static inline uint64_t int128_get64(Int128 a)
@@ -190,17 +288,32 @@ static inline Int128 int128_one(void)
static inline Int128 int128_2_64(void)
{
- return (Int128) { 0, 1 };
+ return int128_make128(0, 1);
}
static inline Int128 int128_exts64(int64_t a)
{
- return (Int128) { .lo = a, .hi = (a < 0) ? -1 : 0 };
+ return int128_make128(a, (a < 0) ? -1 : 0);
+}
+
+static inline Int128 int128_not(Int128 a)
+{
+ return int128_make128(~a.lo, ~a.hi);
}
static inline Int128 int128_and(Int128 a, Int128 b)
{
- return (Int128) { a.lo & b.lo, a.hi & b.hi };
+ return int128_make128(a.lo & b.lo, a.hi & b.hi);
+}
+
+static inline Int128 int128_or(Int128 a, Int128 b)
+{
+ return int128_make128(a.lo | b.lo, a.hi | b.hi);
+}
+
+static inline Int128 int128_xor(Int128 a, Int128 b)
+{
+ return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
}
static inline Int128 int128_rshift(Int128 a, int n)
@@ -217,6 +330,31 @@ static inline Int128 int128_rshift(Int128 a, int n)
}
}
+static inline Int128 int128_urshift(Int128 a, int n)
+{
+ uint64_t h = a.hi;
+ if (!n) {
+ return a;
+ }
+ h = h >> (n & 63);
+ if (n >= 64) {
+ return int128_make64(h);
+ } else {
+ return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h);
+ }
+}
+
+static inline Int128 int128_lshift(Int128 a, int n)
+{
+ uint64_t l = a.lo << (n & 63);
+ if (n >= 64) {
+ return int128_make128(0, l);
+ } else if (n > 0) {
+ return int128_make128(l, (a.hi << n) | (a.lo >> (64 - n)));
+ }
+ return a;
+}
+
static inline Int128 int128_add(Int128 a, Int128 b)
{
uint64_t lo = a.lo + b.lo;
@@ -261,11 +399,21 @@ static inline bool int128_ge(Int128 a, Int128 b)
return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo);
}
+static inline bool int128_uge(Int128 a, Int128 b)
+{
+ return (uint64_t)a.hi > (uint64_t)b.hi || (a.hi == b.hi && a.lo >= b.lo);
+}
+
static inline bool int128_lt(Int128 a, Int128 b)
{
return !int128_ge(a, b);
}
+static inline bool int128_ult(Int128 a, Int128 b)
+{
+ return !int128_uge(a, b);
+}
+
static inline bool int128_le(Int128 a, Int128 b)
{
return int128_ge(b, a);
@@ -301,5 +449,48 @@ static inline void int128_subfrom(Int128 *a, Int128 b)
*a = int128_sub(*a, b);
}
-#endif /* CONFIG_INT128 */
+static inline Int128 bswap128(Int128 a)
+{
+ return int128_make128(bswap64(a.hi), bswap64(a.lo));
+}
+
+static inline int clz128(Int128 a)
+{
+ if (a.hi) {
+ return __builtin_clzll(a.hi);
+ } else {
+ return (a.lo) ? __builtin_clzll(a.lo) + 64 : 128;
+ }
+}
+
+Int128 int128_divu(Int128, Int128);
+Int128 int128_remu(Int128, Int128);
+Int128 int128_divs(Int128, Int128);
+Int128 int128_rems(Int128, Int128);
+#endif /* CONFIG_INT128 && !CONFIG_TCG_INTERPRETER */
+
+static inline void bswap128s(Int128 *s)
+{
+ *s = bswap128(*s);
+}
+
+#define UINT128_MAX int128_make128(~0LL, ~0LL)
+#define INT128_MAX int128_make128(UINT64_MAX, INT64_MAX)
+#define INT128_MIN int128_make128(0, INT64_MIN)
+
+/*
+ * When the compiler supports a 128-bit type, define a combination of
+ * a possible structure and the native types, and ease parameter
+ * passing via use of the transparent union extension.
+ */
+#ifdef CONFIG_INT128_TYPE
+typedef union {
+ __uint128_t u;
+ __int128_t i;
+ Int128 s;
+} Int128Alias __attribute__((transparent_union));
+#else
+typedef Int128 Int128Alias;
+#endif /* CONFIG_INT128_TYPE */
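
A sketch of what the transparent union buys: a helper declared with Int128Alias can be handed either representation without explicit conversion when CONFIG_INT128_TYPE is available (the helper name is illustrative; qemu/int128.h is assumed included):

/* Illustrative helper: works for both Int128 and native __int128_t callers. */
static uint64_t low64_of(Int128Alias v)
{
    return int128_getlo(v.s);
}

/* With CONFIG_INT128_TYPE, both of these compile without casts:
 *   low64_of(int128_make64(42));      -- Int128 form
 *   low64_of((__int128_t)42);         -- native form
 */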
+
#endif /* INT128_H */
diff --git a/include/qemu/interval-tree.h b/include/qemu/interval-tree.h
new file mode 100644
index 0000000000..25006debe8
--- /dev/null
+++ b/include/qemu/interval-tree.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Interval trees.
+ *
+ * Derived from include/linux/interval_tree.h and its dependencies.
+ */
+
+#ifndef QEMU_INTERVAL_TREE_H
+#define QEMU_INTERVAL_TREE_H
+
+/*
+ * For now, don't expose Linux Red-Black Trees separately, but retain the
+ * separate type definitions to keep the implementation sane, and allow
+ * the possibility of disentangling them later.
+ */
+typedef struct RBNode
+{
+ /* Encodes parent with color in the lsb. */
+ uintptr_t rb_parent_color;
+ struct RBNode *rb_right;
+ struct RBNode *rb_left;
+} RBNode;
+
+typedef struct RBRoot
+{
+ RBNode *rb_node;
+} RBRoot;
+
+typedef struct RBRootLeftCached {
+ RBRoot rb_root;
+ RBNode *rb_leftmost;
+} RBRootLeftCached;
+
+typedef struct IntervalTreeNode
+{
+ RBNode rb;
+
+ uint64_t start; /* Start of interval */
+ uint64_t last; /* Last location _in_ interval */
+ uint64_t subtree_last;
+} IntervalTreeNode;
+
+typedef RBRootLeftCached IntervalTreeRoot;
+
+/**
+ * interval_tree_is_empty
+ * @root: root of the tree.
+ *
+ * Returns true if the tree contains no nodes.
+ */
+static inline bool interval_tree_is_empty(const IntervalTreeRoot *root)
+{
+ return root->rb_root.rb_node == NULL;
+}
+
+/**
+ * interval_tree_insert
+ * @node: node to insert,
+ * @root: root of the tree.
+ *
+ * Insert @node into @root, and rebalance.
+ */
+void interval_tree_insert(IntervalTreeNode *node, IntervalTreeRoot *root);
+
+/**
+ * interval_tree_remove
+ * @node: node to remove,
+ * @root: root of the tree.
+ *
+ * Remove @node from @root, and rebalance.
+ */
+void interval_tree_remove(IntervalTreeNode *node, IntervalTreeRoot *root);
+
+/**
+ * interval_tree_iter_first:
+ * @root: root of the tree,
+ * @start, @last: the inclusive interval [start, last].
+ *
+ * Locate the "first" of a set of nodes within the tree at @root
+ * that overlap the interval, where "first" is sorted by start.
+ * Returns NULL if no overlap found.
+ */
+IntervalTreeNode *interval_tree_iter_first(IntervalTreeRoot *root,
+ uint64_t start, uint64_t last);
+
+/**
+ * interval_tree_iter_next:
+ * @node: previous search result
+ * @start, @last: the inclusive interval [start, last].
+ *
+ * Locate the "next" of a set of nodes within the tree that overlap the
+ * interval; @node is the result of a previous call to
+ * interval_tree_iter_{first,next}. Returns NULL if @node was the last
+ * node in the set.
+ */
+IntervalTreeNode *interval_tree_iter_next(IntervalTreeNode *node,
+ uint64_t start, uint64_t last);
+
+#endif /* QEMU_INTERVAL_TREE_H */
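
A sketch of the intended usage pattern for this API (addresses illustrative; qemu/interval-tree.h assumed included):

/* Illustrative: track one range and walk everything overlapping a query. */
static void track_one_range(void)
{
    static IntervalTreeRoot root;                                   /* empty tree */
    static IntervalTreeNode node = { .start = 0x1000, .last = 0x1fff };

    interval_tree_insert(&node, &root);

    /* Visit every node overlapping the inclusive range [0x1800, 0x2800]. */
    for (IntervalTreeNode *n = interval_tree_iter_first(&root, 0x1800, 0x2800);
         n != NULL;
         n = interval_tree_iter_next(n, 0x1800, 0x2800)) {
        /* n->start .. n->last overlaps the query interval */
    }
}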
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index bffc151282..63a1c01965 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -130,6 +130,29 @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
size_t bytes);
+/* Information needed to undo an iov_discard_*() operation */
+typedef struct {
+ struct iovec *modified_iov;
+ struct iovec orig;
+} IOVDiscardUndo;
+
+/*
+ * Undo an iov_discard_front_undoable() or iov_discard_back_undoable()
+ * operation. If multiple operations are made then each one needs a separate
+ * IOVDiscardUndo and iov_discard_undo() must be called in the reverse order
+ * that the operations were made.
+ */
+void iov_discard_undo(IOVDiscardUndo *undo);
+
+/*
+ * Undoable versions of iov_discard_front() and iov_discard_back(). Use
+ * iov_discard_undo() to reset to the state before the discard operations.
+ */
+size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt,
+ size_t bytes, IOVDiscardUndo *undo);
+size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt,
+ size_t bytes, IOVDiscardUndo *undo);
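
A sketch of the undoable-discard pattern these declarations describe; the caller's vector (*iov, *iov_cnt) and the 512-byte header size are illustrative:

/* Illustrative: temporarily strip a header from the front of a vector. */
static void process_with_header_stripped(struct iovec **iov, unsigned int *iov_cnt)
{
    IOVDiscardUndo undo;

    iov_discard_front_undoable(iov, iov_cnt, 512, &undo);

    /* ... consume the shortened vector ... */

    iov_discard_undo(&undo);    /* restore *iov / *iov_cnt afterwards */
}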
+
typedef struct QEMUIOVector {
struct iovec *iov;
int niov;
@@ -199,13 +222,11 @@ static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
-void qemu_iovec_init_extended(
- QEMUIOVector *qiov,
- void *head_buf, size_t head_len,
- QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len,
- void *tail_buf, size_t tail_len);
void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source,
size_t offset, size_t len);
+struct iovec *qemu_iovec_slice(QEMUIOVector *qiov,
+ size_t offset, size_t len,
+ size_t *head, size_t *tail, int *niov);
int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len);
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
void qemu_iovec_concat(QEMUIOVector *dst,
diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h
index b66cf93c4b..2a10a7052e 100644
--- a/include/qemu/iova-tree.h
+++ b/include/qemu/iova-tree.h
@@ -15,7 +15,7 @@
 * Currently the iova tree only allows keeping range information,
 * and no extra user data is allowed for each element. A
* benefit is that we can merge adjacent ranges internally within the
- * tree. It can save a lot of memory when the ranges are splitted but
+ * tree. It can save a lot of memory when the ranges are split but
* mostly continuous.
*
* Note that current implementation does not provide any thread
@@ -29,6 +29,7 @@
#define IOVA_OK (0)
#define IOVA_ERR_INVALID (-1) /* Invalid parameters */
#define IOVA_ERR_OVERLAP (-2) /* IOVA range overlapped */
+#define IOVA_ERR_NOMEM (-3) /* Cannot allocate */
typedef struct IOVATree IOVATree;
typedef struct DMAMap {
@@ -59,7 +60,7 @@ IOVATree *iova_tree_new(void);
*
* Return: 0 if succeeded, or <0 if error.
*/
-int iova_tree_insert(IOVATree *tree, DMAMap *map);
+int iova_tree_insert(IOVATree *tree, const DMAMap *map);
/**
* iova_tree_remove:
@@ -71,10 +72,8 @@ int iova_tree_insert(IOVATree *tree, DMAMap *map);
 * provided. The range does not need to be exactly what has been inserted;
* all the mappings that are included in the provided range will be
* removed from the tree. Here map->translated_addr is meaningless.
- *
- * Return: 0 if succeeded, or <0 if error.
*/
-int iova_tree_remove(IOVATree *tree, DMAMap *map);
+void iova_tree_remove(IOVATree *tree, DMAMap map);
/**
* iova_tree_find:
@@ -82,7 +81,7 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map);
* @tree: the iova tree to search from
* @map: the mapping to search
*
- * Search for a mapping in the iova tree that overlaps with the
+ * Search for a mapping in the iova tree whose iova range overlaps with the
* mapping range specified. Only the first found mapping will be
* returned.
*
@@ -92,7 +91,25 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map);
* user is responsible to make sure the pointer is valid (say, no
* concurrent deletion in progress).
*/
-DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map);
+const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map);
+
+/**
+ * iova_tree_find_iova:
+ *
+ * @tree: the iova tree to search from
+ * @map: the mapping to search
+ *
+ * Search for a mapping in the iova tree whose translated_addr overlaps with the
+ * mapping range specified. Only the first found mapping will be
+ * returned.
+ *
+ * Return: DMAMap pointer if found, or NULL if not found. Note that
+ * the returned DMAMap pointer is maintained internally. User should
+ * only read the content but never modify or free the content. Also,
+ * user is responsible to make sure the pointer is valid (say, no
+ * concurrent deletion in progress).
+ */
+const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map);
/**
* iova_tree_find_address:
@@ -105,13 +122,13 @@ DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map);
*
* Return: same as iova_tree_find().
*/
-DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova);
+const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova);
/**
* iova_tree_foreach:
*
* @tree: the iova tree to iterate on
- * @iterator: the interator for the mappings, return true to stop
+ * @iterator: the iterator for the mappings, return true to stop
*
* Iterate over the iova tree.
*
@@ -120,6 +137,23 @@ DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova);
void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator);
/**
+ * iova_tree_alloc_map:
+ *
+ * @tree: the iova tree to allocate from
+ * @map: the new map (as translated addr & size) to allocate in the iova region
+ * @iova_begin: the minimum address of the allocation
+ * @iova_end: the maximum address of the allocation
+ *
+ * Allocates a new region of a given size, between iova_min and iova_max.
+ *
+ * Return: Same as iova_tree_insert, but cannot overlap and can return error if
+ * iova tree is out of free contiguous range. The caller gets the assigned iova
+ * in map->iova.
+ */
+int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin,
+ hwaddr iova_end);
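
A sketch of asking the tree for a free IOVA range for an existing host mapping; only the DMAMap fields documented in this hunk are shown, and the remaining fields (size, permissions) are assumed to be filled in by the caller:

/* Illustrative: let the tree pick an IOVA for a host-side mapping. */
static void map_one_region(IOVATree *tree)
{
    DMAMap map = {
        .translated_addr = 0x7f0000000000ull,   /* illustrative host address */
        /* remaining DMAMap fields (size, permissions) set by the caller */
    };

    if (iova_tree_alloc_map(tree, &map, 0x1000, 0xffffffffull) == IOVA_OK) {
        /* map.iova now holds the IOVA chosen by the tree */
    }
}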
+
+/**
* iova_tree_destroy:
*
* @tree: the iova tree to destroy
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 32aabb1c60..2b873f2576 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -40,27 +40,60 @@ typedef struct JobTxn JobTxn;
* Long-running operation.
*/
typedef struct Job {
+
+ /* Fields set at initialization (job_create), and never modified */
+
/** The ID of the job. May be NULL for internal jobs. */
char *id;
- /** The type of this job. */
+ /**
+ * The type of this job.
+ * All callbacks are called with job_mutex *not* held.
+ */
const JobDriver *driver;
- /** Reference count of the block job */
- int refcnt;
-
- /** Current state; See @JobStatus for details. */
- JobStatus status;
-
- /** AioContext to run the job coroutine in */
- AioContext *aio_context;
-
/**
* The coroutine that executes the job. If not NULL, it is reentered when
* busy is false and the job is cancelled.
+ * Initialized in job_start()
*/
Coroutine *co;
+ /** True if this job should automatically finalize itself */
+ bool auto_finalize;
+
+ /** True if this job should automatically dismiss itself */
+ bool auto_dismiss;
+
+ /**
+ * The completion function that will be called when the job completes.
+ */
+ BlockCompletionFunc *cb;
+
+ /** The opaque value that is passed to the completion function. */
+ void *opaque;
+
+ /* ProgressMeter API is thread-safe */
+ ProgressMeter progress;
+
+ /**
+ * AioContext to run the job coroutine in.
+ * The job AioContext can be read when holding *either*
+ * the BQL (so we are in the main loop) or the job_mutex.
+ * It can only be written when we hold *both* BQL
+ * and the job_mutex.
+ */
+ AioContext *aio_context;
+
+
+ /** Protected by job_mutex */
+
+ /** Reference count of the block job */
+ int refcnt;
+
+ /** Current state; See @JobStatus for details. */
+ JobStatus status;
+
/**
* Timer that is used by @job_sleep_ns. Accessed under job_mutex (in
* job.c).
@@ -76,7 +109,7 @@ typedef struct Job {
/**
* Set to false by the job while the coroutine has yielded and may be
* re-entered by job_enter(). There may still be I/O or event loop activity
- * pending. Accessed under block_job_mutex (in blockjob.c).
+ * pending. Accessed under job_mutex.
*
* When the job is deferred to the main loop, busy is true as long as the
* bottom half is still pending.
@@ -112,14 +145,6 @@ typedef struct Job {
/** Set to true when the job has deferred work to the main loop. */
bool deferred_to_main_loop;
- /** True if this job should automatically finalize itself */
- bool auto_finalize;
-
- /** True if this job should automatically dismiss itself */
- bool auto_dismiss;
-
- ProgressMeter progress;
-
/**
* Return code from @run and/or @prepare callback(s).
* Not final until the job has reached the CONCLUDED status.
@@ -134,12 +159,6 @@ typedef struct Job {
*/
Error *err;
- /** The completion function that will be called when the job completes. */
- BlockCompletionFunc *cb;
-
- /** The opaque value that is passed to the completion function. */
- void *opaque;
-
/** Notifiers called when a cancelled job is finalised */
NotifierList on_finalize_cancelled;
@@ -167,8 +186,15 @@ typedef struct Job {
/**
* Callbacks and other information about a Job driver.
+ * All callbacks are invoked with job_mutex *not* held.
*/
struct JobDriver {
+
+ /*
+ * These fields are initialized when this object is created,
+ * and are never changed afterwards
+ */
+
/** Derived Job struct size */
size_t instance_size;
@@ -184,9 +210,18 @@ struct JobDriver {
* aborted. If it returns zero, the job moves into the WAITING state. If it
* is the last job to complete in its transaction, all jobs in the
* transaction move from WAITING to PENDING.
+ *
+ * This callback must be run in the job's context.
*/
int coroutine_fn (*run)(Job *job, Error **errp);
+ /*
+ * Functions that run without regard to the BQL and may run in any
+ * arbitrary thread. These functions do not need to be thread-safe
+ * because the caller ensures that they are invoked from one
+ * thread at a time.
+ */
+
/**
* If the callback is not NULL, it will be invoked when the job transitions
* into the paused state. Paused jobs must not perform any asynchronous
@@ -201,6 +236,13 @@ struct JobDriver {
*/
void coroutine_fn (*resume)(Job *job);
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
/**
* Called when the job is resumed by the user (i.e. user_paused becomes
* false). .user_resume is called before .resume.
@@ -251,8 +293,24 @@ struct JobDriver {
*/
void (*clean)(Job *job);
+ /**
+ * If the callback is not NULL, it will be invoked in job_cancel_async
+ *
+ * This function must return true if the job will be cancelled
+ * immediately without any further I/O (mandatory if @force is
+ * true), and false otherwise. This lets the generic job layer
+ * know whether a job has been truly (force-)cancelled, or whether
+ * it is just in a special completion mode (like mirror after
+ * READY).
+ * (If the callback is NULL, the job is assumed to terminate
+ * without I/O.)
+ */
+ bool (*cancel)(Job *job, bool force);
+
- /** Called when the job is freed */
+ /**
+ * Called when the job is freed.
+ */
void (*free)(Job *job);
};
@@ -267,6 +325,30 @@ typedef enum JobCreateFlags {
JOB_MANUAL_DISMISS = 0x04,
} JobCreateFlags;
+extern QemuMutex job_mutex;
+
+#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex)
+
+#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex)
+
+/**
+ * job_lock:
+ *
+ * Take the mutex protecting the list of jobs and their status.
+ * Most functions called by the monitor need to call job_lock
+ * and job_unlock manually. On the other hand, function called
+ * by the block jobs themselves and by the block layer will take the
+ * lock for you.
+ */
+void job_lock(void);
+
+/**
+ * job_unlock:
+ *
+ * Release the mutex protecting the list of jobs and their status.
+ */
+void job_unlock(void);
+
/**
* Allocate and return a new job transaction. Jobs can be added to the
* transaction using job_txn_add_job().
@@ -283,23 +365,20 @@ JobTxn *job_txn_new(void);
/**
* Release a reference that was previously acquired with job_txn_add_job or
* job_txn_new. If it's the last reference to the object, it will be freed.
+ *
+ * Called with job lock *not* held.
*/
void job_txn_unref(JobTxn *txn);
-/**
- * @txn: The transaction (may be NULL)
- * @job: Job to add to the transaction
- *
- * Add @job to the transaction. The @job must not already be in a transaction.
- * The caller must call either job_txn_unref() or job_completed() to release
- * the reference that is automatically grabbed here.
- *
- * If @txn is NULL, the function does nothing.
+/*
+ * Same as job_txn_unref(), but called with job lock held.
+ * Might release the lock temporarily.
*/
-void job_txn_add_job(JobTxn *txn, Job *job);
+void job_txn_unref_locked(JobTxn *txn);
/**
* Create a new long-running job and return it.
+ * Called with job_mutex *not* held.
*
* @job_id: The id of the newly-created job, or %NULL for internal jobs
* @driver: The class object for the newly-created job.
@@ -317,20 +396,26 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
/**
* Add a reference to Job refcnt, it will be decreased with job_unref, and then
* be freed if it comes to be the last reference.
+ *
+ * Called with job lock held.
*/
-void job_ref(Job *job);
+void job_ref_locked(Job *job);
/**
- * Release a reference that was previously acquired with job_ref() or
+ * Release a reference that was previously acquired with job_ref_locked() or
* job_create(). If it's the last reference to the object, it will be freed.
+ *
+ * Called with job lock held.
*/
-void job_unref(Job *job);
+void job_unref_locked(Job *job);
/**
* @job: The job that has made progress
* @done: How much progress the job made since the last call
*
* Updates the progress counter of the job.
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_update(Job *job, uint64_t done);
@@ -341,6 +426,8 @@ void job_progress_update(Job *job, uint64_t done);
*
* Sets the expected end value of the progress counter of a job so that a
* completion percentage can be calculated when the progress is updated.
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_set_remaining(Job *job, uint64_t remaining);
@@ -356,27 +443,27 @@ void job_progress_set_remaining(Job *job, uint64_t remaining);
* length before, and job_progress_update() afterwards.
* (So the operation acts as a parenthesis in regards to the main job
* operation running in background.)
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_increase_remaining(Job *job, uint64_t delta);
-/** To be called when a cancelled job is finalised. */
-void job_event_cancelled(Job *job);
-
-/** To be called when a successfully completed job is finalised. */
-void job_event_completed(Job *job);
-
/**
* Conditionally enter the job coroutine if the job is ready to run, not
* already busy and fn() returns true. fn() is called while under the job_lock
* critical section.
+ *
+ * Called with job lock held, but might release it temporarily.
*/
-void job_enter_cond(Job *job, bool(*fn)(Job *job));
+void job_enter_cond_locked(Job *job, bool(*fn)(Job *job));
/**
* @job: A job that has not yet been started.
*
* Begins execution of a job.
* Takes ownership of one reference to the job object.
+ *
+ * Called with job_mutex *not* held.
*/
void job_start(Job *job);
@@ -384,6 +471,7 @@ void job_start(Job *job);
* @job: The job to enter.
*
* Continue the specified job by entering the coroutine.
+ * Called with job_mutex *not* held.
*/
void job_enter(Job *job);
@@ -392,15 +480,18 @@ void job_enter(Job *job);
*
* Pause now if job_pause() has been called. Jobs that perform lots of I/O
* must call this between requests so that the job can be paused.
+ *
+ * Called with job_mutex *not* held.
*/
-void coroutine_fn job_pause_point(Job *job);
+void coroutine_fn GRAPH_UNLOCKED job_pause_point(Job *job);
/**
* @job: The job that calls the function.
*
* Yield the job coroutine.
+ * Called with job_mutex *not* held.
*/
-void job_yield(Job *job);
+void coroutine_fn job_yield(Job *job);
/**
* @job: The job that calls the function.
@@ -409,10 +500,11 @@ void job_yield(Job *job);
* Put the job to sleep (assuming that it wasn't canceled) for @ns
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
* interrupt the wait.
+ *
+ * Called with job_mutex *not* held.
*/
void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
-
/** Returns the JobType of a given Job. */
JobType job_type(const Job *job);
@@ -422,102 +514,165 @@ const char *job_type_str(const Job *job);
/** Returns true if the job should not be visible to the management layer. */
bool job_is_internal(Job *job);
-/** Returns whether the job is scheduled for cancellation. */
+/**
+ * Returns whether the job is being cancelled.
+ * Called with job_mutex *not* held.
+ */
bool job_is_cancelled(Job *job);
-/** Returns whether the job is in a completed state. */
-bool job_is_completed(Job *job);
+/* Same as job_is_cancelled(), but called with job lock held. */
+bool job_is_cancelled_locked(Job *job);
+
+/**
+ * Returns whether the job is scheduled for cancellation (at an
+ * indefinite point).
+ * Called with job_mutex *not* held.
+ */
+bool job_cancel_requested(Job *job);
+
+/**
+ * Returns whether the job is in a completed state.
+ * Called with job lock held.
+ */
+bool job_is_completed_locked(Job *job);
-/** Returns whether the job is ready to be completed. */
+/**
+ * Returns whether the job is ready to be completed.
+ * Called with job_mutex *not* held.
+ */
bool job_is_ready(Job *job);
+/* Same as job_is_ready(), but called with job lock held. */
+bool job_is_ready_locked(Job *job);
+
/**
* Request @job to pause at the next pause point. Must be paired with
* job_resume(). If the job is supposed to be resumed by user action, call
- * job_user_pause() instead.
+ * job_user_pause_locked() instead.
+ *
+ * Called with job lock *not* held.
*/
void job_pause(Job *job);
-/** Resumes a @job paused with job_pause. */
+/* Same as job_pause(), but called with job lock held. */
+void job_pause_locked(Job *job);
+
+/** Resumes a @job paused with job_pause. Called with job lock *not* held. */
void job_resume(Job *job);
+/*
+ * Same as job_resume(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_resume_locked(Job *job);
+
/**
* Asynchronously pause the specified @job.
* Do not allow a resume until a matching call to job_user_resume.
+ * Called with job lock held.
*/
-void job_user_pause(Job *job, Error **errp);
+void job_user_pause_locked(Job *job, Error **errp);
-/** Returns true if the job is user-paused. */
-bool job_user_paused(Job *job);
+/**
+ * Returns true if the job is user-paused.
+ * Called with job lock held.
+ */
+bool job_user_paused_locked(Job *job);
/**
* Resume the specified @job.
- * Must be paired with a preceding job_user_pause.
+ * Must be paired with a preceding job_user_pause_locked.
+ * Called with job lock held, but might release it temporarily.
*/
-void job_user_resume(Job *job, Error **errp);
+void job_user_resume_locked(Job *job, Error **errp);
/**
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock *not* held.
*/
Job *job_next(Job *job);
+/* Same as job_next(), but called with job lock held. */
+Job *job_next_locked(Job *job);
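
A sketch of combining the lock guard with the _locked iteration helper introduced in this patch (qemu/job.h assumed included; the wrapper function is illustrative):

/* Illustrative: walk all jobs while holding job_mutex. */
static void inspect_all_jobs(void)
{
    WITH_JOB_LOCK_GUARD() {
        for (Job *job = job_next_locked(NULL); job; job = job_next_locked(job)) {
            /* read-only inspection of job state is safe here */
        }
    }
}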
+
/**
* Get the job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock held.
*/
-Job *job_get(const char *id);
+Job *job_get_locked(const char *id);
/**
* Check whether the verb @verb can be applied to @job in its current state.
* Returns 0 if the verb can be applied; otherwise errp is set and -EPERM
* returned.
+ *
+ * Called with job lock held.
*/
-int job_apply_verb(Job *job, JobVerb verb, Error **errp);
+int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp);
-/** The @job could not be started, free it. */
+/**
+ * The @job could not be started, free it.
+ * Called with job_mutex *not* held.
+ */
void job_early_fail(Job *job);
-/** Moves the @job from RUNNING to READY */
+/**
+ * Moves the @job from RUNNING to READY.
+ * Called with job_mutex *not* held.
+ */
void job_transition_to_ready(Job *job);
-/** Asynchronously complete the specified @job. */
-void job_complete(Job *job, Error **errp);
+/**
+ * Asynchronously complete the specified @job.
+ * Called with job lock held, but might release it temporarily.
+ */
+void job_complete_locked(Job *job, Error **errp);
/**
* Asynchronously cancel the specified @job. If @force is true, the job should
* be cancelled immediately without waiting for a consistent state.
+ * Called with job lock held.
*/
-void job_cancel(Job *job, bool force);
+void job_cancel_locked(Job *job, bool force);
/**
- * Cancels the specified job like job_cancel(), but may refuse to do so if the
- * operation isn't meaningful in the current state of the job.
+ * Cancels the specified job like job_cancel_locked(), but may refuse
+ * to do so if the operation isn't meaningful in the current state of the job.
+ * Called with job lock held.
*/
-void job_user_cancel(Job *job, bool force, Error **errp);
+void job_user_cancel_locked(Job *job, bool force, Error **errp);
/**
* Synchronously cancel the @job. The completion callback is called
- * before the function returns. The job may actually complete
- * instead of canceling itself; the circumstances under which this
- * happens depend on the kind of job that is active.
+ * before the function returns. If @force is false, the job may
+ * actually complete instead of canceling itself; the circumstances
+ * under which this happens depend on the kind of job that is active.
*
* Returns the return value from the job if the job actually completed
* during the call, or -ECANCELED if it was canceled.
*
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock *not* held.
*/
-int job_cancel_sync(Job *job);
+int job_cancel_sync(Job *job, bool force);
+
+/* Same as job_cancel_sync, but called with job lock held. */
+int job_cancel_sync_locked(Job *job, bool force);
-/** Synchronously cancels all jobs using job_cancel_sync(). */
+/**
+ * Synchronously force-cancels all jobs using job_cancel_sync_locked().
+ *
+ * Called with job_lock *not* held.
+ */
void job_cancel_sync_all(void);
/**
* @job: The job to be completed.
- * @errp: Error object which may be set by job_complete(); this is not
+ * @errp: Error object which may be set by job_complete_locked(); this is not
* necessarily set on every error, the job return value has to be
* checked as well.
*
@@ -526,10 +681,9 @@ void job_cancel_sync_all(void);
* function).
*
* Returns the return value from the job.
- *
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock held.
*/
-int job_complete_sync(Job *job, Error **errp);
+int job_complete_sync_locked(Job *job, Error **errp);
/**
* For a @job that has finished its work and is pending awaiting explicit
@@ -538,14 +692,18 @@ int job_complete_sync(Job *job, Error **errp);
* FIXME: Make the below statement universally true:
* For jobs that support the manual workflow mode, all graph changes that occur
* as a result will occur after this command and before a successful reply.
+ *
+ * Called with job lock held.
*/
-void job_finalize(Job *job, Error **errp);
+void job_finalize_locked(Job *job, Error **errp);
/**
* Remove the concluded @job from the query list and resets the passed pointer
* to %NULL. Returns an error if the job is not actually concluded.
+ *
+ * Called with job lock held.
*/
-void job_dismiss(Job **job, Error **errp);
+void job_dismiss_locked(Job **job, Error **errp);
/**
* Synchronously finishes the given @job. If @finish is given, it is called to
@@ -554,8 +712,20 @@ void job_dismiss(Job **job, Error **errp);
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
* cancelled before completing, and -errno in other error cases.
*
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock held, but might release it temporarily.
+ */
+int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
+ Error **errp);
+
+/**
+ * Sets the @job->aio_context.
+ * Called with job_mutex *not* held.
+ *
+ * This function must run in the main thread to protect against
+ * concurrent read in job_finish_sync_locked(), takes the job_mutex
+ * lock to protect against the read in job_do_yield_locked(), and must
+ * be called when the job is quiescent.
*/
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
+void job_set_aio_context(Job *job, AioContext *ctx);
#endif
diff --git a/include/qemu/keyval.h b/include/qemu/keyval.h
new file mode 100644
index 0000000000..1187b68303
--- /dev/null
+++ b/include/qemu/keyval.h
@@ -0,0 +1,15 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef KEYVAL_H
+#define KEYVAL_H
+
+QDict *keyval_parse_into(QDict *qdict, const char *params, const char *implied_key,
+ bool *p_help, Error **errp);
+QDict *keyval_parse(const char *params, const char *implied_key,
+ bool *help, Error **errp);
+void keyval_merge(QDict *old, const QDict *new, Error **errp);
+
+#endif /* KEYVAL_H */
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index b620023141..9823220446 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -13,7 +13,7 @@
#ifndef QEMU_LOCKABLE_H
#define QEMU_LOCKABLE_H
-#include "qemu/coroutine.h"
+#include "qemu/coroutine-core.h"
#include "qemu/thread.h"
typedef void QemuLockUnlockFunc(void *);
@@ -24,79 +24,71 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
-/* This function gives an error if an invalid, non-NULL pointer type is passed
- * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
- * from the compiler, and give the errors already at link time.
- */
-#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
-void unknown_lock_type(void *);
-#else
-static inline void unknown_lock_type(void *unused)
-{
- abort();
-}
-#endif
-
static inline __attribute__((__always_inline__)) QemuLockable *
qemu_make_lockable(void *x, QemuLockable *lockable)
{
- /* We cannot test this in a macro, otherwise we get compiler
+ /*
+ * We cannot test this in a macro, otherwise we get compiler
* warnings like "the address of 'm' will always evaluate as 'true'".
*/
return x ? lockable : NULL;
}
-/* Auxiliary macros to simplify QEMU_MAKE_LOCABLE. */
-#define QEMU_LOCK_FUNC(x) ((QemuLockUnlockFunc *) \
- QEMU_GENERIC(x, \
- (QemuMutex *, qemu_mutex_lock), \
- (QemuRecMutex *, qemu_rec_mutex_lock), \
- (CoMutex *, qemu_co_mutex_lock), \
- (QemuSpin *, qemu_spin_lock), \
- unknown_lock_type))
-
-#define QEMU_UNLOCK_FUNC(x) ((QemuLockUnlockFunc *) \
- QEMU_GENERIC(x, \
- (QemuMutex *, qemu_mutex_unlock), \
- (QemuRecMutex *, qemu_rec_mutex_unlock), \
- (CoMutex *, qemu_co_mutex_unlock), \
- (QemuSpin *, qemu_spin_unlock), \
- unknown_lock_type))
-
-/* In C, compound literals have the lifetime of an automatic variable.
+static inline __attribute__((__always_inline__)) QemuLockable *
+qemu_null_lockable(void *x)
+{
+ if (x != NULL) {
+ qemu_build_not_reached();
+ }
+ return NULL;
+}
+
+/*
+ * In C, compound literals have the lifetime of an automatic variable.
* In C++ it would be different, but then C++ wouldn't need QemuLockable
* either...
*/
-#define QEMU_MAKE_LOCKABLE_(x) (&(QemuLockable) { \
- .object = (x), \
- .lock = QEMU_LOCK_FUNC(x), \
- .unlock = QEMU_UNLOCK_FUNC(x), \
+#define QML_OBJ_(x, name) (&(QemuLockable) { \
+ .object = (x), \
+ .lock = (QemuLockUnlockFunc *) qemu_ ## name ## _lock, \
+ .unlock = (QemuLockUnlockFunc *) qemu_ ## name ## _unlock \
})
-/* QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
+/**
+ * QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
*
- * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin).
+ * @x: a lock object (currently one of QemuMutex, QemuRecMutex,
+ * CoMutex, QemuSpin).
*
* Returns a QemuLockable object that can be passed around
* to a function that can operate with locks of any kind, or
* NULL if @x is %NULL.
+ *
+ * Note the special case for void *, so that we may pass "NULL".
*/
-#define QEMU_MAKE_LOCKABLE(x) \
- QEMU_GENERIC(x, \
- (QemuLockable *, (x)), \
- qemu_make_lockable((x), QEMU_MAKE_LOCKABLE_(x)))
+#define QEMU_MAKE_LOCKABLE(x) \
+ _Generic((x), QemuLockable *: (x), \
+ void *: qemu_null_lockable(x), \
+ QemuMutex *: qemu_make_lockable(x, QML_OBJ_(x, mutex)), \
+ QemuRecMutex *: qemu_make_lockable(x, QML_OBJ_(x, rec_mutex)), \
+ CoMutex *: qemu_make_lockable(x, QML_OBJ_(x, co_mutex)), \
+ QemuSpin *: qemu_make_lockable(x, QML_OBJ_(x, spin)))
-/* QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable
+/**
+ * QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable
*
- * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin).
+ * @x: a lock object (currently one of QemuMutex, QemuRecMutex,
+ * CoMutex, QemuSpin).
*
* Returns a QemuLockable object that can be passed around
* to a function that can operate with locks of any kind.
*/
-#define QEMU_MAKE_LOCKABLE_NONNULL(x) \
- QEMU_GENERIC(x, \
- (QemuLockable *, (x)), \
- QEMU_MAKE_LOCKABLE_(x))
+#define QEMU_MAKE_LOCKABLE_NONNULL(x) \
+ _Generic((x), QemuLockable *: (x), \
+ QemuMutex *: QML_OBJ_(x, mutex), \
+ QemuRecMutex *: QML_OBJ_(x, rec_mutex), \
+ CoMutex *: QML_OBJ_(x, co_mutex), \
+ QemuSpin *: QML_OBJ_(x, spin))
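
A sketch of the polymorphism this provides: one function can accept any supported lock type via QEMU_MAKE_LOCKABLE (the function names below are illustrative; qemu_lockable_unlock() is assumed to be the existing counterpart of qemu_lockable_lock()):

/* Illustrative: a critical section that does not care which lock kind it got. */
static void do_locked_work(QemuLockable *lock)
{
    qemu_lockable_lock(lock);
    /* ... critical section ... */
    qemu_lockable_unlock(lock);
}

static void example_caller(void)
{
    QemuMutex mutex;

    qemu_mutex_init(&mutex);
    do_locked_work(QEMU_MAKE_LOCKABLE(&mutex));  /* CoMutex, QemuSpin, ... work too */
    qemu_mutex_destroy(&mutex);
}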
static inline void qemu_lockable_lock(QemuLockable *x)
{
diff --git a/include/qemu/log-for-trace.h b/include/qemu/log-for-trace.h
index 2f0a5b080e..d47c9cd446 100644
--- a/include/qemu/log-for-trace.h
+++ b/include/qemu/log-for-trace.h
@@ -30,6 +30,6 @@ static inline bool qemu_loglevel_mask(int mask)
}
/* main logging function */
-int GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...);
+void G_GNUC_PRINTF(1, 2) qemu_log(const char *fmt, ...);
#endif
diff --git a/include/qemu/log.h b/include/qemu/log.h
index f4724f7330..df59bfabcd 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -3,46 +3,16 @@
/* A small part of this API is split into its own header */
#include "qemu/log-for-trace.h"
-#include "qemu/rcu.h"
-
-typedef struct QemuLogFile {
- struct rcu_head rcu;
- FILE *fd;
-} QemuLogFile;
-
-/* Private global variable, don't use */
-extern QemuLogFile *qemu_logfile;
-
/*
* The new API:
- *
*/
-/* Log settings checking macros: */
-
-/* Returns true if qemu_log() will really write somewhere
- */
-static inline bool qemu_log_enabled(void)
-{
- return qemu_logfile != NULL;
-}
+/* Returns true if qemu_log() will really write somewhere. */
+bool qemu_log_enabled(void);
-/* Returns true if qemu_log() will write somewhere else than stderr
- */
-static inline bool qemu_log_separate(void)
-{
- QemuLogFile *logfile;
- bool res = false;
-
- rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
- if (logfile && logfile->fd != stderr) {
- res = true;
- }
- rcu_read_unlock();
- return res;
-}
+/* Returns true if qemu_log() will write somewhere other than stderr. */
+bool qemu_log_separate(void);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
@@ -64,51 +34,16 @@ static inline bool qemu_log_separate(void)
#define CPU_LOG_PLUGIN (1 << 18)
/* LOG_STRACE is used for user-mode strace logging. */
#define LOG_STRACE (1 << 19)
+#define LOG_PER_THREAD (1 << 20)
+#define CPU_LOG_TB_VPU (1 << 21)
-/* Lock output for a series of related logs. Since this is not needed
- * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we
- * assume that qemu_loglevel_mask has already been tested, and that
- * qemu_loglevel is never set when qemu_logfile is unset.
- */
+/* Lock/unlock output. */
-static inline FILE *qemu_log_lock(void)
-{
- QemuLogFile *logfile;
- rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
- if (logfile) {
- qemu_flockfile(logfile->fd);
- return logfile->fd;
- } else {
- return NULL;
- }
-}
-
-static inline void qemu_log_unlock(FILE *fd)
-{
- if (fd) {
- qemu_funlockfile(fd);
- }
- rcu_read_unlock();
-}
+FILE *qemu_log_trylock(void) G_GNUC_WARN_UNUSED_RESULT;
+void qemu_log_unlock(FILE *fd);
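
A sketch of grouping related log lines under the new trylock/unlock pair so they are not interleaved with output from other threads (the wrapper function is illustrative; qemu/log.h assumed included):

/* Illustrative: emit a multi-line record atomically with respect to other threads. */
static void log_request(int id, int len)
{
    FILE *logfile = qemu_log_trylock();

    if (logfile) {
        fprintf(logfile, "request: id=%d\n", id);
        fprintf(logfile, "request: len=%d\n", len);
        qemu_log_unlock(logfile);
    }
}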
/* Logging functions: */
-/* vfprintf-like logging function
- */
-static inline void GCC_FMT_ATTR(1, 0)
-qemu_log_vprintf(const char *fmt, va_list va)
-{
- QemuLogFile *logfile;
-
- rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
- if (logfile) {
- vfprintf(logfile->fd, fmt, va);
- }
- rcu_read_unlock();
-}
-
/* log only if a bit is set on the current loglevel mask:
* @mask: bit to check in the mask
* @fmt: printf-style format string
@@ -147,9 +82,9 @@ typedef struct QEMULogItem {
extern const QEMULogItem qemu_log_items[];
-void qemu_set_log(int log_flags);
-void qemu_log_needs_buffers(void);
-void qemu_set_log_filename(const char *filename, Error **errp);
+bool qemu_set_log(int log_flags, Error **errp);
+bool qemu_set_log_filename(const char *filename, Error **errp);
+bool qemu_set_log_filename_flags(const char *name, int flags, Error **errp);
void qemu_set_dfilter_ranges(const char *ranges, Error **errp);
bool qemu_log_in_addr_range(uint64_t addr);
int qemu_str_to_log_mask(const char *str);
@@ -159,9 +94,4 @@ int qemu_str_to_log_mask(const char *str);
*/
void qemu_print_log_usage(FILE *f);
-/* fflush() the log file */
-void qemu_log_flush(void);
-/* Close the log file */
-void qemu_log_close(void);
-
#endif
diff --git a/include/qemu/madvise.h b/include/qemu/madvise.h
new file mode 100644
index 0000000000..e155f59a0d
--- /dev/null
+++ b/include/qemu/madvise.h
@@ -0,0 +1,95 @@
+/*
+ * QEMU madvise wrapper functions
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MADVISE_H
+#define QEMU_MADVISE_H
+
+#define QEMU_MADV_INVALID -1
+
+#if defined(CONFIG_MADVISE)
+
+#define QEMU_MADV_WILLNEED MADV_WILLNEED
+#define QEMU_MADV_DONTNEED MADV_DONTNEED
+#ifdef MADV_DONTFORK
+#define QEMU_MADV_DONTFORK MADV_DONTFORK
+#else
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#endif
+#ifdef MADV_MERGEABLE
+#define QEMU_MADV_MERGEABLE MADV_MERGEABLE
+#else
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_UNMERGEABLE
+#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE
+#else
+#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_DODUMP
+#define QEMU_MADV_DODUMP MADV_DODUMP
+#else
+#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
+#endif
+#ifdef MADV_DONTDUMP
+#define QEMU_MADV_DONTDUMP MADV_DONTDUMP
+#else
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#endif
+#ifdef MADV_HUGEPAGE
+#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE
+#else
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_NOHUGEPAGE
+#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE
+#else
+#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_REMOVE
+#define QEMU_MADV_REMOVE MADV_REMOVE
+#else
+#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
+#endif
+#ifdef MADV_POPULATE_WRITE
+#define QEMU_MADV_POPULATE_WRITE MADV_POPULATE_WRITE
+#else
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+#endif
+
+#elif defined(CONFIG_POSIX_MADVISE)
+
+#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
+#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+
+#else /* no-op */
+
+#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID
+#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
+#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+
+#endif
+
+int qemu_madvise(void *addr, size_t len, int advice);
+
+#endif
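
A sketch of the wrapper in use; buf and len are assumed to describe a page-aligned mapping, and the helper name is illustrative:

/* Illustrative, best effort: release the backing pages of a buffer we no longer need. */
static void drop_buffer_pages(void *buf, size_t len)
{
    if (qemu_madvise(buf, len, QEMU_MADV_DONTNEED) < 0) {
        /* advice unsupported on this host (QEMU_MADV_INVALID) or it failed */
    }
}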
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 8e98613656..5764db157c 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -26,9 +26,19 @@
#define QEMU_MAIN_LOOP_H
#include "block/aio.h"
+#include "qom/object.h"
+#include "sysemu/event-loop-base.h"
#define SIG_IPI SIGUSR1
+#define TYPE_MAIN_LOOP "main-loop"
+OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP)
+
+struct MainLoop {
+ EventLoopBase parent_obj;
+};
+typedef struct MainLoop MainLoop;
+
/**
* qemu_init_main_loop: Set up the process so that it can run the main loop.
*
@@ -52,7 +62,7 @@ int qemu_init_main_loop(Error **errp);
* repeatedly calls main_loop_wait(false).
*
* Main loop services include file descriptor callbacks, bottom halves
- * and timers (defined in qemu-timer.h). Bottom halves are similar to timers
+ * and timers (defined in qemu/timer.h). Bottom halves are similar to timers
* that execute immediately, but have a lower overhead and scheduling them
* is wait-free, thread-safe and signal-safe.
*
@@ -147,6 +157,8 @@ typedef void WaitObjectFunc(void *opaque);
* in the main loop's calls to WaitForMultipleObjects. When the handle
* is in a signaled state, QEMU will call @func.
*
+ * If the same HANDLE is added twice, this function returns -1.
+ *
* @handle: The Windows handle to be observed.
* @func: A function to be called when @handle is in a signaled state.
* @opaque: A pointer-size value that is passed to @func.
@@ -234,85 +246,151 @@ void event_notifier_set_handler(EventNotifier *e,
GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);
-#ifdef CONFIG_POSIX
+
/**
- * qemu_add_child_watch: Register a child process for reaping.
- *
- * Under POSIX systems, a parent process must read the exit status of
- * its child processes using waitpid, or the operating system will not
- * free some of the resources attached to that process.
+ * bql_locked: Return lock status of the Big QEMU Lock (BQL)
*
- * This function directs the QEMU main loop to observe a child process
- * and call waitpid as soon as it exits; the watch is then removed
- * automatically. It is useful whenever QEMU forks a child process
- * but will find out about its termination by other means such as a
- * "broken pipe".
+ * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it
+ * must always be taken outside other locks. This function helps
+ * functions take different paths depending on whether the current
+ * thread is running within the BQL.
*
- * @pid: The pid that QEMU should observe.
+ * This function should never be used in the block layer, because
+ * unit tests, block layer tools and qemu-storage-daemon do not
+ * have a BQL.
+ * Please instead refer to qemu_in_main_thread().
*/
-int qemu_add_child_watch(pid_t pid);
-#endif
+bool bql_locked(void);
/**
- * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
+ * qemu_in_main_thread: return whether it's possible to safely access
+ * the global state of the block layer.
*
- * The main loop mutex is the coarsest lock in QEMU, and as such it
- * must always be taken outside other locks. This function helps
- * functions take different paths depending on whether the current
- * thread is running within the main loop mutex.
+ * Global state of the block layer is not accessible from I/O threads
+ * or worker threads; only from threads that "own" the default
+ * AioContext that qemu_get_aio_context() returns. For tests, block
+ * layer tools and qemu-storage-daemon there is a designated thread that
+ * runs the event loop for qemu_get_aio_context(), and that is the
+ * main thread.
+ *
+ * For emulators, however, any thread that holds the BQL can act
+ * as the block layer main thread; this will be any of the actual
+ * main thread, the vCPU threads or the RCU thread.
+ *
+ * For clarity, do not use this function outside the block layer.
+ */
+bool qemu_in_main_thread(void);
+
+/*
+ * Mark and check that the function is part of the Global State API.
+ * Please refer to include/block/block-global-state.h for more
+ * information about GS API.
*/
-bool qemu_mutex_iothread_locked(void);
+#define GLOBAL_STATE_CODE() \
+ do { \
+ assert(qemu_in_main_thread()); \
+ } while (0)
+
+/*
+ * Mark and check that the function is part of the I/O API.
+ * Please refer to include/block/block-io.h for more
+ * information about IO API.
+ */
+#define IO_CODE() \
+ do { \
+ /* nop */ \
+ } while (0)
+
+/*
+ * Mark and check that the function is part of the "I/O OR GS" API.
+ * Please refer to include/block/block-io.h for more
+ * information about "IO or GS" API.
+ */
+#define IO_OR_GS_CODE() \
+ do { \
+ /* nop */ \
+ } while (0)
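
A minimal sketch of how a block-layer function marks itself as Global State code; the function name is made up, only the macro usage is the point:

    static void bdrv_example_gs_operation(void)
    {
        GLOBAL_STATE_CODE();   /* asserts qemu_in_main_thread() */
        /* ... manipulate block-layer global state ... */
    }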
/**
- * qemu_mutex_lock_iothread: Lock the main loop mutex.
+ * bql_lock: Lock the Big QEMU Lock (BQL).
*
- * This function locks the main loop mutex. The mutex is taken by
+ * This function locks the Big QEMU Lock (BQL). The lock is taken by
* main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be taken
+ * external events (such as with select). The lock should be taken
* by threads other than the main loop thread when calling
* qemu_bh_new(), qemu_set_fd_handler() and basically all other
* functions documented in this file.
*
- * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
+ * NOTE: tools currently are single-threaded and bql_lock
* is a no-op there.
*/
-#define qemu_mutex_lock_iothread() \
- qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
-void qemu_mutex_lock_iothread_impl(const char *file, int line);
+#define bql_lock() bql_lock_impl(__FILE__, __LINE__)
+void bql_lock_impl(const char *file, int line);
/**
- * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
+ * bql_unlock: Unlock the Big QEMU Lock (BQL).
*
- * This function unlocks the main loop mutex. The mutex is taken by
+ * This function unlocks the Big QEMU Lock. The lock is taken by
* main() in vl.c and always taken except while waiting on
- * external events (such as with select). The mutex should be unlocked
+ * external events (such as with select). The lock should be unlocked
* as soon as possible by threads other than the main loop thread,
* because it prevents the main loop from processing callbacks,
* including timers and bottom halves.
*
- * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
+ * NOTE: tools currently are single-threaded and bql_unlock
* is a no-op there.
*/
-void qemu_mutex_unlock_iothread(void);
+void bql_unlock(void);
+
+/**
+ * BQL_LOCK_GUARD
+ *
+ * Wrap a block of code in a conditional bql_{lock,unlock}.
+ */
+typedef struct BQLLockAuto BQLLockAuto;
+
+static inline BQLLockAuto *bql_auto_lock(const char *file, int line)
+{
+ if (bql_locked()) {
+ return NULL;
+ }
+ bql_lock_impl(file, line);
+ /* Anything non-NULL causes the cleanup function to be called */
+ return (BQLLockAuto *)(uintptr_t)1;
+}
+
+static inline void bql_auto_unlock(BQLLockAuto *l)
+{
+ bql_unlock();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock)
+
+#define BQL_LOCK_GUARD() \
+ g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \
+ = bql_auto_lock(__FILE__, __LINE__)
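
A usage sketch (the function is hypothetical): the guard takes the BQL only when the caller does not already hold it, and the matching unlock happens automatically at the end of the scope:

    static void reconfigure_something(void)
    {
        BQL_LOCK_GUARD();              /* no-op if the BQL is already held */
        /* ... code that requires the BQL ... */
    }                                  /* unlocked here only if locked above */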
/*
- * qemu_cond_wait_iothread: Wait on condition for the main loop mutex
+ * qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL)
*
- * This function atomically releases the main loop mutex and causes
+ * This function atomically releases the Big QEMU Lock (BQL) and causes
* the calling thread to block on the condition.
*/
-void qemu_cond_wait_iothread(QemuCond *cond);
+void qemu_cond_wait_bql(QemuCond *cond);
/*
- * qemu_cond_timedwait_iothread: like the previous, but with timeout
+ * qemu_cond_timedwait_bql: like the previous, but with timeout
*/
-void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
+void qemu_cond_timedwait_bql(QemuCond *cond, int ms);
/* internal interfaces */
-void qemu_fd_register(int fd);
-
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
+#define qemu_bh_new_guarded(cb, opaque, guard) \
+ qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard)
+#define qemu_bh_new(cb, opaque) \
+ qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+ MemReentrancyGuard *reentrancy_guard);
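
As a sketch of the guarded variant, a device might create its bottom half as below; MyDevice is illustrative, and the mem_reentrancy_guard field is assumed to be the one embedded in the device's DeviceState:

    static void mydev_bh_cb(void *opaque)
    {
        MyDevice *s = opaque;
        /* ... deferred work ... */
    }

    /* ... during device realize (sketch): */
    s->bh = qemu_bh_new_guarded(mydev_bh_cb, s,
                                &DEVICE(s)->mem_reentrancy_guard);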
void qemu_bh_schedule_idle(QEMUBH *bh);
enum {
diff --git a/include/qemu/memalign.h b/include/qemu/memalign.h
new file mode 100644
index 0000000000..fa299f3bf6
--- /dev/null
+++ b/include/qemu/memalign.h
@@ -0,0 +1,61 @@
+/*
+ * Allocation and free functions for aligned memory
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MEMALIGN_H
+#define QEMU_MEMALIGN_H
+
+/**
+ * qemu_try_memalign: Allocate aligned memory
+ * @alignment: required alignment, in bytes
+ * @size: size of allocation, in bytes
+ *
+ * Allocate memory on an aligned boundary (i.e. the returned
+ * address will be an exact multiple of @alignment).
+ * @alignment must be a power of 2, or the function will assert().
+ * On success, returns allocated memory; on failure, returns NULL.
+ *
+ * The memory allocated through this function must be freed via
+ * qemu_vfree() (and not via free()).
+ */
+void *qemu_try_memalign(size_t alignment, size_t size);
+/**
+ * qemu_memalign: Allocate aligned memory, without failing
+ * @alignment: required alignment, in bytes
+ * @size: size of allocation, in bytes
+ *
+ * Allocate memory in the same way as qemu_try_memalign(), but
+ * abort() with an error message if the memory allocation fails.
+ *
+ * The memory allocated through this function must be freed via
+ * qemu_vfree() (and not via free()).
+ */
+void *qemu_memalign(size_t alignment, size_t size);
+/**
+ * qemu_vfree: Free memory allocated through qemu_memalign
+ * @ptr: memory to free
+ *
+ * This function must be used to free memory allocated via qemu_memalign()
+ * or qemu_try_memalign(). (Using the wrong free function will cause
+ * subtle bugs on Windows hosts.)
+ */
+void qemu_vfree(void *ptr);
+/*
+ * It's an analog of GLIB's g_autoptr_cleanup_generic_gfree(), used to define
+ * g_autofree macro.
+ */
+static inline void qemu_cleanup_generic_vfree(void *p)
+{
+ void **pp = (void **)p;
+ qemu_vfree(*pp);
+}
+
+/*
+ * Analog of g_autofree, but qemu_vfree is called on cleanup instead of g_free.
+ */
+#define QEMU_AUTO_VFREE __attribute__((cleanup(qemu_cleanup_generic_vfree)))
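
A short sketch of the pairing rules: buffers from qemu_memalign()/qemu_try_memalign() are released with qemu_vfree(), which QEMU_AUTO_VFREE does automatically at end of scope (qemu/osdep.h is assumed to be included first, as usual):

    #include "qemu/memalign.h"

    static void fill_aligned(size_t len)
    {
        QEMU_AUTO_VFREE char *buf = qemu_memalign(4096, len);
        memset(buf, 0, len);            /* qemu_vfree(buf) runs on scope exit */
    }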
+
+#endif
diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h
index e786266b92..8344daaa03 100644
--- a/include/qemu/mmap-alloc.h
+++ b/include/qemu/mmap-alloc.h
@@ -1,21 +1,32 @@
#ifndef QEMU_MMAP_ALLOC_H
#define QEMU_MMAP_ALLOC_H
+typedef enum {
+ QEMU_FS_TYPE_UNKNOWN = 0,
+ QEMU_FS_TYPE_TMPFS,
+ QEMU_FS_TYPE_HUGETLBFS,
+ QEMU_FS_TYPE_NUM,
+} QemuFsType;
size_t qemu_fd_getpagesize(int fd);
-
-size_t qemu_mempath_getpagesize(const char *mem_path);
+QemuFsType qemu_fd_getfs(int fd);
/**
- * qemu_ram_mmap: mmap the specified file or device.
+ * qemu_ram_mmap: mmap anonymous memory, the specified file or device.
+ *
+ * mmap() abstraction to map guest RAM, simplifying flag handling, taking
+ * care of alignment requirements and installing guard pages.
*
* Parameters:
* @fd: the file or the device to mmap
* @size: the number of bytes to be mmaped
* @align: if not zero, specify the alignment of the starting mapping address;
* otherwise, the alignment in use will be determined by QEMU.
- * @shared: map has RAM_SHARED flag.
- * @is_pmem: map has RAM_PMEM flag.
+ * @qemu_map_flags: QEMU_MAP_* flags
+ * @map_offset: map starts at offset of map_offset from the start of fd
+ *
+ * Internally, MAP_PRIVATE, MAP_ANONYMOUS and MAP_SHARED_VALIDATE are set
+ * implicitly based on other parameters.
*
* Return:
* On success, return a pointer to the mapped area.
@@ -24,9 +35,32 @@ size_t qemu_mempath_getpagesize(const char *mem_path);
void *qemu_ram_mmap(int fd,
size_t size,
size_t align,
- bool shared,
- bool is_pmem);
+ uint32_t qemu_map_flags,
+ off_t map_offset);
void qemu_ram_munmap(int fd, void *ptr, size_t size);
+/*
+ * Abstraction of PROT_ and MAP_ flags as passed to mmap(), for example,
+ * consumed by qemu_ram_mmap().
+ */
+
+/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */
+#define QEMU_MAP_READONLY (1 << 0)
+
+/* Use MAP_SHARED instead of MAP_PRIVATE. */
+#define QEMU_MAP_SHARED (1 << 1)
+
+/*
+ * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without
+ * QEMU_MAP_SHARED. If mapping fails, warn and fall back to !QEMU_MAP_SYNC.
+ */
+#define QEMU_MAP_SYNC (1 << 2)
+
+/*
+ * Use MAP_NORESERVE to skip reservation of swap space (or huge pages if
+ * applicable). Bail out if not supported/effective.
+ */
+#define QEMU_MAP_NORESERVE (1 << 3)
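
Illustrative call only (fd/size come from the caller, error handling elided): map a file-backed region shared and without swap reservation, at the default alignment:

    void *ptr = qemu_ram_mmap(fd, size, 0 /* default alignment */,
                              QEMU_MAP_SHARED | QEMU_MAP_NORESERVE,
                              0 /* map_offset */);
    /* ... use the mapping ... */
    qemu_ram_munmap(fd, ptr, size);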
+
#endif
diff --git a/include/qemu/module.h b/include/qemu/module.h
index 9121a475c1..c37ce74b16 100644
--- a/include/qemu/module.h
+++ b/include/qemu/module.h
@@ -61,16 +61,132 @@ typedef enum {
#define fuzz_target_init(function) module_init(function, \
MODULE_INIT_FUZZ_TARGET)
#define migration_init(function) module_init(function, MODULE_INIT_MIGRATION)
-#define block_module_load_one(lib) module_load_one("block-", lib)
-#define ui_module_load_one(lib) module_load_one("ui-", lib)
-#define audio_module_load_one(lib) module_load_one("audio-", lib)
+#define block_module_load(lib, errp) module_load("block-", lib, errp)
+#define ui_module_load(lib, errp) module_load("ui-", lib, errp)
+#define audio_module_load(lib, errp) module_load("audio-", lib, errp)
void register_module_init(void (*fn)(void), module_init_type type);
void register_dso_module_init(void (*fn)(void), module_init_type type);
void module_call_init(module_init_type type);
-bool module_load_one(const char *prefix, const char *lib_name);
-void module_load_qom_one(const char *type);
+
+/*
+ * module_load: attempt to load a module from a set of directories
+ *
+ * directories searched are:
+ * - getenv("QEMU_MODULE_DIR")
+ * - get_relocated_path(CONFIG_QEMU_MODDIR);
+ * - /var/run/qemu/${version_dir}
+ *
+ * prefix: a subsystem prefix, or the empty string ("audio-", ..., "")
+ * name: name of the module
+ * errp: error to set in case the module is found, but load failed.
+ *
+ * Return value: -1 on error (errp set if not NULL).
+ * 0 if module or one of its dependencies are not installed,
+ * 1 if the module is found and loaded,
+ * 2 if the module is already loaded, or module is built-in.
+ */
+int module_load(const char *prefix, const char *name, Error **errp);
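
A sketch of a caller handling the documented return values; the prefix/name pair is made up:

    Error *local_err = NULL;
    int rv = module_load("block-", "curl", &local_err);

    if (rv < 0) {
        error_report_err(local_err);   /* module found but failed to load */
    } else if (rv == 0) {
        /* module or one of its dependencies is not installed */
    }
    /* rv == 1: loaded now; rv == 2: already loaded or built-in */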
+
+/*
+ * module_load_qom: attempt to load a module to provide a QOM type
+ *
+ * type: the type to be provided
+ * errp: error to set.
+ *
+ * Return value: as per module_load.
+ */
+int module_load_qom(const char *type, Error **errp);
void module_load_qom_all(void);
+void module_allow_arch(const char *arch);
+
+/**
+ * DOC: module info annotation macros
+ *
+ * ``scripts/modinfo-collect.py`` will collect module info,
+ * using the preprocessor and -DQEMU_MODINFO.
+ *
+ * ``scripts/modinfo-generate.py`` will create a module meta-data database
+ * from the collected information so qemu knows about module
+ * dependencies and QOM objects implemented by modules.
+ *
+ * See ``*.modinfo`` and ``modinfo.c`` in the build directory to check the
+ * script results.
+ */
+#ifdef QEMU_MODINFO
+# define modinfo(kind, value) \
+ MODINFO_START kind value MODINFO_END
+#else
+# define modinfo(kind, value)
+#endif
+
+/**
+ * module_obj
+ *
+ * @name: QOM type.
+ *
+ * This module implements QOM type @name.
+ */
+#define module_obj(name) modinfo(obj, name)
+
+/**
+ * module_dep
+ *
+ * @name: module name
+ *
+ * This module depends on module @name.
+ */
+#define module_dep(name) modinfo(dep, name)
+
+/**
+ * module_arch
+ *
+ * @name: target architecture
+ *
+ * This module is for target architecture @name.
+ *
+ * Note that target-dependent modules are tagged automatically, so
+ * this is only needed in case target-independent modules should be
+ * restricted. Use case example: the ccw bus is implemented by s390x
+ * only.
+ */
+#define module_arch(name) modinfo(arch, name)
+
+/**
+ * module_opts
+ *
+ * @name: QemuOpts name
+ *
+ * This module registers QemuOpts @name.
+ */
+#define module_opts(name) modinfo(opts, name)
+
+/**
+ * module_kconfig
+ *
+ * @name: Kconfig requirement necessary to load the module
+ *
+ * This module requires a core module that should be implemented and
+ * enabled in Kconfig.
+ */
+#define module_kconfig(name) modinfo(kconfig, name)
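
Taken together, a (hypothetical) module source file annotates itself at file scope like this; the annotations are what ends up in the modinfo database:

    #include "qemu/module.h"

    module_obj("my-device");              /* provides QOM type "my-device"   */
    module_dep("hw-display-virtio-gpu");  /* needs another module at runtime */
    module_kconfig(MY_DEVICE);            /* requires CONFIG_MY_DEVICE       */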
+
+/*
+ * module info database
+ *
+ * scripts/modinfo-generate.py will build this using the data collected
+ * by scripts/modinfo-collect.py
+ */
+typedef struct QemuModinfo QemuModinfo;
+struct QemuModinfo {
+ const char *name;
+ const char *arch;
+ const char **objs;
+ const char **deps;
+ const char **opts;
+};
+extern const QemuModinfo qemu_modinfo[];
+void module_init_info(const QemuModinfo *info);
#endif
diff --git a/include/qemu/mprotect.h b/include/qemu/mprotect.h
new file mode 100644
index 0000000000..1e83d1433e
--- /dev/null
+++ b/include/qemu/mprotect.h
@@ -0,0 +1,14 @@
+/*
+ * QEMU mprotect functions
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_MPROTECT_H
+#define QEMU_MPROTECT_H
+
+int qemu_mprotect_rw(void *addr, size_t size);
+int qemu_mprotect_rwx(void *addr, size_t size);
+int qemu_mprotect_none(void *addr, size_t size);
+
+#endif
diff --git a/include/qemu/notify.h b/include/qemu/notify.h
index bcfa70fb2e..abf18dbf59 100644
--- a/include/qemu/notify.h
+++ b/include/qemu/notify.h
@@ -45,12 +45,16 @@ bool notifier_list_empty(NotifierList *list);
/* Same as Notifier but allows .notify() to return errors */
typedef struct NotifierWithReturn NotifierWithReturn;
+/* Return int to allow for different failure modes and recovery actions */
+typedef int (*NotifierWithReturnFunc)(NotifierWithReturn *notifier, void *data,
+ Error **errp);
+
struct NotifierWithReturn {
/**
* Return 0 on success (next notifier will be invoked), otherwise
* notifier_with_return_list_notify() will stop and return the value.
*/
- int (*notify)(NotifierWithReturn *notifier, void *data);
+ NotifierWithReturnFunc notify;
QLIST_ENTRY(NotifierWithReturn) node;
};
@@ -69,6 +73,6 @@ void notifier_with_return_list_add(NotifierWithReturnList *list,
void notifier_with_return_remove(NotifierWithReturn *notifier);
int notifier_with_return_list_notify(NotifierWithReturnList *list,
- void *data);
+ void *data, Error **errp);
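
With the new signature, a notifier can report why it vetoed the operation; the sketch below uses made-up names:

    static int precopy_check_notify(NotifierWithReturn *notifier, void *data,
                                    Error **errp)
    {
        if (!my_resource_available()) {              /* hypothetical check */
            error_setg(errp, "resource unavailable");
            return -1;   /* stops notifier_with_return_list_notify() */
        }
        return 0;
    }

    static NotifierWithReturn precopy_notifier = {
        .notify = precopy_check_notify,
    };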
#endif
diff --git a/include/qemu/nvdimm-utils.h b/include/qemu/nvdimm-utils.h
index 4b8b198ba7..5f45774c2c 100644
--- a/include/qemu/nvdimm-utils.h
+++ b/include/qemu/nvdimm-utils.h
@@ -1,7 +1,6 @@
#ifndef NVDIMM_UTILS_H
#define NVDIMM_UTILS_H
-#include "qemu/osdep.h"
GSList *nvdimm_get_device_list(void);
#endif
diff --git a/include/qemu/option.h b/include/qemu/option.h
index 05e8a15c73..b349828782 100644
--- a/include/qemu/option.h
+++ b/include/qemu/option.h
@@ -119,8 +119,6 @@ QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id,
int fail_if_exists, Error **errp);
void qemu_opts_reset(QemuOptsList *list);
void qemu_opts_loc_restore(QemuOpts *opts);
-bool qemu_opts_set(QemuOptsList *list, const char *id,
- const char *name, const char *value, Error **errp);
const char *qemu_opts_id(QemuOpts *opts);
void qemu_opts_set_id(QemuOpts *opts, char *id);
void qemu_opts_del(QemuOpts *opts);
@@ -131,8 +129,6 @@ QemuOpts *qemu_opts_parse_noisily(QemuOptsList *list, const char *params,
bool permit_abbrev);
QemuOpts *qemu_opts_parse(QemuOptsList *list, const char *params,
bool permit_abbrev, Error **errp);
-void qemu_opts_set_defaults(QemuOptsList *list, const char *params,
- int permit_abbrev);
QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict,
Error **errp);
QDict *qemu_opts_to_qdict_filtered(QemuOpts *opts, QDict *qdict,
@@ -148,7 +144,6 @@ void qemu_opts_print_help(QemuOptsList *list, bool print_caption);
void qemu_opts_free(QemuOptsList *list);
QemuOptsList *qemu_opts_append(QemuOptsList *dst, QemuOptsList *list);
-QDict *keyval_parse(const char *params, const char *implied_key,
- Error **errp);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuOpts, qemu_opts_del)
#endif
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 20872e793e..c7053cdc2b 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -27,13 +27,29 @@
#ifndef QEMU_OSDEP_H
#define QEMU_OSDEP_H
+#if !defined _FORTIFY_SOURCE && defined __OPTIMIZE__ && __OPTIMIZE__ && defined __linux__
+# define _FORTIFY_SOURCE 2
+#endif
+
#include "config-host.h"
#ifdef NEED_CPU_H
-#include "config-target.h"
+#include CONFIG_TARGET
#else
#include "exec/poison.h"
#endif
+/*
+ * HOST_WORDS_BIGENDIAN was replaced with HOST_BIG_ENDIAN. Prevent it from
+ * creeping back in.
+ */
+#pragma GCC poison HOST_WORDS_BIGENDIAN
+
+/*
+ * TARGET_WORDS_BIGENDIAN was replaced with TARGET_BIG_ENDIAN. Prevent it from
+ * creeping back in.
+ */
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
+
#include "qemu/compiler.h"
/* Older versions of C++ don't get definitions of various macros from
@@ -57,13 +73,13 @@
#define daemon qemu_fake_daemon_function
#include <stdlib.h>
#undef daemon
-extern int daemon(int, int);
+QEMU_EXTERN_C int daemon(int, int);
#endif
#ifdef _WIN32
/* as defined in sdkddkver.h */
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x0600 /* Vista */
+#define _WIN32_WINNT 0x0602 /* Windows 8 API (should be >= the one from glib) */
#endif
/* reduces the number of implicitly included headers */
#ifndef WIN32_LEAN_AND_MEAN
@@ -76,6 +92,19 @@ extern int daemon(int, int);
#define __USE_MINGW_ANSI_STDIO 1
#endif
+/*
+ * We need the FreeBSD "legacy" definitions. Rust needs the FreeBSD 11 system
+ * calls since it doesn't use libc at all, so we have to emulate that despite
+ * FreeBSD 11 being EOL'd.
+ */
+#ifdef __FreeBSD__
+#define _WANT_FREEBSD11_STAT
+#define _WANT_FREEBSD11_STATFS
+#define _WANT_FREEBSD11_DIRENT
+#define _WANT_KERNEL_ERRNO
+#define _WANT_SEMUN
+#endif
+
#include <stdarg.h>
#include <stddef.h>
#include <stdbool.h>
@@ -104,8 +133,13 @@ extern int daemon(int, int);
#include <setjmp.h>
#include <signal.h>
-#ifdef HAVE_SYS_SIGNAL_H
-#include <sys/signal.h>
+#ifdef CONFIG_IOVEC
+#include <sys/uio.h>
+#endif
+
+#if defined(__linux__) && defined(__sparc__)
+/* The SPARC definition of QEMU_VMALLOC_ALIGN needs SHMLBA */
+#include <sys/shm.h>
#endif
#ifndef _WIN32
@@ -115,6 +149,17 @@ extern int daemon(int, int);
#define WEXITSTATUS(x) (x)
#endif
+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+#endif
+
+/*
+ * This is somewhat like a system header; it must be outside any extern "C"
+ * block because it includes system headers itself, including glib.h,
+ * which will not compile if inside an extern "C" block.
+ */
+#include "glib-compat.h"
+
#ifdef _WIN32
#include "sysemu/os-win32.h"
#endif
@@ -123,9 +168,72 @@ extern int daemon(int, int);
#include "sysemu/os-posix.h"
#endif
-#include "glib-compat.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "qemu/typedefs.h"
+/**
+ * Mark a function that executes in coroutine context
+ *
+ * Functions that execute in coroutine context cannot be called directly from
+ * normal functions. In the future it would be nice to enable compiler or
+ * static checker support for catching such errors. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ *
+ * For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ */
+#ifdef __clang__
+#define coroutine_fn QEMU_ANNOTATE("coroutine_fn")
+#else
+#define coroutine_fn
+#endif
+
+/**
+ * Mark a function that can suspend when executed in coroutine context,
+ * but can handle running in non-coroutine context too.
+ */
+#ifdef __clang__
+#define coroutine_mixed_fn QEMU_ANNOTATE("coroutine_mixed_fn")
+#else
+#define coroutine_mixed_fn
+#endif
+
+/**
+ * Mark a function that should not be called from a coroutine context.
+ * Usually there will be an analogous, coroutine_fn function that should
+ * be used instead.
+ *
+ * When the function is also marked as coroutine_mixed_fn, the function should
+ * only be called if the caller does not know whether it is in coroutine
+ * context.
+ *
+ * Functions that are only no_coroutine_fn, on the other hand, should not
+ * be called from within coroutines at all. This for example includes
+ * functions that block.
+ *
+ * In the future it would be nice to enable compiler or static checker
+ * support for catching such errors. This annotation is the first step
+ * towards this, and in the meantime it serves as documentation.
+ *
+ * For example:
+ *
+ * static void no_coroutine_fn foo(void) {
+ * ....
+ * }
+ */
+#ifdef __clang__
+#define no_coroutine_fn QEMU_ANNOTATE("no_coroutine_fn")
+#else
+#define no_coroutine_fn
+#endif
+
+
/*
* For mingw, as of v6.0.0, the function implementing the assert macro is
* not marked as noreturn, so the compiler cannot delete code following an
@@ -138,6 +246,31 @@ extern int daemon(int, int);
#define assert(x) g_assert(x)
#endif
+/**
+ * qemu_build_not_reached()
+ *
+ * The compiler, during optimization, is expected to prove that a call
+ * to this function cannot be reached and remove it. If the compiler
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
+ * this will be reported at link time due to the missing symbol.
+ */
+G_NORETURN
+void QEMU_ERROR("code path is reachable")
+ qemu_build_not_reached_always(void);
+#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
+#define qemu_build_not_reached() qemu_build_not_reached_always()
+#else
+#define qemu_build_not_reached() g_assert_not_reached()
+#endif
+
+/**
+ * qemu_build_assert()
+ *
+ * The compiler, during optimization, is expected to prove that the
+ * assertion is true.
+ */
+#define qemu_build_assert(test) while (!(test)) qemu_build_not_reached()
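
A small sketch: unlike a runtime assert, qemu_build_assert() compiles to nothing when the condition is provably true and fails the build otherwise (MyHeader is illustrative):

    static void serialize_header(const MyHeader *h, uint8_t *buf)
    {
        qemu_build_assert(sizeof(*h) == 16);   /* layout checked at build time */
        memcpy(buf, h, sizeof(*h));
    }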
+
/*
* According to waitpid man page:
* WCOREDUMP
@@ -173,8 +306,8 @@ extern int daemon(int, int);
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
-#ifndef MAP_FIXED_NOREPLACE
-#define MAP_FIXED_NOREPLACE 0
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE 0
#endif
#ifndef ENOMEDIUM
#define ENOMEDIUM ENODEV
@@ -192,6 +325,14 @@ extern int daemon(int, int);
#define ESHUTDOWN 4099
#endif
+#define RETRY_ON_EINTR(expr) \
+ (__extension__ \
+ ({ typeof(expr) __result; \
+ do { \
+ __result = (expr); \
+ } while (__result == -1 && errno == EINTR); \
+ __result; }))
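
Usage sketch: restart a system call that may be interrupted by a signal, so the caller only sees real errors:

    ssize_t ret = RETRY_ON_EINTR(read(fd, buf, len));
    if (ret < 0) {
        /* genuine error; EINTR has already been retried away */
    }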
+
/* time_t may be either 32 or 64 bits depending on the host OS, and
* can be either signed or unsigned, so we can't just hardcode a
* specific maximum value. This is not a C preprocessor constant,
@@ -222,19 +363,10 @@ extern int daemon(int, int);
#define TIME_MAX TYPE_MAXIMUM(time_t)
#endif
-/* HOST_LONG_BITS is the size of a native pointer in bits. */
-#if UINTPTR_MAX == UINT32_MAX
-# define HOST_LONG_BITS 32
-#elif UINTPTR_MAX == UINT64_MAX
-# define HOST_LONG_BITS 64
-#else
-# error Unknown pointer size
-#endif
-
/* Mac OSX has a <stdint.h> bug that incorrectly defines SIZE_MAX with
* the wrong type. Our replacement isn't usable in preprocessor
* expressions, but it is sufficient for our needs. */
-#if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX
+#ifdef HAVE_BROKEN_SIZE_MAX
#undef SIZE_MAX
#define SIZE_MAX ((size_t)-1)
#endif
@@ -255,19 +387,28 @@ extern int daemon(int, int);
* determined by the pre-processor instead of the compiler, you'll
* have to open-code it. Sadly, Coverity is severely confused by the
* constant variants, so we have to dumb things down there.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
*/
-#undef MIN
-#define MIN(a, b) \
+#define MIN_INTERNAL(a, b, _a, _b) \
({ \
typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
_a < _b ? _a : _b; \
})
-#undef MAX
-#define MAX(a, b) \
+#undef MIN
+#define MIN(a, b) \
+ MIN_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+
+#define MAX_INTERNAL(a, b, _a, _b) \
({ \
typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
_a > _b ? _a : _b; \
})
+#undef MAX
+#define MAX(a, b) \
+ MAX_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
#ifdef __COVERITY__
# define MIN_CONST(a, b) ((a) < (b) ? (a) : (b))
@@ -288,20 +429,29 @@ extern int daemon(int, int);
/*
* Minimum function that returns zero only if both values are zero.
* Intended for use with unsigned values only.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
*/
-#ifndef MIN_NON_ZERO
-#define MIN_NON_ZERO(a, b) \
+#define MIN_NON_ZERO_INTERNAL(a, b, _a, _b) \
({ \
typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
_a == 0 ? _b : (_b == 0 || _b > _a) ? _a : _b; \
})
-#endif
+#define MIN_NON_ZERO(a, b) \
+ MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
-/* Round number down to multiple */
+/*
+ * Round number down to multiple. Safe when m is not a power of 2 (see
+ * ROUND_DOWN for a faster version when a power of 2 is guaranteed).
+ */
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
-/* Round number up to multiple. Safe when m is not a power of 2 (see
- * ROUND_UP for a faster version when a power of 2 is guaranteed) */
+/*
+ * Round number up to multiple. Safe when m is not a power of 2 (see
+ * ROUND_UP for a faster version when a power of 2 is guaranteed).
+ */
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
/* Check if n is a multiple of m */
@@ -318,11 +468,22 @@ extern int daemon(int, int);
/* Check if pointer p is n-bytes aligned */
#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
-/* Round number up to multiple. Requires that d be a power of 2 (see
+/*
+ * Round number down to multiple. Requires that d be a power of 2 (see
+ * QEMU_ALIGN_UP for a safer but slower version on arbitrary
+ * numbers); works even if d is a smaller type than n.
+ */
+#ifndef ROUND_DOWN
+#define ROUND_DOWN(n, d) ((n) & -(0 ? (n) : (d)))
+#endif
+
+/*
+ * Round number up to multiple. Requires that d be a power of 2 (see
* QEMU_ALIGN_UP for a safer but slower version on arbitrary
- * numbers); works even if d is a smaller type than n. */
+ * numbers); works even if d is a smaller type than n.
+ */
#ifndef ROUND_UP
-#define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d)))
+#define ROUND_UP(n, d) ROUND_DOWN((n) + (d) - 1, (d))
#endif
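
For concreteness, a few example values (d must be a power of two for the ROUND_* forms):

    ROUND_DOWN(5, 4)       /* -> 4 */
    ROUND_UP(5, 4)         /* -> 8 */
    QEMU_ALIGN_UP(5, 3)    /* -> 6; the QEMU_ALIGN_* forms accept any multiple */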
#ifndef DIV_ROUND_UP
@@ -341,104 +502,37 @@ extern int daemon(int, int);
#endif
int qemu_daemon(int nochdir, int noclose);
-void *qemu_try_memalign(size_t alignment, size_t size);
-void *qemu_memalign(size_t alignment, size_t size);
-void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared);
-void qemu_vfree(void *ptr);
+void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared,
+ bool noreserve);
void qemu_anon_ram_free(void *ptr, size_t size);
-#define QEMU_MADV_INVALID -1
-
-#if defined(CONFIG_MADVISE)
-
-#define QEMU_MADV_WILLNEED MADV_WILLNEED
-#define QEMU_MADV_DONTNEED MADV_DONTNEED
-#ifdef MADV_DONTFORK
-#define QEMU_MADV_DONTFORK MADV_DONTFORK
-#else
-#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
-#endif
-#ifdef MADV_MERGEABLE
-#define QEMU_MADV_MERGEABLE MADV_MERGEABLE
-#else
-#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
-#endif
-#ifdef MADV_UNMERGEABLE
-#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE
-#else
-#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
-#endif
-#ifdef MADV_DODUMP
-#define QEMU_MADV_DODUMP MADV_DODUMP
-#else
-#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
-#endif
-#ifdef MADV_DONTDUMP
-#define QEMU_MADV_DONTDUMP MADV_DONTDUMP
-#else
-#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
-#endif
-#ifdef MADV_HUGEPAGE
-#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE
-#else
-#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
-#endif
-#ifdef MADV_NOHUGEPAGE
-#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE
-#else
-#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
-#endif
-#ifdef MADV_REMOVE
-#define QEMU_MADV_REMOVE MADV_REMOVE
-#else
-#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
-#endif
-
-#elif defined(CONFIG_POSIX_MADVISE)
-
-#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
-#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED
-#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
-#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
-#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
-#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
-
-#else /* no-op */
-
-#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID
-#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID
-#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
-#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
-#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
-#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
-#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
-#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
-
-#endif
-
#ifdef _WIN32
#define HAVE_CHARDEV_SERIAL 1
-#elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \
+#define HAVE_CHARDEV_PARALLEL 1
+#else
+#if defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \
|| defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \
|| defined(__GLIBC__) || defined(__APPLE__)
#define HAVE_CHARDEV_SERIAL 1
#endif
-
-#if defined(__linux__) || defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__) || defined(__DragonFly__)
-#define HAVE_CHARDEV_PARPORT 1
+#if defined(__linux__) || defined(__FreeBSD__) \
+ || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#define HAVE_CHARDEV_PARALLEL 1
+#endif
#endif
#if defined(__HAIKU__)
#define SIGIO SIGPOLL
#endif
+#ifdef HAVE_MADVISE_WITHOUT_PROTOTYPE
+/*
+ * See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for discussion
+ * about Solaris missing the madvise() prototype.
+ */
+int madvise(char *, size_t, int);
+#endif
+
#if defined(CONFIG_LINUX)
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
@@ -459,10 +553,17 @@ void qemu_anon_ram_free(void *ptr, size_t size);
/* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
# define QEMU_VMALLOC_ALIGN (256 * 4096)
#elif defined(__linux__) && defined(__sparc__)
-#include <sys/shm.h>
-# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size, SHMLBA)
+# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size(), SHMLBA)
+#elif defined(__linux__) && defined(__loongarch__)
+ /*
+ * For transparent hugepage optimization, the region had better be huge-page
+ * aligned. LoongArch hosts support two page sizes, 4K and 16K; the huge
+ * page size is calculated here from the host page size.
+ */
+# define QEMU_VMALLOC_ALIGN (qemu_real_host_page_size() * \
+ qemu_real_host_page_size() / sizeof(long))
#else
-# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size
+# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size()
#endif
#ifdef CONFIG_POSIX
@@ -493,20 +594,23 @@ void sigaction_invoke(struct sigaction *action,
struct qemu_signalfd_siginfo *info);
#endif
-int qemu_madvise(void *addr, size_t len, int advice);
-int qemu_mprotect_rwx(void *addr, size_t size);
-int qemu_mprotect_none(void *addr, size_t size);
-
-int qemu_open(const char *name, int flags, ...);
+/*
+ * Don't introduce new usage of this function, prefer the following
+ * qemu_open/qemu_create that take an "Error **errp"
+ */
+int qemu_open_old(const char *name, int flags, ...);
+int qemu_open(const char *name, int flags, Error **errp);
+int qemu_create(const char *name, int flags, mode_t mode, Error **errp);
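
A sketch of the preferred Error-based variant; the path is illustrative:

    Error *local_err = NULL;
    int fd = qemu_open("/dev/null", O_RDWR, &local_err);
    if (fd < 0) {
        error_report_err(local_err);   /* explains why the open failed */
        return -1;
    }
    /* ... */
    qemu_close(fd);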
int qemu_close(int fd);
int qemu_unlink(const char *name);
#ifndef _WIN32
+int qemu_dup_flags(int fd, int flags);
int qemu_dup(int fd);
-#endif
int qemu_lock_fd(int fd, int64_t start, int64_t len, bool exclusive);
int qemu_unlock_fd(int fd, int64_t start, int64_t len);
int qemu_lock_fd_test(int fd, int64_t start, int64_t len, bool exclusive);
bool qemu_has_ofd_lock(void);
+#endif
#if defined(__HAIKU__) && defined(__i386__)
#define FMT_pid "%ld"
@@ -532,8 +636,6 @@ struct iovec {
ssize_t readv(int fd, const struct iovec *iov, int iov_cnt);
ssize_t writev(int fd, const struct iovec *iov, int iov_cnt);
-#else
-#include <sys/uio.h>
#endif
#ifdef _WIN32
@@ -553,49 +655,18 @@ static inline void qemu_timersub(const struct timeval *val1,
#define qemu_timersub timersub
#endif
-void qemu_set_cloexec(int fd);
-
-/* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default
- * instead of QEMU_VERSION, so setting hw_version on MachineClass
- * is no longer mandatory.
- *
- * Do NOT change this string, or it will break compatibility on all
- * machine classes that don't set hw_version.
- */
-#define QEMU_HW_VERSION "2.5+"
-
-/* QEMU "hardware version" setting. Used to replace code that exposed
- * QEMU_VERSION to guests in the past and need to keep compatibility.
- * Do not use qemu_hw_version() in new code.
- */
-void qemu_set_hw_version(const char *);
-const char *qemu_hw_version(void);
-
-void fips_set_state(bool requested);
-bool fips_get_state(void);
-
-/* Return a dynamically allocated pathname denoting a file or directory that is
- * appropriate for storing local state.
- *
- * @relative_pathname need not start with a directory separator; one will be
- * added automatically.
- *
- * The caller is responsible for releasing the value returned with g_free()
- * after use.
- */
-char *qemu_get_local_state_pathname(const char *relative_pathname);
+ssize_t qemu_write_full(int fd, const void *buf, size_t count)
+ G_GNUC_WARN_UNUSED_RESULT;
-/* Find program directory, and save it for later usage with
- * qemu_get_exec_dir().
- * Try OS specific API first, if not working, parse from argv0. */
-void qemu_init_exec_dir(const char *argv0);
+void qemu_set_cloexec(int fd);
-/* Get the saved exec dir.
+/* Return a dynamically allocated directory path that is appropriate for storing
+ * local state.
*
* The caller is responsible for releasing the value returned with g_free()
* after use.
*/
-char *qemu_get_exec_dir(void);
+char *qemu_get_local_state_dir(void);
/**
* qemu_getauxval:
@@ -608,8 +679,41 @@ unsigned long qemu_getauxval(unsigned long type);
void qemu_set_tty_echo(int fd, bool echo);
-void os_mem_prealloc(int fd, char *area, size_t sz, int smp_cpus,
- Error **errp);
+typedef struct ThreadContext ThreadContext;
+
+/**
+ * qemu_prealloc_mem:
+ * @fd: the fd mapped into the area, -1 for anonymous memory
+ * @area: start address of the area to preallocate
+ * @sz: the size of the area to preallocate
+ * @max_threads: maximum number of threads to use
+ * @tc: prealloc context threads pointer, NULL if not in use
+ * @async: request asynchronous preallocation, requires @tc
+ * @errp: returns an error if this function fails
+ *
+ * Preallocate memory (populate/prefault page tables writable) for the virtual
+ * memory area starting at @area with the size of @sz. After a successful call,
+ * each page in the area was faulted in writable at least once, for example,
+ * after allocating file blocks for mapped files.
+ *
+ * When setting @async, allocation might be performed asynchronously.
+ * qemu_finish_async_prealloc_mem() must be called to finish any asynchronous
+ * preallocation.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
+ ThreadContext *tc, bool async, Error **errp);
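
Sketch of a synchronous call preallocating an anonymous region with a single thread and no thread context (area/size come from the caller):

    Error *local_err = NULL;

    if (!qemu_prealloc_mem(-1 /* anonymous */, area, size,
                           1 /* max_threads */, NULL /* tc */,
                           false /* async */, &local_err)) {
        error_report_err(local_err);
    }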
+
+/**
+ * qemu_finish_async_prealloc_mem:
+ * @errp: returns an error if this function fails
+ *
+ * Finish all outstanding asynchronous memory preallocation.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+bool qemu_finish_async_prealloc_mem(Error **errp);
/**
* qemu_get_pid_name:
@@ -621,30 +725,18 @@ void os_mem_prealloc(int fd, char *area, size_t sz, int smp_cpus,
*/
char *qemu_get_pid_name(pid_t pid);
-/**
- * qemu_fork:
- *
- * A version of fork that avoids signal handler race
- * conditions that can lead to child process getting
- * signals that are otherwise only expected by the
- * parent. It also resets all signal handlers to the
- * default settings.
- *
- * Returns 0 to child process, pid number to parent
- * or -1 on failure.
- */
-pid_t qemu_fork(Error **errp);
-
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
* when intptr_t is 32-bit and we are aligning a long long.
*/
-extern uintptr_t qemu_real_host_page_size;
-extern intptr_t qemu_real_host_page_mask;
+static inline uintptr_t qemu_real_host_page_size(void)
+{
+ return getpagesize();
+}
-extern int qemu_icache_linesize;
-extern int qemu_icache_linesize_log;
-extern int qemu_dcache_linesize;
-extern int qemu_dcache_linesize_log;
+static inline intptr_t qemu_real_host_page_mask(void)
+{
+ return -(intptr_t)qemu_real_host_page_size();
+}
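
Since the mask is simply the negated page size, a host-page-aligned base and an in-page offset can be derived as in this sketch:

    static uintptr_t host_page_start(uintptr_t addr)
    {
        return addr & qemu_real_host_page_mask();
    }

    static size_t host_page_offset(uintptr_t addr)
    {
        return addr & ~qemu_real_host_page_mask();
    }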
/*
* After using getopt or getopt_long, if you need to parse another set
@@ -661,15 +753,20 @@ static inline void qemu_reset_optind(void)
#endif
}
+int qemu_fdatasync(int fd);
+
/**
- * qemu_get_host_name:
- * @errp: Error object
+ * Sync changes made to the memory-mapped file back to the backing
+ * storage. On POSIX-compliant systems this falls back to a regular
+ * msync() call. Otherwise it triggers a whole-file sync (including
+ * metadata, as there is no support for skipping that otherwise).
*
- * Operating system agnostic way of querying host name.
- *
- * Returns allocated hostname (caller should free), NULL on failure.
+ * @addr - start of the memory area to be synced
+ * @length - length of the area to be synced
+ * @fd - file descriptor for the file to be synced
+ * (mandatory only for POSIX non-compliant systems)
*/
-char *qemu_get_host_name(Error **errp);
+int qemu_msync(void *addr, size_t length, int fd);
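
Usage sketch, assuming the usual 0-on-success convention:

    if (qemu_msync(ptr, length, fd) != 0) {
        error_report("failed to sync mapped file: %s", strerror(errno));
    }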
/**
* qemu_get_host_physmem:
@@ -683,4 +780,40 @@ char *qemu_get_host_name(Error **errp);
*/
size_t qemu_get_host_physmem(void);
+/*
+ * Toggle write/execute on the pages marked MAP_JIT
+ * for the current thread.
+ */
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+static inline void qemu_thread_jit_execute(void)
+{
+ pthread_jit_write_protect_np(true);
+}
+
+static inline void qemu_thread_jit_write(void)
+{
+ pthread_jit_write_protect_np(false);
+}
+#else
+static inline void qemu_thread_jit_write(void) {}
+static inline void qemu_thread_jit_execute(void) {}
+#endif
+
+/**
+ * Platforms which do not support system() return ENOSYS
+ */
+#ifndef HAVE_SYSTEM_FUNCTION
+#define system platform_does_not_support_system
+static inline int platform_does_not_support_system(const char *command)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif /* !HAVE_SYSTEM_FUNCTION */
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/qemu/plugin-event.h b/include/qemu/plugin-event.h
new file mode 100644
index 0000000000..7056d8427b
--- /dev/null
+++ b/include/qemu/plugin-event.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_PLUGIN_EVENT_H
+#define QEMU_PLUGIN_EVENT_H
+
+/*
+ * Events that plugins can subscribe to.
+ */
+enum qemu_plugin_event {
+ QEMU_PLUGIN_EV_VCPU_INIT,
+ QEMU_PLUGIN_EV_VCPU_EXIT,
+ QEMU_PLUGIN_EV_VCPU_TB_TRANS,
+ QEMU_PLUGIN_EV_VCPU_IDLE,
+ QEMU_PLUGIN_EV_VCPU_RESUME,
+ QEMU_PLUGIN_EV_VCPU_SYSCALL,
+ QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
+ QEMU_PLUGIN_EV_FLUSH,
+ QEMU_PLUGIN_EV_ATEXIT,
+ QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
+};
+
+#endif /* QEMU_PLUGIN_EVENT_H */
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
index fbbe99474b..71c1123308 100644
--- a/include/qemu/plugin-memory.h
+++ b/include/qemu/plugin-memory.h
@@ -6,21 +6,17 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef _PLUGIN_MEMORY_H_
-#define _PLUGIN_MEMORY_H_
+#ifndef PLUGIN_MEMORY_H
+#define PLUGIN_MEMORY_H
+
+#include "exec/cpu-defs.h"
+#include "exec/hwaddr.h"
struct qemu_plugin_hwaddr {
bool is_io;
bool is_store;
- union {
- struct {
- MemoryRegionSection *section;
- hwaddr offset;
- } io;
- struct {
- uint64_t hostaddr;
- } ram;
- } v;
+ hwaddr phys_addr;
+ MemoryRegion *mr;
};
/**
@@ -34,7 +30,7 @@ struct qemu_plugin_hwaddr {
* It would only fail if not called from an instrumented memory access
* which would be an abuse of the API.
*/
-bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data);
-#endif /* _PLUGIN_MEMORY_H_ */
+#endif /* PLUGIN_MEMORY_H */
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
index ab790ad105..12a96cea2a 100644
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -12,22 +12,9 @@
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/option.h"
-
-/*
- * Events that plugins can subscribe to.
- */
-enum qemu_plugin_event {
- QEMU_PLUGIN_EV_VCPU_INIT,
- QEMU_PLUGIN_EV_VCPU_EXIT,
- QEMU_PLUGIN_EV_VCPU_TB_TRANS,
- QEMU_PLUGIN_EV_VCPU_IDLE,
- QEMU_PLUGIN_EV_VCPU_RESUME,
- QEMU_PLUGIN_EV_VCPU_SYSCALL,
- QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
- QEMU_PLUGIN_EV_FLUSH,
- QEMU_PLUGIN_EV_ATEXIT,
- QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
-};
+#include "qemu/plugin-event.h"
+#include "exec/memopidx.h"
+#include "hw/core/cpu.h"
/*
* Option parsing/processing.
@@ -36,6 +23,25 @@ enum qemu_plugin_event {
struct qemu_plugin_desc;
typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList;
+/*
+ * Construct a qemu_plugin_meminfo_t.
+ */
+static inline qemu_plugin_meminfo_t
+make_plugin_meminfo(MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+ return oi | (rw << 16);
+}
+
+/*
+ * Extract the memory operation direction from a qemu_plugin_meminfo_t.
+ * Other portions may be extracted via get_memop and get_mmuidx.
+ */
+static inline enum qemu_plugin_mem_rw
+get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
+{
+ return i >> 16;
+}
+
#ifdef CONFIG_PLUGIN
extern QemuOptsList qemu_plugin_opts;
@@ -44,8 +50,8 @@ static inline void qemu_plugin_add_opts(void)
qemu_add_opts(&qemu_plugin_opts);
}
-void qemu_plugin_opt_parse(const char *optarg, QemuPluginList *head);
-int qemu_plugin_load_list(QemuPluginList *head);
+void qemu_plugin_opt_parse(const char *optstr, QemuPluginList *head);
+int qemu_plugin_load_list(QemuPluginList *head, Error **errp);
union qemu_plugin_cb_sig {
qemu_plugin_simple_cb_t simple;
@@ -67,6 +73,7 @@ enum plugin_dyn_cb_type {
enum plugin_dyn_cb_subtype {
PLUGIN_CB_REGULAR,
+ PLUGIN_CB_REGULAR_R,
PLUGIN_CB_INLINE,
PLUGIN_N_CB_SUBTYPES,
};
@@ -79,26 +86,37 @@ enum plugin_dyn_cb_subtype {
struct qemu_plugin_dyn_cb {
union qemu_plugin_cb_sig f;
void *userp;
- unsigned tcg_flags;
enum plugin_dyn_cb_subtype type;
/* @rw applies to mem callbacks only (both regular and inline) */
enum qemu_plugin_mem_rw rw;
/* fields specific to each dyn_cb type go here */
union {
struct {
+ qemu_plugin_u64 entry;
enum qemu_plugin_op op;
uint64_t imm;
} inline_insn;
};
};
+/* Internal context for instrumenting an instruction */
struct qemu_plugin_insn {
GByteArray *data;
uint64_t vaddr;
void *haddr;
GArray *cbs[PLUGIN_N_CB_TYPES][PLUGIN_N_CB_SUBTYPES];
bool calls_helpers;
+
+ /* if set, the instruction calls helpers that might access guest memory */
bool mem_helper;
+
+ bool mem_only;
+};
+
+/* A scoreboard is an array of values, indexed by vcpu_index */
+struct qemu_plugin_scoreboard {
+ GArray *data;
+ QLIST_ENTRY(qemu_plugin_scoreboard) entry;
};
/*
@@ -128,6 +146,7 @@ static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void)
return insn;
}
+/* Internal context for this TranslationBlock */
struct qemu_plugin_tb {
GPtrArray *insns;
size_t n;
@@ -135,15 +154,22 @@ struct qemu_plugin_tb {
uint64_t vaddr2;
void *haddr1;
void *haddr2;
+ bool mem_only;
+
+ /* if set, the TB calls helpers that might access guest memory */
+ bool mem_helper;
+
GArray *cbs[PLUGIN_N_CB_SUBTYPES];
};
/**
* qemu_plugin_tb_insn_get(): get next plugin record for translation.
- *
+ * @tb: the internal tb context
+ * @pc: address of instruction
*/
static inline
-struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
+struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
+ uint64_t pc)
{
struct qemu_plugin_insn *insn;
int i, j;
@@ -156,6 +182,7 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
g_byte_array_set_size(insn->data, 0);
insn->calls_helpers = false;
insn->mem_helper = false;
+ insn->vaddr = pc;
for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
@@ -166,6 +193,19 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
return insn;
}
+/**
+ * struct CPUPluginState - per-CPU state for plugins
+ * @event_mask: plugin event bitmap. Modified only via async work.
+ */
+struct CPUPluginState {
+ DECLARE_BITMAP(event_mask, QEMU_PLUGIN_EV_MAX);
+};
+
+/**
+ * qemu_plugin_create_vcpu_state: allocate plugin state
+ */
+CPUPluginState *qemu_plugin_create_vcpu_state(void);
+
void qemu_plugin_vcpu_init_hook(CPUState *cpu);
void qemu_plugin_vcpu_exit_hook(CPUState *cpu);
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb);
@@ -177,7 +217,8 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1,
uint64_t a6, uint64_t a7, uint64_t a8);
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret);
-void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo);
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+ MemOpIdx oi, enum qemu_plugin_mem_rw rw);
void qemu_plugin_flush_cb(void);
@@ -185,21 +226,51 @@ void qemu_plugin_atexit_cb(void);
void qemu_plugin_add_dyn_cb_arr(GArray *arr);
-void qemu_plugin_disable_mem_helpers(CPUState *cpu);
+static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu)
+{
+ cpu->plugin_mem_cbs = NULL;
+}
+
+/**
+ * qemu_plugin_user_exit(): clean-up callbacks before calling exit callbacks
+ *
+ * This is a user-mode only helper that ensures we have fully cleared
+ * callbacks from all threads before calling the exit callbacks. This
+ * is so the plugins themselves don't have to jump through hoops to
+ * guard against race conditions.
+ */
+void qemu_plugin_user_exit(void);
+
+/**
+ * qemu_plugin_user_prefork_lock(): take plugin lock before forking
+ *
+ * This is a user-mode only helper to take the internal plugin lock
+ * before a fork event. This ensures a consistent lock state.
+ */
+void qemu_plugin_user_prefork_lock(void);
+
+/**
+ * qemu_plugin_user_postfork(): reset the plugin lock
+ * @is_child: is this thread the child
+ *
+ * This user-mode only helper resets the lock state after a fork so we
+ * can continue using the plugin interface.
+ */
+void qemu_plugin_user_postfork(bool is_child);
#else /* !CONFIG_PLUGIN */
static inline void qemu_plugin_add_opts(void)
{ }
-static inline void qemu_plugin_opt_parse(const char *optarg,
+static inline void qemu_plugin_opt_parse(const char *optstr,
QemuPluginList *head)
{
error_report("plugin interface not enabled in this build");
exit(1);
}
-static inline int qemu_plugin_load_list(QemuPluginList *head)
+static inline int qemu_plugin_load_list(QemuPluginList *head, Error **errp)
{
return 0;
}
@@ -231,7 +302,8 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{ }
static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
- uint32_t meminfo)
+ MemOpIdx oi,
+ enum qemu_plugin_mem_rw rw)
{ }
static inline void qemu_plugin_flush_cb(void)
@@ -247,6 +319,15 @@ void qemu_plugin_add_dyn_cb_arr(GArray *arr)
static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{ }
+static inline void qemu_plugin_user_exit(void)
+{ }
+
+static inline void qemu_plugin_user_prefork_lock(void)
+{ }
+
+static inline void qemu_plugin_user_postfork(bool is_child)
+{ }
+
#endif /* !CONFIG_PLUGIN */
#endif /* QEMU_PLUGIN_H */
diff --git a/include/qemu/processor.h b/include/qemu/processor.h
index 8e16c9277d..9f0dcdf28f 100644
--- a/include/qemu/processor.h
+++ b/include/qemu/processor.h
@@ -7,8 +7,6 @@
#ifndef QEMU_PROCESSOR_H
#define QEMU_PROCESSOR_H
-#include "qemu/atomic.h"
-
#if defined(__i386__) || defined(__x86_64__)
# define cpu_relax() asm volatile("rep; nop" ::: "memory")
diff --git a/include/qemu/progress_meter.h b/include/qemu/progress_meter.h
index 9a23ff071c..0f2c0a32d2 100644
--- a/include/qemu/progress_meter.h
+++ b/include/qemu/progress_meter.h
@@ -27,6 +27,8 @@
#ifndef QEMU_PROGRESS_METER_H
#define QEMU_PROGRESS_METER_H
+#include "qemu/thread.h"
+
typedef struct ProgressMeter {
/**
* Current progress. The unit is arbitrary as long as the ratio between
@@ -37,22 +39,24 @@ typedef struct ProgressMeter {
/** Estimated current value at the completion of the process */
uint64_t total;
+
+ QemuMutex lock; /* protects concurrent access to above fields */
} ProgressMeter;
-static inline void progress_work_done(ProgressMeter *pm, uint64_t done)
-{
- pm->current += done;
-}
-
-static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
-{
- pm->total = pm->current + remaining;
-}
-
-static inline void progress_increase_remaining(ProgressMeter *pm,
- uint64_t delta)
-{
- pm->total += delta;
-}
+void progress_init(ProgressMeter *pm);
+void progress_destroy(ProgressMeter *pm);
+
+/* Get a snapshot of internal current and total values */
+void progress_get_snapshot(ProgressMeter *pm, uint64_t *current,
+ uint64_t *total);
+
+/* Increases the amount of work done so far by @done */
+void progress_work_done(ProgressMeter *pm, uint64_t done);
+
+/* Sets how much work has to be done to complete to @remaining */
+void progress_set_remaining(ProgressMeter *pm, uint64_t remaining);
+
+/* Increases the total work to do by @delta */
+void progress_increase_remaining(ProgressMeter *pm, uint64_t delta);
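
The locked API replaces the old open-coded field accesses; a typical (illustrative) job loop would look roughly like this, with total_work, work_left() and do_one_chunk() standing in for real job logic:

    ProgressMeter pm;
    uint64_t current, total;

    progress_init(&pm);
    progress_set_remaining(&pm, total_work);

    while (work_left()) {
        uint64_t done = do_one_chunk();
        progress_work_done(&pm, done);
        progress_get_snapshot(&pm, &current, &total);
    }

    progress_destroy(&pm);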
#endif /* QEMU_PROGRESS_METER_H */
diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
index bab8b0d4b3..4fc6c3739b 100644
--- a/include/qemu/qemu-plugin.h
+++ b/include/qemu/qemu-plugin.h
@@ -7,9 +7,11 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef QEMU_PLUGIN_API_H
-#define QEMU_PLUGIN_API_H
+#ifndef QEMU_QEMU_PLUGIN_H
+#define QEMU_QEMU_PLUGIN_H
+
+#include <glib.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
@@ -21,22 +23,23 @@
* https://gcc.gnu.org/wiki/Visibility
*/
#if defined _WIN32 || defined __CYGWIN__
- #ifdef BUILDING_DLL
- #define QEMU_PLUGIN_EXPORT __declspec(dllexport)
- #else
+ #ifdef CONFIG_PLUGIN
#define QEMU_PLUGIN_EXPORT __declspec(dllimport)
+ #define QEMU_PLUGIN_API __declspec(dllexport)
+ #else
+ #define QEMU_PLUGIN_EXPORT __declspec(dllexport)
+ #define QEMU_PLUGIN_API __declspec(dllimport)
#endif
#define QEMU_PLUGIN_LOCAL
#else
- #if __GNUC__ >= 4
- #define QEMU_PLUGIN_EXPORT __attribute__((visibility("default")))
- #define QEMU_PLUGIN_LOCAL __attribute__((visibility("hidden")))
- #else
- #define QEMU_PLUGIN_EXPORT
- #define QEMU_PLUGIN_LOCAL
- #endif
+ #define QEMU_PLUGIN_EXPORT __attribute__((visibility("default")))
+ #define QEMU_PLUGIN_LOCAL __attribute__((visibility("hidden")))
+ #define QEMU_PLUGIN_API
#endif
+/**
+ * typedef qemu_plugin_id_t - Unique plugin ID
+ */
typedef uint64_t qemu_plugin_id_t;
/*
@@ -48,28 +51,42 @@ typedef uint64_t qemu_plugin_id_t;
*
* The plugins export the API they were built against by exposing the
* symbol qemu_plugin_version which can be checked.
+ *
+ * version 2:
+ * - removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus
+ * - Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline.
+ * Those functions are replaced by *_per_vcpu variants, which guarantee
+ * thread-safety for operations.
*/
extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
-#define QEMU_PLUGIN_VERSION 0
+#define QEMU_PLUGIN_VERSION 2
-typedef struct {
- /* string describing architecture */
+/**
+ * struct qemu_info_t - system information for plugins
+ *
+ * This structure provides for some limited information about the
+ * system to allow the plugin to make decisions on how to proceed. For
+ * example it might only be suitable for running on some guest
+ * architectures or when under full system emulation.
+ */
+typedef struct qemu_info_t {
+ /** @target_name: string describing architecture */
const char *target_name;
+ /** @version: minimum and current plugin API level */
struct {
int min;
int cur;
} version;
- /* is this a full system emulation? */
+ /** @system_emulation: is this a full system emulation? */
bool system_emulation;
union {
- /*
- * smp_vcpus may change if vCPUs can be hot-plugged, max_vcpus
- * is the system-wide limit.
- */
+ /** @system: information relevant to system emulation */
struct {
+ /** @system.smp_vcpus: initial number of vCPUs */
int smp_vcpus;
+ /** @system.max_vcpus: maximum possible number of vCPUs */
int max_vcpus;
} system;
};
@@ -82,31 +99,50 @@ typedef struct {
* @argc: number of arguments
* @argv: array of arguments (@argc elements)
*
- * All plugins must export this symbol.
- *
- * Note: Calling qemu_plugin_uninstall() from this function is a bug. To raise
- * an error during install, return !0.
+ * All plugins must export this symbol which is called when the plugin
+ * is first loaded. Calling qemu_plugin_uninstall() from this function
+ * is a bug.
*
* Note: @info is only live during the call. Copy any information we
- * want to keep.
+ * want to keep. @argv remains valid throughout the lifetime of the
+ * loaded plugin.
*
- * Note: @argv remains valid throughout the lifetime of the loaded plugin.
+ * Return: 0 on successful loading, !0 for an error.
*/
QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
const qemu_info_t *info,
int argc, char **argv);
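
A minimal, hypothetical plugin skeleton built against this header looks like the following; it only checks the emulation mode before (eventually) registering callbacks:

    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        if (!info->system_emulation) {
            return -1;         /* only meaningful under full system emulation */
        }
        /* register translation/exec callbacks here */
        return 0;
    }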
-/*
- * Prototypes for the various callback styles we will be registering
- * in the following functions.
+/**
+ * typedef qemu_plugin_simple_cb_t - simple callback
+ * @id: the unique qemu_plugin_id_t
+ *
+ * This callback passes no information aside from the unique @id.
*/
typedef void (*qemu_plugin_simple_cb_t)(qemu_plugin_id_t id);
+/**
+ * typedef qemu_plugin_udata_cb_t - callback with user data
+ * @id: the unique qemu_plugin_id_t
+ * @userdata: a pointer to some user data supplied when the callback
+ * was registered.
+ */
typedef void (*qemu_plugin_udata_cb_t)(qemu_plugin_id_t id, void *userdata);
+/**
+ * typedef qemu_plugin_vcpu_simple_cb_t - vcpu callback
+ * @id: the unique qemu_plugin_id_t
+ * @vcpu_index: the current vcpu context
+ */
typedef void (*qemu_plugin_vcpu_simple_cb_t)(qemu_plugin_id_t id,
unsigned int vcpu_index);
+/**
+ * typedef qemu_plugin_vcpu_udata_cb_t - vcpu callback
+ * @vcpu_index: the current vcpu context
+ * @userdata: a pointer to some user data supplied when the callback
+ * was registered.
+ */
typedef void (*qemu_plugin_vcpu_udata_cb_t)(unsigned int vcpu_index,
void *userdata);
@@ -121,6 +157,7 @@ typedef void (*qemu_plugin_vcpu_udata_cb_t)(unsigned int vcpu_index,
*
* Note: Calling this function from qemu_plugin_install() is a bug.
*/
+QEMU_PLUGIN_API
void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb);
/**
@@ -134,6 +171,7 @@ void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb);
* Plugins are reset asynchronously, and therefore the given plugin receives
* callbacks until @cb is called.
*/
+QEMU_PLUGIN_API
void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb);
/**
@@ -145,6 +183,7 @@ void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb);
*
* See also: qemu_plugin_register_vcpu_exit_cb()
*/
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_simple_cb_t cb);
@@ -157,6 +196,7 @@ void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id,
*
* See also: qemu_plugin_register_vcpu_init_cb()
*/
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_simple_cb_t cb);
@@ -167,6 +207,7 @@ void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
*
* The @cb function is called every time a vCPU idles.
*/
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_simple_cb_t cb);
@@ -177,20 +218,42 @@ void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
*
* The @cb function is called every time a vCPU resumes execution.
*/
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_simple_cb_t cb);
-/*
- * Opaque types that the plugin is given during the translation and
- * instrumentation phase.
- */
+/** struct qemu_plugin_tb - Opaque handle for a translation block */
struct qemu_plugin_tb;
+/** struct qemu_plugin_insn - Opaque handle for a translated instruction */
struct qemu_plugin_insn;
+/** struct qemu_plugin_scoreboard - Opaque handle for a scoreboard */
+struct qemu_plugin_scoreboard;
+
+/**
+ * typedef qemu_plugin_u64 - uint64_t member of an entry in a scoreboard
+ *
+ * This field gives access to a specific uint64_t member in one given entry,
+ * located at a specified offset. Inline operations expect this as their entry.
+ */
+typedef struct {
+ struct qemu_plugin_scoreboard *score;
+ size_t offset;
+} qemu_plugin_u64;
+/**
+ * enum qemu_plugin_cb_flags - type of callback
+ *
+ * @QEMU_PLUGIN_CB_NO_REGS: callback does not access the CPU's regs
+ * @QEMU_PLUGIN_CB_R_REGS: callback reads the CPU's regs
+ * @QEMU_PLUGIN_CB_RW_REGS: callback reads and writes the CPU's regs
+ *
+ * Note: currently QEMU_PLUGIN_CB_RW_REGS is unused; plugins cannot change
+ * system register state.
+ */
enum qemu_plugin_cb_flags {
- QEMU_PLUGIN_CB_NO_REGS, /* callback does not access the CPU's regs */
- QEMU_PLUGIN_CB_R_REGS, /* callback reads the CPU's regs */
- QEMU_PLUGIN_CB_RW_REGS, /* callback reads and writes the CPU's regs */
+ QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_CB_R_REGS,
+ QEMU_PLUGIN_CB_RW_REGS,
};
enum qemu_plugin_mem_rw {
@@ -200,6 +263,14 @@ enum qemu_plugin_mem_rw {
};
/**
+ * typedef qemu_plugin_vcpu_tb_trans_cb_t - translation callback
+ * @id: unique plugin id
+ * @tb: opaque handle used for querying and instrumenting a block.
+ */
+typedef void (*qemu_plugin_vcpu_tb_trans_cb_t)(qemu_plugin_id_t id,
+ struct qemu_plugin_tb *tb);
+
+/**
* qemu_plugin_register_vcpu_tb_trans_cb() - register a translate cb
* @id: plugin ID
* @cb: callback function
@@ -211,14 +282,12 @@ enum qemu_plugin_mem_rw {
* callbacks to be triggered when the block or individual instruction
* executes.
*/
-typedef void (*qemu_plugin_vcpu_tb_trans_cb_t)(qemu_plugin_id_t id,
- struct qemu_plugin_tb *tb);
-
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_tb_trans_cb_t cb);
/**
- * qemu_plugin_register_vcpu_tb_trans_exec_cb() - register execution callback
+ * qemu_plugin_register_vcpu_tb_exec_cb() - register execution callback
* @tb: the opaque qemu_plugin_tb handle for the translation
* @cb: callback function
* @flags: does the plugin read or write the CPU's registers?
@@ -226,29 +295,39 @@ void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
*
* The @cb function is called every time a translated unit executes.
*/
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
qemu_plugin_vcpu_udata_cb_t cb,
enum qemu_plugin_cb_flags flags,
void *userdata);
+/**
+ * enum qemu_plugin_op - describes an inline op
+ *
+ * @QEMU_PLUGIN_INLINE_ADD_U64: add an immediate uint64_t value
+ *
+ * Note: currently only a single inline op is supported.
+ */
+
enum qemu_plugin_op {
QEMU_PLUGIN_INLINE_ADD_U64,
};
/**
- * qemu_plugin_register_vcpu_tb_trans_exec_inline() - execution inline op
+ * qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op
* @tb: the opaque qemu_plugin_tb handle for the translation
* @op: the type of qemu_plugin_op (e.g. ADD_U64)
- * @ptr: the target memory location for the op
+ * @entry: entry to run op
* @imm: the op data (e.g. 1)
*
- * Insert an inline op to every time a translated unit executes.
- * Useful if you just want to increment a single counter somewhere in
- * memory.
+ * Insert an inline op, applied to the executing vCPU's slot of the given
+ * scoreboard @entry, every time a translated unit executes.
*/
-void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
- enum qemu_plugin_op op,
- void *ptr, uint64_t imm);
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ struct qemu_plugin_tb *tb,
+ enum qemu_plugin_op op,
+ qemu_plugin_u64 entry,
+ uint64_t imm);
/**
* qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb
@@ -259,61 +338,147 @@ void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
*
* The @cb function is called every time an instruction is executed
*/
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
qemu_plugin_vcpu_udata_cb_t cb,
enum qemu_plugin_cb_flags flags,
void *userdata);
/**
- * qemu_plugin_register_vcpu_insn_exec_inline() - insn execution inline op
+ * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op
* @insn: the opaque qemu_plugin_insn handle for an instruction
- * @cb: callback function
* @op: the type of qemu_plugin_op (e.g. ADD_U64)
- * @ptr: the target memory location for the op
+ * @entry: entry to run op
* @imm: the op data (e.g. 1)
*
- * Insert an inline op to every time an instruction executes. Useful
- * if you just want to increment a single counter somewhere in memory.
+ * Insert an inline op that runs every time an instruction executes.
*/
-void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
- enum qemu_plugin_op op,
- void *ptr, uint64_t imm);
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ struct qemu_plugin_insn *insn,
+ enum qemu_plugin_op op,
+ qemu_plugin_u64 entry,
+ uint64_t imm);
-/*
- * Helpers to query information about the instructions in a block
+/**
+ * qemu_plugin_tb_n_insns() - query helper for number of insns in TB
+ * @tb: opaque handle to TB passed to callback
+ *
+ * Returns: number of instructions in this block
*/
+QEMU_PLUGIN_API
size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb);
+/**
+ * qemu_plugin_tb_vaddr() - query helper for vaddr of TB start
+ * @tb: opaque handle to TB passed to callback
+ *
+ * Returns: virtual address of block start
+ */
+QEMU_PLUGIN_API
uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb);
+/**
+ * qemu_plugin_tb_get_insn() - retrieve handle for instruction
+ * @tb: opaque handle to TB passed to callback
+ * @idx: instruction number, 0 indexed
+ *
+ * The returned handle can be used in follow up helper queries as well
+ * as when instrumenting an instruction. It is only valid for the
+ * lifetime of the callback.
+ *
+ * Returns: opaque handle to instruction
+ */
+QEMU_PLUGIN_API
struct qemu_plugin_insn *
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx);
+/**
+ * qemu_plugin_insn_data() - return ptr to instruction data
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Note: data is only valid for duration of callback. See
+ * qemu_plugin_insn_size() to calculate size of stream.
+ *
+ * Returns: pointer to a stream of bytes containing the value of this
+ * instruction's opcode.
+ */
+QEMU_PLUGIN_API
const void *qemu_plugin_insn_data(const struct qemu_plugin_insn *insn);
+/**
+ * qemu_plugin_insn_size() - return size of instruction
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Returns: size of instruction in bytes
+ */
+QEMU_PLUGIN_API
size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn);
+/**
+ * qemu_plugin_insn_vaddr() - return vaddr of instruction
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Returns: virtual address of instruction
+ */
+QEMU_PLUGIN_API
uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn);
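
Taken together, these helpers are typically used from the translation callback; a sketch (illustrative, not part of the patch) of per-instruction instrumentation:

static void insn_exec(unsigned int vcpu_index, void *udata)
{
    uint64_t vaddr = (uintptr_t)udata;   /* stashed at translation time */
    (void)vaddr;                         /* e.g. trace or count it here */
}

static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        uint64_t vaddr = qemu_plugin_insn_vaddr(insn);

        qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS,
                                               (void *)(uintptr_t)vaddr);
    }
}
/* registered with qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans) */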
+
+/**
+ * qemu_plugin_insn_haddr() - return hardware addr of instruction
+ * @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
+ *
+ * Returns: hardware (physical) target address of instruction
+ */
+QEMU_PLUGIN_API
void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn);
-/*
- * Memory Instrumentation
+/**
+ * typedef qemu_plugin_meminfo_t - opaque memory transaction handle
*
- * The anonymous qemu_plugin_meminfo_t and qemu_plugin_hwaddr types
- * can be used in queries to QEMU to get more information about a
- * given memory access.
+ * This can be further queried using the qemu_plugin_mem_* query
+ * functions.
*/
typedef uint32_t qemu_plugin_meminfo_t;
+/** struct qemu_plugin_hwaddr - opaque hw address handle */
struct qemu_plugin_hwaddr;
-/* meminfo queries */
+/**
+ * qemu_plugin_mem_size_shift() - get size of access
+ * @info: opaque memory transaction handle
+ *
+ * Returns: log2 of the access size (0=byte, 1=16bit, 2=32bit etc...)
+ */
+QEMU_PLUGIN_API
unsigned int qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info);
+/**
+ * qemu_plugin_mem_is_sign_extended() - was the access sign extended
+ * @info: opaque memory transaction handle
+ *
+ * Returns: true if it was, otherwise false
+ */
+QEMU_PLUGIN_API
bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info);
+/**
+ * qemu_plugin_mem_is_big_endian() - was the access big endian
+ * @info: opaque memory transaction handle
+ *
+ * Returns: true if it was, otherwise false
+ */
+QEMU_PLUGIN_API
bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info);
+/**
+ * qemu_plugin_mem_is_store() - was the access a store
+ * @info: opaque memory transaction handle
+ *
+ * Returns: true if it was, otherwise false
+ */
+QEMU_PLUGIN_API
bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info);
-/*
- * qemu_plugin_get_hwaddr():
+/**
+ * qemu_plugin_get_hwaddr() - return handle for memory operation
+ * @info: opaque memory info structure
* @vaddr: the virtual address of the memory operation
*
* For system emulation returns a qemu_plugin_hwaddr handle to query
@@ -324,34 +489,102 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info);
* information about the handle should be recovered before the
* callback returns.
*/
+QEMU_PLUGIN_API
struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
uint64_t vaddr);
/*
- * The following additional queries can be run on the hwaddr structure
- * to return information about it. For non-IO accesses the device
- * offset will be into the appropriate block of RAM.
+ * The following additional queries can be run on the hwaddr structure to
+ * return information about it - namely whether it is for an IO access and the
+ * physical address associated with the access.
*/
+
+/**
+ * qemu_plugin_hwaddr_is_io() - query whether memory operation is IO
+ * @haddr: address handle from qemu_plugin_get_hwaddr()
+ *
+ * Returns true if the handle's memory operation is to memory-mapped IO, or
+ * false if it is to RAM
+ */
+QEMU_PLUGIN_API
bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr);
-uint64_t qemu_plugin_hwaddr_device_offset(const struct qemu_plugin_hwaddr *haddr);
-typedef void
-(*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index,
- qemu_plugin_meminfo_t info, uint64_t vaddr,
- void *userdata);
+/**
+ * qemu_plugin_hwaddr_phys_addr() - query physical address for memory operation
+ * @haddr: address handle from qemu_plugin_get_hwaddr()
+ *
+ * Returns the physical address associated with the memory operation
+ *
+ * Note that the returned physical address may not be unique if you are dealing
+ * with multiple address spaces.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr);
+
+/**
+ * qemu_plugin_hwaddr_device_name() - query name of the device for the access
+ * @h: address handle from qemu_plugin_get_hwaddr()
+ *
+ * Returns a string representing the device. The string is valid for
+ * the lifetime of the plugin.
+ */
+QEMU_PLUGIN_API
+const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h);
+
+/**
+ * typedef qemu_plugin_vcpu_mem_cb_t - memory callback function type
+ * @vcpu_index: the executing vCPU
+ * @info: an opaque handle for further queries about the memory
+ * @vaddr: the virtual address of the transaction
+ * @userdata: any user data attached to the callback
+ */
+typedef void (*qemu_plugin_vcpu_mem_cb_t) (unsigned int vcpu_index,
+ qemu_plugin_meminfo_t info,
+ uint64_t vaddr,
+ void *userdata);
+/**
+ * qemu_plugin_register_vcpu_mem_cb() - register memory access callback
+ * @insn: handle for instruction to instrument
+ * @cb: callback of type qemu_plugin_vcpu_mem_cb_t
+ * @flags: (currently unused) callback flags
+ * @rw: monitor reads, writes or both
+ * @userdata: opaque pointer for userdata
+ *
+ * This registers a full callback for every memory access generated by
+ * an instruction. If the instruction doesn't access memory, no
+ * callback will be made.
+ *
+ * The callback reports the vCPU the access took place on, the virtual
+ * address of the access and a handle for further queries. The user
+ * can attach some userdata to the callback for additional purposes.
+ *
+ * Other execution threads will continue to execute during the
+ * callback so the plugin is responsible for ensuring it doesn't get
+ * confused by making appropriate use of locking if required.
+ */
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
qemu_plugin_vcpu_mem_cb_t cb,
enum qemu_plugin_cb_flags flags,
enum qemu_plugin_mem_rw rw,
void *userdata);
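
A sketch (illustrative, not part of the patch) of wiring a memory callback up from a translation callback and querying the access:

static void mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                   uint64_t vaddr, void *udata)
{
    if (qemu_plugin_mem_is_store(info)) {
        /* hwaddr queries must happen before this callback returns */
        struct qemu_plugin_hwaddr *h = qemu_plugin_get_hwaddr(info, vaddr);
        if (h && qemu_plugin_hwaddr_is_io(h)) {
            /* e.g. count an MMIO store */
        }
    }
}

static void tb_trans_mem(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, mem_cb,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}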
-void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
- enum qemu_plugin_mem_rw rw,
- enum qemu_plugin_op op, void *ptr,
- uint64_t imm);
-
-
+/**
+ * qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access
+ * @insn: handle for instruction to instrument
+ * @rw: apply to reads, writes or both
+ * @op: the op, of type qemu_plugin_op
+ * @entry: entry to run op
+ * @imm: immediate data for @op
+ *
+ * This registers an inline op for every memory access generated by the
+ * instruction.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ struct qemu_plugin_insn *insn,
+ enum qemu_plugin_mem_rw rw,
+ enum qemu_plugin_op op,
+ qemu_plugin_u64 entry,
+ uint64_t imm);
typedef void
(*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index,
@@ -359,6 +592,7 @@ typedef void
uint64_t a3, uint64_t a4, uint64_t a5,
uint64_t a6, uint64_t a7, uint64_t a8);
+QEMU_PLUGIN_API
void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_syscall_cb_t cb);
@@ -366,6 +600,7 @@ typedef void
(*qemu_plugin_vcpu_syscall_ret_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_idx,
int64_t num, int64_t ret);
+QEMU_PLUGIN_API
void
qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
qemu_plugin_vcpu_syscall_ret_cb_t cb);
@@ -378,9 +613,20 @@ qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,
* Returns an allocated string containing the disassembly
*/
+QEMU_PLUGIN_API
char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn);
/**
+ * qemu_plugin_insn_symbol() - best effort symbol lookup
+ * @insn: instruction reference
+ *
+ * Return a static string referring to the symbol. This depends on the
+ * binary QEMU is running having provided a symbol table.
+ */
+QEMU_PLUGIN_API
+const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn);
+
+/**
* qemu_plugin_vcpu_for_each() - iterate over the existing vCPU
* @id: plugin ID
* @cb: callback function
@@ -389,25 +635,206 @@ char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn);
*
* See also: qemu_plugin_register_vcpu_init_cb()
*/
+QEMU_PLUGIN_API
void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
qemu_plugin_vcpu_simple_cb_t cb);
+QEMU_PLUGIN_API
void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
qemu_plugin_simple_cb_t cb);
+/**
+ * qemu_plugin_register_atexit_cb() - register exit callback
+ * @id: plugin ID
+ * @cb: callback
+ * @userdata: user data for callback
+ *
+ * The @cb function is called once execution has finished. Plugins
+ * should be able to free all their resources at this point much like
+ * after a reset/uninstall callback is called.
+ *
+ * In user-mode it is possible a few un-instrumented instructions from
+ * child threads may run before the host kernel reaps the threads.
+ */
+QEMU_PLUGIN_API
void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
qemu_plugin_udata_cb_t cb, void *userdata);
-/* returns -1 in user-mode */
-int qemu_plugin_n_vcpus(void);
-
-/* returns -1 in user-mode */
-int qemu_plugin_n_max_vcpus(void);
+/* returns how many vcpus were started at this point */
+int qemu_plugin_num_vcpus(void);
/**
* qemu_plugin_outs() - output string via QEMU's logging system
* @string: a string
*/
+QEMU_PLUGIN_API
void qemu_plugin_outs(const char *string);
-#endif /* QEMU_PLUGIN_API_H */
+/**
+ * qemu_plugin_bool_parse() - parses a boolean argument in the form of
+ * "<argname>=[on|yes|true|off|no|false]"
+ *
+ * @name: argument name, the part before the equals sign
+ * @val: argument value, what's after the equals sign
+ * @ret: output return value
+ *
+ * returns true if the combination @name=@val parses correctly to a boolean
+ * argument, and false otherwise
+ */
+QEMU_PLUGIN_API
+bool qemu_plugin_bool_parse(const char *name, const char *val, bool *ret);
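
A sketch of typical argument handling in qemu_plugin_install() using this helper (illustrative; the "inline" option name is made up):

static bool do_inline;

static int parse_args(int argc, char **argv)
{
    for (int i = 0; i < argc; i++) {
        g_auto(GStrv) tokens = g_strsplit(argv[i], "=", 2);

        if (g_strcmp0(tokens[0], "inline") == 0) {
            if (!tokens[1] ||
                !qemu_plugin_bool_parse(tokens[0], tokens[1], &do_inline)) {
                qemu_plugin_outs("boolean argument parsing failed\n");
                return -1;
            }
        } else {
            qemu_plugin_outs("unknown argument\n");
            return -1;
        }
    }
    return 0;
}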
+
+/**
+ * qemu_plugin_path_to_binary() - path to binary file being executed
+ *
+ * Return a string representing the path to the binary. For user-mode
+ * this is the main executable. For system emulation we currently
+ * return NULL. The user should g_free() the string once no longer
+ * needed.
+ */
+QEMU_PLUGIN_API
+const char *qemu_plugin_path_to_binary(void);
+
+/**
+ * qemu_plugin_start_code() - returns start of text segment
+ *
+ * Returns the nominal start address of the main text segment in
+ * user-mode. Currently returns 0 for system emulation.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_start_code(void);
+
+/**
+ * qemu_plugin_end_code() - returns end of text segment
+ *
+ * Returns the nominal end address of the main text segment in
+ * user-mode. Currently returns 0 for system emulation.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_end_code(void);
+
+/**
+ * qemu_plugin_entry_code() - returns start address for module
+ *
+ * Returns the nominal entry address of the main text segment in
+ * user-mode. Currently returns 0 for system emulation.
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_entry_code(void);
+
+/** struct qemu_plugin_register - Opaque handle for register access */
+struct qemu_plugin_register;
+
+/**
+ * typedef qemu_plugin_reg_descriptor - register description
+ *
+ * @handle: opaque handle for retrieving value with qemu_plugin_read_register
+ * @name: register name
+ * @feature: optional feature descriptor, can be NULL
+ */
+typedef struct {
+ struct qemu_plugin_register *handle;
+ const char *name;
+ const char *feature;
+} qemu_plugin_reg_descriptor;
+
+/**
+ * qemu_plugin_get_registers() - return register list for current vCPU
+ *
+ * Returns a potentially empty GArray of qemu_plugin_reg_descriptor.
+ * Caller frees the array (but not the const strings).
+ *
+ * Should be used from a qemu_plugin_register_vcpu_init_cb() callback
+ * after the vCPU is initialised, i.e. in the vCPU context.
+ */
+QEMU_PLUGIN_API
+GArray *qemu_plugin_get_registers(void);
+
+/**
+ * qemu_plugin_read_register() - read register for current vCPU
+ *
+ * @handle: a @qemu_plugin_reg_handle handle
+ * @buf: A GByteArray for the data owned by the plugin
+ *
+ * This function is only available in a context where register read access is
+ * explicitly requested via the QEMU_PLUGIN_CB_R_REGS flag.
+ *
+ * Returns the size of the read register. The content of @buf is in target byte
+ * order. On failure returns -1.
+ */
+QEMU_PLUGIN_API
+int qemu_plugin_read_register(struct qemu_plugin_register *handle,
+ GByteArray *buf);
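
A sketch of the intended flow (illustrative, not part of the patch): look up a handle from the vCPU init callback, then read it from a callback registered with QEMU_PLUGIN_CB_R_REGS. The register name "pc" is target dependent and used here only as an example.

static struct qemu_plugin_register *pc_handle;

static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
{
    g_autoptr(GArray) regs = qemu_plugin_get_registers();

    for (guint i = 0; i < regs->len; i++) {
        qemu_plugin_reg_descriptor *d =
            &g_array_index(regs, qemu_plugin_reg_descriptor, i);
        if (g_strcmp0(d->name, "pc") == 0) {
            pc_handle = d->handle;
        }
    }
}

static void insn_exec_regs(unsigned int vcpu_index, void *udata)
{
    /* only valid because this callback was registered with R_REGS */
    g_autoptr(GByteArray) buf = g_byte_array_new();

    if (pc_handle && qemu_plugin_read_register(pc_handle, buf) > 0) {
        /* buf->data holds the register value in target byte order */
    }
}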
+
+/**
+ * qemu_plugin_scoreboard_new() - alloc a new scoreboard
+ *
+ * @element_size: size (in bytes) for one entry
+ *
+ * Returns a pointer to a new scoreboard. It must be freed using
+ * qemu_plugin_scoreboard_free.
+ */
+QEMU_PLUGIN_API
+struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size);
+
+/**
+ * qemu_plugin_scoreboard_free() - free a scoreboard
+ * @score: scoreboard to free
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_scoreboard_free(struct qemu_plugin_scoreboard *score);
+
+/**
+ * qemu_plugin_scoreboard_find() - get pointer to an entry of a scoreboard
+ * @score: scoreboard to query
+ * @vcpu_index: entry index
+ *
+ * Returns the address of the scoreboard entry matching a given vcpu_index.
+ * Note that this address may change if the scoreboard is resized.
+ */
+QEMU_PLUGIN_API
+void *qemu_plugin_scoreboard_find(struct qemu_plugin_scoreboard *score,
+ unsigned int vcpu_index);
+
+/* Macros to define a qemu_plugin_u64 */
+#define qemu_plugin_scoreboard_u64(score) \
+ (qemu_plugin_u64) {score, 0}
+#define qemu_plugin_scoreboard_u64_in_struct(score, type, member) \
+ (qemu_plugin_u64) {score, offsetof(type, member)}
+
+/**
+ * qemu_plugin_u64_add() - add a value to a qemu_plugin_u64 for a given vcpu
+ * @entry: entry to query
+ * @vcpu_index: entry index
+ * @added: value to add
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_u64_add(qemu_plugin_u64 entry, unsigned int vcpu_index,
+ uint64_t added);
+
+/**
+ * qemu_plugin_u64_get() - get value of a qemu_plugin_u64 for a given vcpu
+ * @entry: entry to query
+ * @vcpu_index: entry index
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_u64_get(qemu_plugin_u64 entry, unsigned int vcpu_index);
+
+/**
+ * qemu_plugin_u64_set() - set value of a qemu_plugin_u64 for a given vcpu
+ * @entry: entry to query
+ * @vcpu_index: entry index
+ * @val: new value
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_u64_set(qemu_plugin_u64 entry, unsigned int vcpu_index,
+ uint64_t val);
+
+/**
+ * qemu_plugin_u64_sum() - return sum of all vcpu entries in a scoreboard
+ * @entry: entry to sum
+ */
+QEMU_PLUGIN_API
+uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry);
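
Putting the scoreboard pieces together, a sketch (illustrative, not part of the patch) of per-vCPU instruction counting with the inline op:

typedef struct {
    uint64_t insn_count;
} CPUCount;

static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 insn_count;

static void tb_trans_count(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    /* one inline add per executed block, sized by its instruction count */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, insn_count,
        qemu_plugin_tb_n_insns(tb));
}

static void plugin_exit(qemu_plugin_id_t id, void *udata)
{
    g_autofree gchar *msg =
        g_strdup_printf("insns: %" PRIu64 "\n",
                        qemu_plugin_u64_sum(insn_count));
    qemu_plugin_outs(msg);
    qemu_plugin_scoreboard_free(counts);
}

/*
 * in qemu_plugin_install():
 *   counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
 *   insn_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount,
 *                                                     insn_count);
 *   qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_count);
 *   qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
 */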
+
+#endif /* QEMU_QEMU_PLUGIN_H */
diff --git a/include/qemu/qemu-print.h b/include/qemu/qemu-print.h
index 40b596262f..1b70920648 100644
--- a/include/qemu/qemu-print.h
+++ b/include/qemu/qemu-print.h
@@ -13,11 +13,11 @@
#ifndef QEMU_PRINT_H
#define QEMU_PRINT_H
-int qemu_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
-int qemu_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+int qemu_vprintf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
+int qemu_printf(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
int qemu_vfprintf(FILE *stream, const char *fmt, va_list ap)
- GCC_FMT_ATTR(2, 0);
-int qemu_fprintf(FILE *stream, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+ G_GNUC_PRINTF(2, 0);
+int qemu_fprintf(FILE *stream, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
#endif
diff --git a/include/qemu/qemu-progress.h b/include/qemu/qemu-progress.h
new file mode 100644
index 0000000000..137e1c316f
--- /dev/null
+++ b/include/qemu/qemu-progress.h
@@ -0,0 +1,8 @@
+#ifndef QEMU_PROGRESS_H
+#define QEMU_PROGRESS_H
+
+void qemu_progress_init(int enabled, float min_skip);
+void qemu_progress_end(void);
+void qemu_progress_print(float delta, int max);
+
+#endif /* QEMU_PROGRESS_H */
diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h
new file mode 100644
index 0000000000..dc2b14d258
--- /dev/null
+++ b/include/qemu/qtree.h
@@ -0,0 +1,200 @@
+/*
+ * GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * QTree is a partial import of Glib's GTree. The parts excluded correspond
+ * to API calls either deprecated (e.g. g_tree_traverse) or recently added
+ * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
+ *
+ * The reason for this import is to allow us to control the memory allocator
+ * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
+ * slice allocator, which causes problems when forking in user-mode;
+ * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
+ * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
+ *
+ * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
+ */
+
+#ifndef QEMU_QTREE_H
+#define QEMU_QTREE_H
+
+
+#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR
+
+typedef struct _QTree QTree;
+
+typedef struct _QTreeNode QTreeNode;
+
+typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
+ gpointer user_data);
+
+/*
+ * Balanced binary trees
+ */
+QTree *q_tree_new(GCompareFunc key_compare_func);
+QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data);
+QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data,
+ GDestroyNotify key_destroy_func,
+ GDestroyNotify value_destroy_func);
+QTree *q_tree_ref(QTree *tree);
+void q_tree_unref(QTree *tree);
+void q_tree_destroy(QTree *tree);
+void q_tree_insert(QTree *tree,
+ gpointer key,
+ gpointer value);
+void q_tree_replace(QTree *tree,
+ gpointer key,
+ gpointer value);
+gboolean q_tree_remove(QTree *tree,
+ gconstpointer key);
+gboolean q_tree_steal(QTree *tree,
+ gconstpointer key);
+gpointer q_tree_lookup(QTree *tree,
+ gconstpointer key);
+gboolean q_tree_lookup_extended(QTree *tree,
+ gconstpointer lookup_key,
+ gpointer *orig_key,
+ gpointer *value);
+void q_tree_foreach(QTree *tree,
+ GTraverseFunc func,
+ gpointer user_data);
+gpointer q_tree_search(QTree *tree,
+ GCompareFunc search_func,
+ gconstpointer user_data);
+gint q_tree_height(QTree *tree);
+gint q_tree_nnodes(QTree *tree);
+
+#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+typedef GTree QTree;
+typedef GTreeNode QTreeNode;
+typedef GTraverseNodeFunc QTraverseNodeFunc;
+
+static inline QTree *q_tree_new(GCompareFunc key_compare_func)
+{
+ return g_tree_new(key_compare_func);
+}
+
+static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data)
+{
+ return g_tree_new_with_data(key_compare_func, key_compare_data);
+}
+
+static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data,
+ GDestroyNotify key_destroy_func,
+ GDestroyNotify value_destroy_func)
+{
+ return g_tree_new_full(key_compare_func, key_compare_data,
+ key_destroy_func, value_destroy_func);
+}
+
+static inline QTree *q_tree_ref(QTree *tree)
+{
+ return g_tree_ref(tree);
+}
+
+static inline void q_tree_unref(QTree *tree)
+{
+ g_tree_unref(tree);
+}
+
+static inline void q_tree_destroy(QTree *tree)
+{
+ g_tree_destroy(tree);
+}
+
+static inline void q_tree_insert(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ g_tree_insert(tree, key, value);
+}
+
+static inline void q_tree_replace(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ g_tree_replace(tree, key, value);
+}
+
+static inline gboolean q_tree_remove(QTree *tree,
+ gconstpointer key)
+{
+ return g_tree_remove(tree, key);
+}
+
+static inline gboolean q_tree_steal(QTree *tree,
+ gconstpointer key)
+{
+ return g_tree_steal(tree, key);
+}
+
+static inline gpointer q_tree_lookup(QTree *tree,
+ gconstpointer key)
+{
+ return g_tree_lookup(tree, key);
+}
+
+static inline gboolean q_tree_lookup_extended(QTree *tree,
+ gconstpointer lookup_key,
+ gpointer *orig_key,
+ gpointer *value)
+{
+ return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
+}
+
+static inline void q_tree_foreach(QTree *tree,
+ GTraverseFunc func,
+ gpointer user_data)
+{
+ return g_tree_foreach(tree, func, user_data);
+}
+
+static inline gpointer q_tree_search(QTree *tree,
+ GCompareFunc search_func,
+ gconstpointer user_data)
+{
+ return g_tree_search(tree, search_func, user_data);
+}
+
+static inline gint q_tree_height(QTree *tree)
+{
+ return g_tree_height(tree);
+}
+
+static inline gint q_tree_nnodes(QTree *tree)
+{
+ return g_tree_nnodes(tree);
+}
+
+#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+#endif /* QEMU_QTREE_H */
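
Whichever branch is compiled, the API mirrors GTree; a brief usage sketch (illustrative):

static gint compare_ints(gconstpointer a, gconstpointer b)
{
    return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
}

static void qtree_example(void)
{
    QTree *t = q_tree_new(compare_ints);

    q_tree_insert(t, GINT_TO_POINTER(42), (gpointer)"answer");
    g_assert(g_strcmp0(q_tree_lookup(t, GINT_TO_POINTER(42)), "answer") == 0);
    g_assert(q_tree_nnodes(t) == 1);

    q_tree_remove(t, GINT_TO_POINTER(42));
    q_tree_destroy(t);
}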
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 456a5b01ee..e029e7bf66 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -218,12 +218,12 @@ struct { \
typeof(elm) save_sle_next; \
do { \
save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
- } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \
+ } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
save_sle_next); \
} while (/*CONSTCOND*/0)
#define QSLIST_MOVE_ATOMIC(dest, src) do { \
- (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \
+ (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD(head, field) do { \
@@ -376,7 +376,8 @@ struct { \
/*
* Simple queue access methods.
*/
-#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+ (qatomic_read(&((head)->sqh_first)) == NULL)
#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
diff --git a/include/qemu/range.h b/include/qemu/range.h
index f62b363e0d..205e1da76d 100644
--- a/include/qemu/range.h
+++ b/include/qemu/range.h
@@ -114,8 +114,8 @@ static inline uint64_t range_upb(Range *range)
* @size may be 0. If the range would overflow, returns -ERANGE, otherwise
* 0.
*/
-static inline int QEMU_WARN_UNUSED_RESULT range_init(Range *range, uint64_t lob,
- uint64_t size)
+G_GNUC_WARN_UNUSED_RESULT
+static inline int range_init(Range *range, uint64_t lob, uint64_t size)
{
if (lob + size < lob) {
return -ERANGE;
@@ -217,6 +217,20 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
return !(last2 < first1 || last1 < first2);
}
+/*
+ * Return -1 if @a < @b, 1 if @a > @b, and 0 if they touch or overlap.
+ * Both @a and @b must not be empty.
+ */
+int range_compare(Range *a, Range *b);
+
GList *range_list_insert(GList *list, Range *data);
+/*
+ * Invert an array of sorted ranges over the [low, high] span, i.e. the
+ * original ranges become holes in the newly allocated inv_ranges
+ */
+void range_inverse_array(GList *in_ranges,
+ GList **out_ranges,
+ uint64_t low, uint64_t high);
+
#endif
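
A short sketch of the new comparison helper alongside the existing range API (illustrative):

static void range_example(void)
{
    Range a, b;

    /* [0x1000, 0x1fff] and [0x1800, 0x27ff] overlap */
    if (range_init(&a, 0x1000, 0x1000) == 0 &&
        range_init(&b, 0x1800, 0x1000) == 0) {
        g_assert(range_compare(&a, &b) == 0);   /* 0: touch or overlap */
    }
}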
diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h
index 01da8d63f1..48bf59e857 100644
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ -14,9 +14,11 @@
#ifndef QEMU_RATELIMIT_H
#define QEMU_RATELIMIT_H
+#include "qemu/lockable.h"
#include "qemu/timer.h"
typedef struct {
+ QemuMutex lock;
int64_t slice_start_time;
int64_t slice_end_time;
uint64_t slice_quota;
@@ -40,7 +42,12 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
double delay_slices;
- assert(limit->slice_quota && limit->slice_ns);
+ QEMU_LOCK_GUARD(&limit->lock);
+ if (!limit->slice_quota) {
+ /* Throttling disabled. */
+ return 0;
+ }
+ assert(limit->slice_ns);
if (limit->slice_end_time < now) {
/* Previous, possibly extended, time slice finished; reset the
@@ -65,11 +72,26 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
return limit->slice_end_time - now;
}
+static inline void ratelimit_init(RateLimit *limit)
+{
+ qemu_mutex_init(&limit->lock);
+}
+
+static inline void ratelimit_destroy(RateLimit *limit)
+{
+ qemu_mutex_destroy(&limit->lock);
+}
+
static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
uint64_t slice_ns)
{
+ QEMU_LOCK_GUARD(&limit->lock);
limit->slice_ns = slice_ns;
- limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
+ if (speed == 0) {
+ limit->slice_quota = 0;
+ } else {
+ limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
+ }
}
#endif
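
A sketch of how a block job might drive the (now mutex-protected) rate limiter (illustrative; the co-operative sleep is left out):

static void throttled_copy(RateLimit *limit)
{
    ratelimit_init(limit);
    /* ~10 MiB/s, accounted over 100 ms slices */
    ratelimit_set_speed(limit, 10 * 1024 * 1024, 100 * SCALE_MS);

    for (int chunk = 0; chunk < 16; chunk++) {
        int64_t delay_ns = ratelimit_calculate_delay(limit, 64 * 1024);
        if (delay_ns > 0) {
            /* a real job sleeps co-operatively for delay_ns here */
        }
        /* ... copy 64 KiB ... */
    }

    ratelimit_destroy(limit);
}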
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 570aa603eb..fea058aa9f 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -27,11 +27,9 @@
#include "qemu/thread.h"
#include "qemu/queue.h"
#include "qemu/atomic.h"
+#include "qemu/notify.h"
#include "qemu/sys_membarrier.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include "qemu/coroutine-tls.h"
/*
* Important !
@@ -66,29 +64,39 @@ struct rcu_reader_data {
/* Data used for registry, protected by rcu_registry_lock */
QLIST_ENTRY(rcu_reader_data) node;
+
+ /*
+ * NotifierList used to force an RCU grace period. Accessed under
+ * rcu_registry_lock. Note that the notifier is called _outside_
+ * the thread!
+ */
+ NotifierList force_rcu;
};
-extern __thread struct rcu_reader_data rcu_reader;
+QEMU_DECLARE_CO_TLS(struct rcu_reader_data, rcu_reader)
static inline void rcu_read_lock(void)
{
- struct rcu_reader_data *p_rcu_reader = &rcu_reader;
+ struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
unsigned ctr;
if (p_rcu_reader->depth++ > 0) {
return;
}
- ctr = atomic_read(&rcu_gp_ctr);
- atomic_set(&p_rcu_reader->ctr, ctr);
+ ctr = qatomic_read(&rcu_gp_ctr);
+ qatomic_set(&p_rcu_reader->ctr, ctr);
- /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
+ /*
+ * Read rcu_gp_ctr and write p_rcu_reader->ctr before reading
+ * RCU-protected pointers.
+ */
smp_mb_placeholder();
}
static inline void rcu_read_unlock(void)
{
- struct rcu_reader_data *p_rcu_reader = &rcu_reader;
+ struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
assert(p_rcu_reader->depth != 0);
if (--p_rcu_reader->depth > 0) {
@@ -100,29 +108,29 @@ static inline void rcu_read_unlock(void)
* smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
* are sequentially consistent.
*/
- atomic_store_release(&p_rcu_reader->ctr, 0);
+ qatomic_store_release(&p_rcu_reader->ctr, 0);
/* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
smp_mb_placeholder();
- if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
- atomic_set(&p_rcu_reader->waiting, false);
+ if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
+ qatomic_set(&p_rcu_reader->waiting, false);
qemu_event_set(&rcu_gp_event);
}
}
-extern void synchronize_rcu(void);
+void synchronize_rcu(void);
/*
* Reader thread registration.
*/
-extern void rcu_register_thread(void);
-extern void rcu_unregister_thread(void);
+void rcu_register_thread(void);
+void rcu_unregister_thread(void);
/*
* Support for fork(). fork() support is enabled at startup.
*/
-extern void rcu_enable_atfork(void);
-extern void rcu_disable_atfork(void);
+void rcu_enable_atfork(void);
+void rcu_disable_atfork(void);
struct rcu_head;
typedef void RCUCBFunc(struct rcu_head *head);
@@ -132,7 +140,8 @@ struct rcu_head {
RCUCBFunc *func;
};
-extern void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
+void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
+void drain_call_rcu(void);
/* The operands of the minus operator must have the same type,
* which must be the one that we specify in the cast.
@@ -179,8 +188,11 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
#define RCU_READ_LOCK_GUARD() \
g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock()
-#ifdef __cplusplus
-}
-#endif
+/*
+ * Force-RCU notifiers tell readers that they should exit their
+ * read-side critical section.
+ */
+void rcu_add_force_rcu_notifier(Notifier *n);
+void rcu_remove_force_rcu_notifier(Notifier *n);
#endif /* QEMU_RCU_H */
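
For reference, a reader-side sketch using the guard above together with qatomic_rcu_read() (illustrative):

struct Item {
    int value;
    struct rcu_head rcu;
};

static struct Item *current_item;       /* updated with qatomic_rcu_set() */

static int read_current_value(void)
{
    int val = -1;

    RCU_READ_LOCK_GUARD();
    struct Item *p = qatomic_rcu_read(&current_item);
    if (p) {
        val = p->value;
    }
    return val;                          /* guard unlocks on scope exit */
}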
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
index 558961cc27..4e6298d473 100644
--- a/include/qemu/rcu_queue.h
+++ b/include/qemu/rcu_queue.h
@@ -28,17 +28,12 @@
#include "qemu/queue.h"
#include "qemu/atomic.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
/*
* List access methods.
*/
-#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
-#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
-#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
+#define QLIST_EMPTY_RCU(head) (qatomic_read(&(head)->lh_first) == NULL)
+#define QLIST_FIRST_RCU(head) (qatomic_rcu_read(&(head)->lh_first))
+#define QLIST_NEXT_RCU(elm, field) (qatomic_rcu_read(&(elm)->field.le_next))
/*
* List functions.
@@ -46,7 +41,7 @@ extern "C" {
/*
- * The difference between atomic_read/set and atomic_rcu_read/set
+ * The difference between qatomic_read/set and qatomic_rcu_read/set
* is in the including of a read/write memory barrier to the volatile
* access. atomic_rcu_* macros include the memory barrier, the
* plain atomic macros do not. Therefore, it should be correct to
@@ -66,7 +61,7 @@ extern "C" {
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
(elm)->field.le_next = (listelm)->field.le_next; \
(elm)->field.le_prev = &(listelm)->field.le_next; \
- atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.le_next, (elm)); \
if ((elm)->field.le_next != NULL) { \
(elm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
@@ -82,7 +77,7 @@ extern "C" {
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
- atomic_rcu_set((listelm)->field.le_prev, (elm)); \
+ qatomic_rcu_set((listelm)->field.le_prev, (elm)); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
@@ -95,7 +90,7 @@ extern "C" {
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
(elm)->field.le_prev = &(head)->lh_first; \
(elm)->field.le_next = (head)->lh_first; \
- atomic_rcu_set((&(head)->lh_first), (elm)); \
+ qatomic_rcu_set((&(head)->lh_first), (elm)); \
if ((elm)->field.le_next != NULL) { \
(elm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
@@ -112,20 +107,20 @@ extern "C" {
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
} \
- atomic_set((elm)->field.le_prev, (elm)->field.le_next); \
+ qatomic_set((elm)->field.le_prev, (elm)->field.le_next); \
} while (/*CONSTCOND*/0)
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->lh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->lh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.le_next))
+ (var) = qatomic_rcu_read(&(var)->field.le_next))
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
- for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
+ for ((var) = (qatomic_rcu_read(&(head)->lh_first)); \
(var) && \
- ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
+ ((next_var) = qatomic_rcu_read(&(var)->field.le_next), 1); \
(var) = (next_var))
/*
@@ -133,9 +128,10 @@ extern "C" {
*/
/* Simple queue access methods */
-#define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL)
-#define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first)
-#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next)
+#define QSIMPLEQ_EMPTY_RCU(head) \
+ (qatomic_read(&(head)->sqh_first) == NULL)
+#define QSIMPLEQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->sqh_first)
+#define QSIMPLEQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sqe_next)
/* Simple queue functions */
#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do { \
@@ -143,12 +139,12 @@ extern "C" {
if ((elm)->field.sqe_next == NULL) { \
(head)->sqh_last = &(elm)->field.sqe_next; \
} \
- atomic_rcu_set(&(head)->sqh_first, (elm)); \
+ qatomic_rcu_set(&(head)->sqh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
- atomic_rcu_set((head)->sqh_last, (elm)); \
+ qatomic_rcu_set((head)->sqh_last, (elm)); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
@@ -157,11 +153,11 @@ extern "C" {
if ((elm)->field.sqe_next == NULL) { \
(head)->sqh_last = &(elm)->field.sqe_next; \
} \
- atomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do { \
- atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
+ qatomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next);\
if ((head)->sqh_first == NULL) { \
(head)->sqh_last = &(head)->sqh_first; \
} \
@@ -175,7 +171,7 @@ extern "C" {
while (curr->field.sqe_next != (elm)) { \
curr = curr->field.sqe_next; \
} \
- atomic_set(&curr->field.sqe_next, \
+ qatomic_set(&curr->field.sqe_next, \
curr->field.sqe_next->field.sqe_next); \
if (curr->field.sqe_next == NULL) { \
(head)->sqh_last = &(curr)->field.sqe_next; \
@@ -184,13 +180,13 @@ extern "C" {
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->sqh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->sqh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.sqe_next))
+ (var) = qatomic_rcu_read(&(var)->field.sqe_next))
#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->sqh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \
+ for ((var) = qatomic_rcu_read(&(head)->sqh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.sqe_next), 1);\
(var) = (next))
/*
@@ -198,9 +194,9 @@ extern "C" {
*/
/* Tail queue access methods */
-#define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL)
-#define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first)
-#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next)
+#define QTAILQ_EMPTY_RCU(head) (qatomic_read(&(head)->tqh_first) == NULL)
+#define QTAILQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->tqh_first)
+#define QTAILQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.tqe_next)
/* Tail queue functions */
#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do { \
@@ -211,14 +207,14 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} \
- atomic_rcu_set(&(head)->tqh_first, (elm)); \
+ qatomic_rcu_set(&(head)->tqh_first, (elm)); \
(elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
- atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
+ qatomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
@@ -230,14 +226,14 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} \
- atomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \
(elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do { \
(elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_next = (listelm); \
- atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm));\
(listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
@@ -248,18 +244,19 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
} \
- atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, (elm)->field.tqe_next); \
+ qatomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, \
+ (elm)->field.tqe_next); \
(elm)->field.tqe_circ.tql_prev = NULL; \
} while (/*CONSTCOND*/0)
#define QTAILQ_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->tqh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->tqh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.tqe_next))
+ (var) = qatomic_rcu_read(&(var)->field.tqe_next))
#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->tqh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
+ for ((var) = qatomic_rcu_read(&(head)->tqh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.tqe_next), 1);\
(var) = (next))
/*
@@ -267,23 +264,23 @@ extern "C" {
*/
/* Singly-linked list access methods */
-#define QSLIST_EMPTY_RCU(head) (atomic_read(&(head)->slh_first) == NULL)
-#define QSLIST_FIRST_RCU(head) atomic_rcu_read(&(head)->slh_first)
-#define QSLIST_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sle_next)
+#define QSLIST_EMPTY_RCU(head) (qatomic_read(&(head)->slh_first) == NULL)
+#define QSLIST_FIRST_RCU(head) qatomic_rcu_read(&(head)->slh_first)
+#define QSLIST_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sle_next)
/* Singly-linked list functions */
#define QSLIST_INSERT_HEAD_RCU(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
- atomic_rcu_set(&(head)->slh_first, (elm)); \
+ qatomic_rcu_set(&(head)->slh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \
(elm)->field.sle_next = (listelm)->field.sle_next; \
- atomic_rcu_set(&(listelm)->field.sle_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.sle_next, (elm)); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD_RCU(head, field) do { \
- atomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next); \
+ qatomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next);\
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_RCU(head, elm, type, field) do { \
@@ -294,22 +291,19 @@ extern "C" {
while (curr->field.sle_next != (elm)) { \
curr = curr->field.sle_next; \
} \
- atomic_set(&curr->field.sle_next, \
+ qatomic_set(&curr->field.sle_next, \
curr->field.sle_next->field.sle_next); \
} \
} while (/*CONSTCOND*/0)
#define QSLIST_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->slh_first); \
- (var); \
- (var) = atomic_rcu_read(&(var)->field.sle_next))
+ for ((var) = qatomic_rcu_read(&(head)->slh_first); \
+ (var); \
+ (var) = qatomic_rcu_read(&(var)->field.sle_next))
-#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->slh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.sle_next), 1); \
+#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \
+ for ((var) = qatomic_rcu_read(&(head)->slh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.sle_next), 1); \
(var) = (next))
-#ifdef __cplusplus
-}
-#endif
#endif /* QEMU_RCU_QUEUE_H */
diff --git a/include/qemu/readline.h b/include/qemu/readline.h
index e81258322b..b05e4782da 100644
--- a/include/qemu/readline.h
+++ b/include/qemu/readline.h
@@ -5,7 +5,7 @@
#define READLINE_MAX_CMDS 64
#define READLINE_MAX_COMPLETIONS 256
-typedef void GCC_FMT_ATTR(2, 3) ReadLinePrintfFunc(void *opaque,
+typedef void G_GNUC_PRINTF(2, 3) ReadLinePrintfFunc(void *opaque,
const char *fmt, ...);
typedef void ReadLineFlushFunc(void *opaque);
typedef void ReadLineFunc(void *opaque, const char *str,
@@ -44,6 +44,8 @@ typedef struct ReadLineState {
} ReadLineState;
void readline_add_completion(ReadLineState *rs, const char *str);
+void readline_add_completion_of(ReadLineState *rs,
+ const char *pfx, const char *str);
void readline_set_completion_index(ReadLineState *rs, int completion_index);
const char *readline_get_history(ReadLineState *rs, unsigned int index);
diff --git a/include/qemu/reserved-region.h b/include/qemu/reserved-region.h
new file mode 100644
index 0000000000..8e6f0a97e2
--- /dev/null
+++ b/include/qemu/reserved-region.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU ReservedRegion helpers
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_RESERVED_REGION_H
+#define QEMU_RESERVED_REGION_H
+
+#include "exec/memory.h"
+
+/*
+ * Insert a new region into a sorted list of reserved regions. In case
+ * there is overlap with existing regions, the newly added region has
+ * higher priority and replaces the overlapped segment.
+ */
+GList *resv_region_list_insert(GList *list, ReservedRegion *reg);
+
+#endif
diff --git a/include/qemu/selfmap.h b/include/qemu/selfmap.h
index 8382c4c779..1690a74f4b 100644
--- a/include/qemu/selfmap.h
+++ b/include/qemu/selfmap.h
@@ -6,12 +6,13 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef _SELFMAP_H_
-#define _SELFMAP_H_
+#ifndef SELFMAP_H
+#define SELFMAP_H
+
+#include "qemu/interval-tree.h"
typedef struct {
- unsigned long start;
- unsigned long end;
+ IntervalTreeNode itree;
/* flags */
bool is_read;
@@ -19,26 +20,25 @@ typedef struct {
bool is_exec;
bool is_priv;
- unsigned long offset;
- gchar *dev;
- uint64_t inode;
- gchar *path;
+ dev_t dev;
+ ino_t inode;
+ uint64_t offset;
+ const char *path;
} MapInfo;
-
/**
* read_self_maps:
*
- * Read /proc/self/maps and return a list of MapInfo structures.
+ * Read /proc/self/maps and return a tree of MapInfo structures.
*/
-GSList *read_self_maps(void);
+IntervalTreeRoot *read_self_maps(void);
/**
* free_self_maps:
- * @info: a GSlist
+ * @info: an interval tree
*
- * Free a list of MapInfo structures.
+ * Free a tree of MapInfo structures.
*/
-void free_self_maps(GSList *info);
+void free_self_maps(IntervalTreeRoot *root);
-#endif /* _SELFMAP_H_ */
+#endif /* SELFMAP_H */
diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index 8b6b4ee4bb..ecb7d2c864 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -32,7 +32,7 @@ static inline void seqlock_init(QemuSeqLock *sl)
/* Lock out other writers and update the count. */
static inline void seqlock_write_begin(QemuSeqLock *sl)
{
- atomic_set(&sl->sequence, sl->sequence + 1);
+ qatomic_set(&sl->sequence, sl->sequence + 1);
/* Write sequence before updating other fields. */
smp_wmb();
@@ -43,7 +43,7 @@ static inline void seqlock_write_end(QemuSeqLock *sl)
/* Write other fields before finalizing sequence. */
smp_wmb();
- atomic_set(&sl->sequence, sl->sequence + 1);
+ qatomic_set(&sl->sequence, sl->sequence + 1);
}
/* Lock out other writers and update the count. */
@@ -68,7 +68,7 @@ static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock
static inline unsigned seqlock_read_begin(const QemuSeqLock *sl)
{
/* Always fail if a write is in progress. */
- unsigned ret = atomic_read(&sl->sequence);
+ unsigned ret = qatomic_read(&sl->sequence);
/* Read sequence before reading other fields. */
smp_rmb();
@@ -79,7 +79,7 @@ static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
{
/* Read other fields before reading final sequence. */
smp_rmb();
- return unlikely(atomic_read(&sl->sequence) != start);
+ return unlikely(qatomic_read(&sl->sequence) != start);
}
#endif
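
For reference, the canonical reader retry loop these helpers support (a sketch; the protected fields are illustrative):

typedef struct {
    QemuSeqLock sequence;
    int64_t a, b;                       /* protected by the seqlock */
} PairState;

static int64_t read_pair_sum(PairState *s)
{
    int64_t a, b;
    unsigned start;

    do {
        start = seqlock_read_begin(&s->sequence);
        a = s->a;
        b = s->b;
    } while (seqlock_read_retry(&s->sequence, start));

    return a + b;
}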
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 7d1f813576..d935fd80da 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -14,12 +14,41 @@ int inet_aton(const char *cp, struct in_addr *ia);
/* misc helpers */
bool fd_is_socket(int fd);
int qemu_socket(int domain, int type, int protocol);
+
+/**
+ * qemu_socketpair:
+ * @domain: specifies a communication domain, such as PF_UNIX
+ * @type: specifies the socket type.
+ * @protocol: specifies a particular protocol to be used with the socket
+ * @sv: an array to store the pair of sockets created
+ *
+ * Creates an unnamed pair of connected sockets in the specified domain,
+ * of the specified type, and using the optionally specified protocol.
+ * The close-on-exec flag is set automatically on the returned sockets.
+ *
+ * Return 0 on success.
+ */
+int qemu_socketpair(int domain, int type, int protocol, int sv[2]);
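
A sketch of the intended usage (illustrative):

static int make_pair(int sv[2])
{
    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) {
        return -1;                      /* errno set as for socketpair(2) */
    }
    /* both ends already have the close-on-exec flag set */
    return 0;
}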
+
int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen);
+/*
+ * A variant of send(2) which handles partial send.
+ *
+ * Return the number of bytes transferred over the socket.
+ * Set errno if fewer than `count' bytes are sent.
+ *
+ * This function doesn't work with non-blocking sockets.
+ * Any of the possibilities with non-blocking sockets would be bad:
+ * - returning a short write (then the name is wrong)
+ * - busy waiting by adding (errno == EAGAIN) to the loop
+ */
+ssize_t qemu_send_full(int s, const void *buf, size_t count)
+ G_GNUC_WARN_UNUSED_RESULT;
int socket_set_cork(int fd, int v);
int socket_set_nodelay(int fd);
-void qemu_set_block(int fd);
-int qemu_try_set_nonblock(int fd);
-void qemu_set_nonblock(int fd);
+void qemu_socket_set_block(int fd);
+int qemu_socket_try_set_nonblock(int fd);
+void qemu_socket_set_nonblock(int fd);
int socket_set_fast_reuse(int fd);
#ifdef WIN32
@@ -40,6 +69,7 @@ NetworkAddressFamily inet_netfamily(int family);
int unix_listen(const char *path, Error **errp);
int unix_connect(const char *path, Error **errp);
+char *socket_uri(SocketAddress *addr);
SocketAddress *socket_parse(const char *str, Error **errp);
int socket_connect(SocketAddress *addr, Error **errp);
int socket_listen(SocketAddress *addr, int num, Error **errp);
@@ -47,6 +77,8 @@ void socket_listen_cleanup(int fd, Error **errp);
int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp);
/* Old, ipv4 only bits. Don't use for new code. */
+int convert_host_port(struct sockaddr_in *saddr, const char *host,
+ const char *port, Error **errp);
int parse_host_port(struct sockaddr_in *saddr, const char *str,
Error **errp);
int socket_init(void);
@@ -111,4 +143,14 @@ SocketAddress *socket_remote_address(int fd, Error **errp);
*/
SocketAddress *socket_address_flatten(SocketAddressLegacy *addr);
+/**
+ * socket_address_parse_named_fd:
+ *
+ * Modify @addr, replacing a named fd by its corresponding number.
+ * Needed for callers that plan to pass @addr to a context where the
+ * current monitor is not available.
+ *
+ * Return 0 on success.
+ */
+int socket_address_parse_named_fd(SocketAddress *addr, Error **errp);
#endif /* QEMU_SOCKETS_H */
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index 19a5ac4c56..99b5cb724a 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -21,7 +21,7 @@
typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
- uint64_t value;
+ aligned_uint64_t value;
#else
uint32_t low, high;
uint32_t lock;
@@ -37,31 +37,37 @@ static inline void stat64_init(Stat64 *s, uint64_t value)
static inline uint64_t stat64_get(const Stat64 *s)
{
- return atomic_read__nocheck(&s->value);
+ return qatomic_read__nocheck(&s->value);
+}
+
+static inline void stat64_set(Stat64 *s, uint64_t value)
+{
+ qatomic_set__nocheck(&s->value, value);
}
static inline void stat64_add(Stat64 *s, uint64_t value)
{
- atomic_add(&s->value, value);
+ qatomic_add(&s->value, value);
}
static inline void stat64_min(Stat64 *s, uint64_t value)
{
- uint64_t orig = atomic_read__nocheck(&s->value);
+ uint64_t orig = qatomic_read__nocheck(&s->value);
while (orig > value) {
- orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+ orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
}
}
static inline void stat64_max(Stat64 *s, uint64_t value)
{
- uint64_t orig = atomic_read__nocheck(&s->value);
+ uint64_t orig = qatomic_read__nocheck(&s->value);
while (orig < value) {
- orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+ orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
}
}
#else
uint64_t stat64_get(const Stat64 *s);
+void stat64_set(Stat64 *s, uint64_t value);
bool stat64_min_slow(Stat64 *s, uint64_t value);
bool stat64_max_slow(Stat64 *s, uint64_t value);
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);
@@ -79,7 +85,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
low = (uint32_t) value;
if (!low) {
if (high) {
- atomic_add(&s->high, high);
+ qatomic_add(&s->high, high);
}
return;
}
@@ -101,7 +107,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
* the high 32 bits, so it can race just fine with stat64_add32_carry
* and even stat64_get!
*/
- old = atomic_cmpxchg(&s->low, orig, result);
+ old = qatomic_cmpxchg(&s->low, orig, result);
if (orig == old) {
return;
}
@@ -116,7 +122,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
high = value >> 32;
low = (uint32_t) value;
do {
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high < high) {
return;
}
@@ -128,7 +134,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
* the write barrier in stat64_min_slow.
*/
smp_rmb();
- orig_low = atomic_read(&s->low);
+ orig_low = qatomic_read(&s->low);
if (orig_low <= low) {
return;
}
@@ -138,7 +144,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
* we may miss being lucky.
*/
smp_rmb();
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high < high) {
return;
}
@@ -156,7 +162,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
high = value >> 32;
low = (uint32_t) value;
do {
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high > high) {
return;
}
@@ -168,7 +174,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
* the write barrier in stat64_max_slow.
*/
smp_rmb();
- orig_low = atomic_read(&s->low);
+ orig_low = qatomic_read(&s->low);
if (orig_low >= low) {
return;
}
@@ -178,7 +184,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
* we may miss being lucky.
*/
smp_rmb();
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high > high) {
return;
}
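
For context, a short sketch of the Stat64 API whose internals the hunks above switch to the qatomic_* helpers; the counter name is made up for illustration:

    #include "qemu/osdep.h"
    #include "qemu/stats64.h"

    /* Count bytes transferred from multiple threads without a lock. */
    static Stat64 bytes_transferred;

    static void account_transfer(uint64_t len)
    {
        stat64_add(&bytes_transferred, len);  /* lock-free with CONFIG_ATOMIC64 */
    }

    static void reset_counter(void)
    {
        stat64_set(&bytes_transferred, 0);    /* setter added by this patch */
    }

    static uint64_t report(void)
    {
        return stat64_get(&bytes_transferred);
    }
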
diff --git a/include/qemu/sys_membarrier.h b/include/qemu/sys_membarrier.h
index b5bfa21d52..e7774891f8 100644
--- a/include/qemu/sys_membarrier.h
+++ b/include/qemu/sys_membarrier.h
@@ -14,8 +14,8 @@
* side. The slow side forces processor-level ordering on all other cores
* through a system call.
*/
-extern void smp_mb_global_init(void);
-extern void smp_mb_global(void);
+void smp_mb_global_init(void);
+void smp_mb_global(void);
#define smp_mb_placeholder() barrier()
#else
/* Keep it simple, execute a real memory barrier on both sides. */
diff --git a/include/qemu/thread-context.h b/include/qemu/thread-context.h
new file mode 100644
index 0000000000..2ebd6b7fe1
--- /dev/null
+++ b/include/qemu/thread-context.h
@@ -0,0 +1,57 @@
+/*
+ * QEMU Thread Context
+ *
+ * Copyright Red Hat Inc., 2022
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_THREAD_CONTEXT_H
+#define SYSEMU_THREAD_CONTEXT_H
+
+#include "qapi/qapi-types-machine.h"
+#include "qemu/thread.h"
+#include "qom/object.h"
+
+#define TYPE_THREAD_CONTEXT "thread-context"
+OBJECT_DECLARE_TYPE(ThreadContext, ThreadContextClass,
+ THREAD_CONTEXT)
+
+struct ThreadContextClass {
+ ObjectClass parent_class;
+};
+
+struct ThreadContext {
+ /* private */
+ Object parent;
+
+ /* private */
+ unsigned int thread_id;
+ QemuThread thread;
+
+ /* Semaphore to wait for context thread action. */
+ QemuSemaphore sem;
+ /* Semaphore to wait for action in context thread. */
+ QemuSemaphore sem_thread;
+ /* Mutex to synchronize requests. */
+ QemuMutex mutex;
+
+ /* Commands for the thread to execute. */
+ int thread_cmd;
+ void *thread_cmd_data;
+
+ /* CPU affinity bitmap used for initialization. */
+ unsigned long *init_cpu_bitmap;
+ int init_cpu_nbits;
+};
+
+void thread_context_create_thread(ThreadContext *tc, QemuThread *thread,
+ const char *name,
+ void *(*start_routine)(void *), void *arg,
+ int mode);
+
+#endif /* SYSEMU_THREAD_CONTEXT_H */
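
A rough usage sketch of thread_context_create_thread(); it assumes a ThreadContext obtained elsewhere (for example from a user-created thread-context object), and the worker function is a placeholder:

    #include "qemu/osdep.h"
    #include "qemu/thread-context.h"

    static void *worker_fn(void *opaque)
    {
        /* Runs on a thread created (and pinned) by the context thread. */
        return NULL;
    }

    /* @tc is assumed to come from a user-created "thread-context" object,
     * e.g. -object thread-context,id=tc1,node-affinity=... */
    static void start_pinned_worker(ThreadContext *tc, QemuThread *thread)
    {
        thread_context_create_thread(tc, thread, "my-worker",
                                     worker_fn, NULL,
                                     QEMU_THREAD_JOINABLE);
    }
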
diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h
index c903525062..5f2f3d1386 100644
--- a/include/qemu/thread-posix.h
+++ b/include/qemu/thread-posix.h
@@ -4,12 +4,6 @@
#include <pthread.h>
#include <semaphore.h>
-typedef QemuMutex QemuRecMutex;
-#define qemu_rec_mutex_destroy qemu_mutex_destroy
-#define qemu_rec_mutex_lock_impl qemu_mutex_lock_impl
-#define qemu_rec_mutex_trylock_impl qemu_mutex_trylock_impl
-#define qemu_rec_mutex_unlock qemu_mutex_unlock
-
struct QemuMutex {
pthread_mutex_t lock;
#ifdef CONFIG_DEBUG_MUTEX
@@ -19,20 +13,23 @@ struct QemuMutex {
bool initialized;
};
+/*
+ * QemuRecMutex cannot be a typedef of QemuMutex lest we have two
+ * compatible cases in _Generic. See qemu/lockable.h.
+ */
+typedef struct QemuRecMutex {
+ QemuMutex m;
+} QemuRecMutex;
+
struct QemuCond {
pthread_cond_t cond;
bool initialized;
};
struct QemuSemaphore {
-#ifndef CONFIG_SEM_TIMEDWAIT
- pthread_mutex_t lock;
- pthread_cond_t cond;
+ QemuMutex mutex;
+ QemuCond cond;
unsigned int count;
-#else
- sem_t sem;
-#endif
- bool initialized;
};
struct QemuEvent {
diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h
index d0a1a9597e..d95af4498f 100644
--- a/include/qemu/thread-win32.h
+++ b/include/qemu/thread-win32.h
@@ -18,12 +18,6 @@ struct QemuRecMutex {
bool initialized;
};
-void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
-void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
-int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file,
- int line);
-void qemu_rec_mutex_unlock(QemuRecMutex *mutex);
-
struct QemuCond {
CONDITION_VARIABLE var;
bool initialized;
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 4baf4d1715..fb74e21c08 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -3,6 +3,7 @@
#include "qemu/processor.h"
#include "qemu/atomic.h"
+#include "qemu/clang-tsa.h"
typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
@@ -24,9 +25,18 @@ typedef struct QemuThread QemuThread;
void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
-int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
-void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
-void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);
+int TSA_NO_TSA qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file,
+ const int line);
+void TSA_NO_TSA qemu_mutex_lock_impl(QemuMutex *mutex, const char *file,
+ const int line);
+void TSA_NO_TSA qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file,
+ const int line);
+
+void qemu_rec_mutex_init(QemuRecMutex *mutex);
+void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
+void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
+int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
+void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
@@ -37,7 +47,7 @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
const char *f, int l);
-extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
+extern QemuMutexLockFunc bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
@@ -70,33 +80,33 @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({ \
- QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \
+ QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
_f(m, __FILE__, __LINE__); \
})
-#define qemu_mutex_trylock(m) ({ \
- QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
- _f(m, __FILE__, __LINE__); \
+#define qemu_mutex_trylock(m) ({ \
+ QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
+ _f(m, __FILE__, __LINE__); \
})
-#define qemu_rec_mutex_lock(m) ({ \
- QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
- _f(m, __FILE__, __LINE__); \
+#define qemu_rec_mutex_lock(m) ({ \
+ QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
+ _f(m, __FILE__, __LINE__); \
})
#define qemu_rec_mutex_trylock(m) ({ \
QemuRecMutexTrylockFunc _f; \
- _f = atomic_read(&qemu_rec_mutex_trylock_func); \
+ _f = qatomic_read(&qemu_rec_mutex_trylock_func); \
_f(m, __FILE__, __LINE__); \
})
#define qemu_cond_wait(c, m) ({ \
- QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \
+ QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func); \
_f(c, m, __FILE__, __LINE__); \
})
#define qemu_cond_timedwait(c, m, ms) ({ \
- QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \
+ QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
_f(c, m, ms, __FILE__, __LINE__); \
})
#endif
@@ -104,6 +114,9 @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
#define qemu_mutex_unlock(mutex) \
qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
+#define qemu_rec_mutex_unlock(mutex) \
+ qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)
+
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
qemu_mutex_lock(mutex);
@@ -129,8 +142,10 @@ static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
return qemu_rec_mutex_trylock(mutex);
}
-/* Prototypes for other functions are in thread-posix.h/thread-win32.h. */
-void qemu_rec_mutex_init(QemuRecMutex *mutex);
+static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
+{
+ qemu_rec_mutex_unlock(mutex);
+}
void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);
@@ -142,8 +157,8 @@ void qemu_cond_destroy(QemuCond *cond);
*/
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
-void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
- const char *file, const int line);
+void TSA_NO_TSA qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
+ const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
const char *file, const int line);
@@ -174,10 +189,14 @@ void qemu_event_destroy(QemuEvent *ev);
void qemu_thread_create(QemuThread *thread, const char *name,
void *(*start_routine)(void *),
void *arg, int mode);
+int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
+ unsigned long nbits);
+int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
+ unsigned long *nbits);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
-void qemu_thread_exit(void *retval) QEMU_NORETURN;
+G_NORETURN void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);
struct Notifier;
@@ -216,17 +235,16 @@ struct QemuSpin {
static inline void qemu_spin_init(QemuSpin *spin)
{
- __sync_lock_release(&spin->value);
+ qatomic_set(&spin->value, 0);
#ifdef CONFIG_TSAN
__tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}
-/* const parameter because the only purpose here is the TSAN annotation */
-static inline void qemu_spin_destroy(const QemuSpin *spin)
+static inline void qemu_spin_destroy(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
- __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
+ __tsan_mutex_destroy(spin, __tsan_mutex_not_static);
#endif
}
@@ -235,8 +253,8 @@ static inline void qemu_spin_lock(QemuSpin *spin)
#ifdef CONFIG_TSAN
__tsan_mutex_pre_lock(spin, 0);
#endif
- while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
- while (atomic_read(&spin->value)) {
+ while (unlikely(qatomic_xchg(&spin->value, 1))) {
+ while (qatomic_read(&spin->value)) {
cpu_relax();
}
}
@@ -250,7 +268,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
#ifdef CONFIG_TSAN
__tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
- bool busy = __sync_lock_test_and_set(&spin->value, true);
+ bool busy = qatomic_xchg(&spin->value, true);
#ifdef CONFIG_TSAN
unsigned flags = __tsan_mutex_try_lock;
flags |= busy ? __tsan_mutex_try_lock_failed : 0;
@@ -261,7 +279,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
static inline bool qemu_spin_locked(QemuSpin *spin)
{
- return atomic_read(&spin->value);
+ return qatomic_read(&spin->value);
}
static inline void qemu_spin_unlock(QemuSpin *spin)
@@ -269,7 +287,7 @@ static inline void qemu_spin_unlock(QemuSpin *spin)
#ifdef CONFIG_TSAN
__tsan_mutex_pre_unlock(spin, 0);
#endif
- __sync_lock_release(&spin->value);
+ qatomic_store_release(&spin->value, 0);
#ifdef CONFIG_TSAN
__tsan_mutex_post_unlock(spin, 0);
#endif
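
A brief sketch of the recursive-mutex API that the hunks above consolidate into thread.h; the functions and lock name are illustrative:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    /* Recursive mutex sketch: the same thread may re-take the lock, e.g.
     * when a callback calls back into the locked subsystem. */
    static QemuRecMutex state_lock;

    static void subsystem_init(void)
    {
        qemu_rec_mutex_init(&state_lock);
    }

    static void do_inner(void)
    {
        qemu_rec_mutex_lock(&state_lock);   /* re-entry by the owner is fine */
        /* ... touch shared state ... */
        qemu_rec_mutex_unlock(&state_lock);
    }

    static void do_outer(void)
    {
        qemu_rec_mutex_lock(&state_lock);
        do_inner();
        qemu_rec_mutex_unlock(&state_lock);
    }
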
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
index 05f6346137..181245d29b 100644
--- a/include/qemu/throttle.h
+++ b/include/qemu/throttle.h
@@ -99,13 +99,18 @@ typedef struct ThrottleState {
int64_t previous_leak; /* timestamp of the last leak done */
} ThrottleState;
+typedef enum {
+ THROTTLE_READ = 0,
+ THROTTLE_WRITE,
+ THROTTLE_MAX
+} ThrottleDirection;
+
typedef struct ThrottleTimers {
- QEMUTimer *timers[2]; /* timers used to do the throttling */
+ QEMUTimer *timers[THROTTLE_MAX]; /* timers used to do the throttling */
QEMUClockType clock_type; /* the clock used */
/* Callbacks */
- QEMUTimerCB *read_timer_cb;
- QEMUTimerCB *write_timer_cb;
+ QEMUTimerCB *timer_cb[THROTTLE_MAX];
void *timer_opaque;
} ThrottleTimers;
@@ -149,9 +154,10 @@ void throttle_config_init(ThrottleConfig *cfg);
/* usage */
bool throttle_schedule_timer(ThrottleState *ts,
ThrottleTimers *tt,
- bool is_write);
+ ThrottleDirection direction);
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size);
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
+ uint64_t size);
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
Error **errp);
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var);
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 6a8b48b5a9..9a366e551f 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -166,8 +166,8 @@ bool qemu_clock_expired(QEMUClockType type);
*
* Determine whether a clock should be used for deadline
* calculations. Some clocks, for instance vm_clock with
- * use_icount set, do not count in nanoseconds. Such clocks
- * are not used for deadline calculations, and are presumed
+ * icount_enabled() set, do not count in nanoseconds.
+ * Such clocks are not used for deadline calculations, and are presumed
* to interrupt any poll using qemu_notify/aio_notify
* etc.
*
@@ -225,13 +225,6 @@ void qemu_clock_notify(QEMUClockType type);
void qemu_clock_enable(QEMUClockType type, bool enabled);
/**
- * qemu_start_warp_timer:
- *
- * Starts a timer for virtual clock update
- */
-void qemu_start_warp_timer(void);
-
-/**
* qemu_clock_run_timers:
* @type: clock on which to operate
*
@@ -527,7 +520,7 @@ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group,
int scale, int attributes,
QEMUTimerCB *cb, void *opaque)
{
- QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
+ QEMUTimer *ts = g_new0(QEMUTimer, 1);
timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque);
return ts;
}
@@ -617,17 +610,6 @@ static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
void timer_deinit(QEMUTimer *ts);
/**
- * timer_free:
- * @ts: the timer
- *
- * Free a timer (it must not be on the active list)
- */
-static inline void timer_free(QEMUTimer *ts)
-{
- g_free(ts);
-}
-
-/**
* timer_del:
* @ts: the timer
*
@@ -639,6 +621,21 @@ static inline void timer_free(QEMUTimer *ts)
void timer_del(QEMUTimer *ts);
/**
+ * timer_free:
+ * @ts: the timer
+ *
+ * Free a timer. This will call timer_del() for you to remove
+ * the timer from the active list if it was still active.
+ */
+static inline void timer_free(QEMUTimer *ts)
+{
+ if (ts) {
+ timer_del(ts);
+ g_free(ts);
+ }
+}
+
+/**
* timer_mod_ns:
* @ts: the timer
* @expire_time: the expiry time in nanoseconds
@@ -679,7 +676,7 @@ void timer_mod(QEMUTimer *ts, int64_t expire_timer);
/**
* timer_mod_anticipate:
* @ts: the timer
- * @expire_time: the expiry time in nanoseconds
+ * @expire_time: the expire time in the units associated with the timer
*
* Modify a timer to expire at @expire_time or the current time, whichever
* comes earlier, taking into account the scale associated with the timer.
@@ -791,12 +788,6 @@ static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
*/
void init_clocks(QEMUTimerListNotifyCB *notify_cb);
-int64_t cpu_get_ticks(void);
-/* Caller must hold BQL */
-void cpu_enable_ticks(void);
-/* Caller must hold BQL */
-void cpu_disable_ticks(void);
-
static inline int64_t get_max_clock_jump(void)
{
/* This should be small enough to prevent excessive interrupts from being
@@ -819,6 +810,8 @@ static inline int64_t get_clock_realtime(void)
return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}
+extern int64_t clock_start;
+
/* Warning: don't insert tracepoints into these functions, they are
also used by simpletrace backend and tracepoints would cause
an infinite recursion! */
@@ -850,13 +843,6 @@ static inline int64_t get_clock(void)
}
#endif
-/* icount */
-int64_t cpu_get_icount_raw(void);
-int64_t cpu_get_icount(void);
-int64_t cpu_get_clock(void);
-int64_t cpu_icount_to_ns(int64_t icount);
-void cpu_update_icount(CPUState *cpu);
-
/*******************************************/
/* host CPU ticks (if available) */
@@ -993,6 +979,28 @@ static inline int64_t cpu_get_host_ticks(void)
return cur - ofs;
}
+#elif defined(__riscv) && __riscv_xlen == 32
+static inline int64_t cpu_get_host_ticks(void)
+{
+ uint32_t lo, hi, tmph;
+ do {
+ asm volatile("RDTIMEH %0\n\t"
+ "RDTIME %1\n\t"
+ "RDTIMEH %2"
+ : "=r"(hi), "=r"(lo), "=r"(tmph));
+ } while (unlikely(tmph != hi));
+ return lo | (uint64_t)hi << 32;
+}
+
+#elif defined(__riscv) && __riscv_xlen > 32
+static inline int64_t cpu_get_host_ticks(void)
+{
+ int64_t val;
+
+ asm volatile("RDTIME %0" : "=r"(val));
+ return val;
+}
+
#else
/* The host CPU doesn't have an easily accessible cycle counter.
Just return a monotonically increasing value. This will be
@@ -1003,13 +1011,4 @@ static inline int64_t cpu_get_host_ticks(void)
}
#endif
-#ifdef CONFIG_PROFILER
-static inline int64_t profile_getclock(void)
-{
- return get_clock();
-}
-
-extern int64_t dev_time;
-#endif
-
#endif
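
A small sketch of the pattern the reworked timer_free() enables; the callback and delay handling are illustrative only:

    #include "qemu/osdep.h"
    #include "qemu/timer.h"

    static void my_timeout_cb(void *opaque)
    {
        /* handle the timeout */
    }

    static QEMUTimer *my_timer;

    static void arm_timeout(int64_t delay_ns)
    {
        my_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_timeout_cb, NULL);
        timer_mod(my_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
    }

    static void cancel_timeout(void)
    {
        /* timer_free() now removes the timer from the active list itself,
         * so a separate timer_del() call is no longer needed. */
        timer_free(my_timer);
        my_timer = NULL;
    }
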
diff --git a/include/qemu/transactions.h b/include/qemu/transactions.h
new file mode 100644
index 0000000000..2f2060acd9
--- /dev/null
+++ b/include/qemu/transactions.h
@@ -0,0 +1,66 @@
+/*
+ * Simple transactions API
+ *
+ * Copyright (c) 2021 Virtuozzo International GmbH.
+ *
+ * Author:
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * = Generic transaction API =
+ *
+ * The intended usage is the following: you create "prepare" functions, which
+ * represent the actions. They will usually take a Transaction* argument and
+ * call tran_add() to register finalization callbacks. For the finalization
+ * callbacks, prepare corresponding TransactionActionDrv structures.
+ *
+ * Then, when you need to make a transaction, create an empty Transaction with
+ * tran_new(), call your "prepare" functions on it, and finally call
+ * tran_abort() or tran_commit() to finalize the transaction by running the
+ * corresponding finalization actions in reverse order.
+ *
+ * The clean() functions registered by the drivers in a transaction are called
+ * last, after all abort() or commit() functions have been called.
+ */
+
+#ifndef QEMU_TRANSACTIONS_H
+#define QEMU_TRANSACTIONS_H
+
+#include <gmodule.h>
+
+typedef struct TransactionActionDrv {
+ void (*abort)(void *opaque);
+ void (*commit)(void *opaque);
+ void (*clean)(void *opaque);
+} TransactionActionDrv;
+
+typedef struct Transaction Transaction;
+
+Transaction *tran_new(void);
+void tran_add(Transaction *tran, TransactionActionDrv *drv, void *opaque);
+void tran_abort(Transaction *tran);
+void tran_commit(Transaction *tran);
+
+static inline void tran_finalize(Transaction *tran, int ret)
+{
+ if (ret < 0) {
+ tran_abort(tran);
+ } else {
+ tran_commit(tran);
+ }
+}
+
+#endif /* QEMU_TRANSACTIONS_H */
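
A usage sketch following the comment above; the flag-setting action and all names are invented for illustration:

    #include "qemu/osdep.h"
    #include "qemu/transactions.h"

    /* Hypothetical action: set a flag, with the ability to roll back. */
    typedef struct SetFlagState {
        bool *flag;
        bool old_value;
    } SetFlagState;

    static void set_flag_abort(void *opaque)
    {
        SetFlagState *s = opaque;
        *s->flag = s->old_value;    /* undo the prepared change */
    }

    static void set_flag_clean(void *opaque)
    {
        g_free(opaque);             /* runs after all abort/commit callbacks */
    }

    static TransactionActionDrv set_flag_drv = {
        .abort = set_flag_abort,
        .clean = set_flag_clean,
    };

    /* "prepare" function: apply the change and register how to undo it. */
    static void set_flag_prepare(bool *flag, Transaction *tran)
    {
        SetFlagState *s = g_new(SetFlagState, 1);

        *s = (SetFlagState) { .flag = flag, .old_value = *flag };
        *flag = true;
        tran_add(tran, &set_flag_drv, s);
    }

    /* Caller side: prepare everything, then commit or abort as one unit. */
    static int do_two_flags(bool *a, bool *b)
    {
        Transaction *tran = tran_new();
        int ret = 0;

        set_flag_prepare(a, tran);
        set_flag_prepare(b, tran);
        /* ... ret = some fallible step ... */
        tran_finalize(tran, ret);   /* commit on success, abort on failure */
        return ret;
    }
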
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 427027a970..50c277cf0b 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -21,11 +21,14 @@
* Incomplete struct types
* Please keep this list in case-insensitive alphabetical order.
*/
+typedef struct AccelCPUState AccelCPUState;
+typedef struct AccelState AccelState;
typedef struct AdapterInfo AdapterInfo;
typedef struct AddressSpace AddressSpace;
typedef struct AioContext AioContext;
typedef struct Aml Aml;
typedef struct AnnounceTimer AnnounceTimer;
+typedef struct ArchCPU ArchCPU;
typedef struct BdrvDirtyBitmap BdrvDirtyBitmap;
typedef struct BdrvDirtyBitmapIter BdrvDirtyBitmapIter;
typedef struct BlockBackend BlockBackend;
@@ -34,15 +37,22 @@ typedef struct BlockDriverState BlockDriverState;
typedef struct BusClass BusClass;
typedef struct BusState BusState;
typedef struct Chardev Chardev;
+typedef struct Clock Clock;
typedef struct CompatProperty CompatProperty;
-typedef struct CoMutex CoMutex;
+typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
typedef struct CPUAddressSpace CPUAddressSpace;
+typedef struct CPUArchState CPUArchState;
+typedef struct CPUPluginState CPUPluginState;
+typedef struct CpuInfoFast CpuInfoFast;
+typedef struct CPUJumpCache CPUJumpCache;
typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
typedef struct DeviceListener DeviceListener;
typedef struct DeviceState DeviceState;
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
typedef struct DisplayChangeListener DisplayChangeListener;
typedef struct DriveInfo DriveInfo;
+typedef struct DumpState DumpState;
typedef struct Error Error;
typedef struct EventNotifier EventNotifier;
typedef struct FlatView FlatView;
@@ -50,6 +60,7 @@ typedef struct FWCfgEntry FWCfgEntry;
typedef struct FWCfgIoState FWCfgIoState;
typedef struct FWCfgMemState FWCfgMemState;
typedef struct FWCfgState FWCfgState;
+typedef struct GraphicHwOps GraphicHwOps;
typedef struct HostMemoryBackend HostMemoryBackend;
typedef struct I2CBus I2CBus;
typedef struct I2SCodec I2SCodec;
@@ -57,8 +68,8 @@ typedef struct IOMMUMemoryRegion IOMMUMemoryRegion;
typedef struct ISABus ISABus;
typedef struct ISADevice ISADevice;
typedef struct IsaDma IsaDma;
+typedef struct JSONWriter JSONWriter;
typedef struct MACAddr MACAddr;
-typedef struct ReservedRegion ReservedRegion;
typedef struct MachineClass MachineClass;
typedef struct MachineState MachineState;
typedef struct MemoryListener MemoryListener;
@@ -86,11 +97,13 @@ typedef struct PCIEAERLog PCIEAERLog;
typedef struct PCIEAERMsg PCIEAERMsg;
typedef struct PCIEPort PCIEPort;
typedef struct PCIESlot PCIESlot;
+typedef struct PCIESriovPF PCIESriovPF;
+typedef struct PCIESriovVF PCIESriovVF;
typedef struct PCIExpressDevice PCIExpressDevice;
typedef struct PCIExpressHost PCIExpressHost;
typedef struct PCIHostDeviceAddress PCIHostDeviceAddress;
typedef struct PCIHostState PCIHostState;
-typedef struct PCMachineState PCMachineState;
+typedef struct PICCommonState PICCommonState;
typedef struct PostcopyDiscardState PostcopyDiscardState;
typedef struct Property Property;
typedef struct PropertyInfo PropertyInfo;
@@ -98,6 +111,7 @@ typedef struct QBool QBool;
typedef struct QDict QDict;
typedef struct QEMUBH QEMUBH;
typedef struct QemuConsole QemuConsole;
+typedef struct QEMUCursor QEMUCursor;
typedef struct QEMUFile QEMUFile;
typedef struct QemuLockable QemuLockable;
typedef struct QemuMutex QemuMutex;
@@ -108,7 +122,6 @@ typedef struct QEMUSGList QEMUSGList;
typedef struct QemuSpin QemuSpin;
typedef struct QEMUTimer QEMUTimer;
typedef struct QEMUTimerListGroup QEMUTimerListGroup;
-typedef struct QJSON QJSON;
typedef struct QList QList;
typedef struct QNull QNull;
typedef struct QNum QNum;
@@ -116,9 +129,12 @@ typedef struct QObject QObject;
typedef struct QString QString;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
-typedef struct SavedIOTLB SavedIOTLB;
+typedef struct ReservedRegion ReservedRegion;
typedef struct SHPCDevice SHPCDevice;
typedef struct SSIBus SSIBus;
+typedef struct TCGCPUOps TCGCPUOps;
+typedef struct TCGHelperInfo TCGHelperInfo;
+typedef struct TranslationBlock TranslationBlock;
typedef struct VirtIODevice VirtIODevice;
typedef struct Visitor Visitor;
typedef struct VMChangeStateEntry VMChangeStateEntry;
@@ -135,8 +151,6 @@ typedef struct IRQState *qemu_irq;
/*
* Function types
*/
-typedef void SaveStateHandler(QEMUFile *f, void *opaque);
-typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
typedef void (*qemu_irq_handler)(void *opaque, int n, int level);
#endif /* QEMU_TYPEDEFS_H */
diff --git a/include/qemu/uri.h b/include/qemu/uri.h
index d201c61260..255e61f452 100644
--- a/include/qemu/uri.h
+++ b/include/qemu/uri.h
@@ -41,8 +41,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library. If not, see <https://www.gnu.org/licenses/>.
*
* Authors:
* Richard W.M. Jones <rjones@redhat.com>
@@ -53,10 +52,6 @@
#ifndef QEMU_URI_H
#define QEMU_URI_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/**
* URI:
*
@@ -64,48 +59,41 @@ extern "C" {
* as described in RFC 2396 but separated for further processing.
*/
typedef struct URI {
- char *scheme; /* the URI scheme */
- char *opaque; /* opaque part */
- char *authority; /* the authority part */
- char *server; /* the server part */
- char *user; /* the user part */
- int port; /* the port number */
- char *path; /* the path string */
- char *fragment; /* the fragment identifier */
- int cleanup; /* parsing potentially unclean URI */
- char *query; /* the query string (as it appears in the URI) */
+ char *scheme; /* the URI scheme */
+ char *opaque; /* opaque part */
+ char *authority; /* the authority part */
+ char *server; /* the server part */
+ char *user; /* the user part */
+ int port; /* the port number */
+ char *path; /* the path string */
+ char *fragment; /* the fragment identifier */
+ int cleanup; /* parsing potentially unclean URI */
+ char *query; /* the query string (as it appears in the URI) */
} URI;
URI *uri_new(void);
-char *uri_resolve(const char *URI, const char *base);
-char *uri_resolve_relative(const char *URI, const char *base);
URI *uri_parse(const char *str);
URI *uri_parse_raw(const char *str, int raw);
int uri_parse_into(URI *uri, const char *str);
char *uri_to_string(URI *uri);
-char *uri_string_escape(const char *str, const char *list);
-char *uri_string_unescape(const char *str, int len, char *target);
void uri_free(URI *uri);
/* Single web service query parameter 'name=value'. */
typedef struct QueryParam {
- char *name; /* Name (unescaped). */
- char *value; /* Value (unescaped). */
- int ignore; /* Ignore this field in qparam_get_query */
+ char *name; /* Name (unescaped). */
+ char *value; /* Value (unescaped). */
+ int ignore; /* Ignore this field in qparam_get_query */
} QueryParam;
/* Set of parameters. */
typedef struct QueryParams {
- int n; /* number of parameters used */
- int alloc; /* allocated space */
- QueryParam *p; /* array of parameters */
+ int n; /* number of parameters used */
+ int alloc; /* allocated space */
+ QueryParam *p; /* array of parameters */
} QueryParams;
-struct QueryParams *query_params_new (int init_alloc);
-extern QueryParams *query_params_parse (const char *query);
-extern void query_params_free (QueryParams *ps);
+QueryParams *query_params_new(int init_alloc);
+QueryParams *query_params_parse(const char *query);
+void query_params_free(QueryParams *ps);
-#ifdef __cplusplus
-}
-#endif
#endif /* QEMU_URI_H */
diff --git a/include/qemu/userfaultfd.h b/include/qemu/userfaultfd.h
new file mode 100644
index 0000000000..18a4314212
--- /dev/null
+++ b/include/qemu/userfaultfd.h
@@ -0,0 +1,46 @@
+/*
+ * Linux UFFD-WP support
+ *
+ * Copyright Virtuozzo GmbH, 2020
+ *
+ * Authors:
+ * Andrey Gruzdev <andrey.gruzdev@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef USERFAULTFD_H
+#define USERFAULTFD_H
+
+#ifdef CONFIG_LINUX
+
+#include "exec/hwaddr.h"
+#include <linux/userfaultfd.h>
+
+/**
+ * uffd_open(): Open a userfaultfd handle for the current context.
+ *
+ * @flags: The flags we want to pass in when creating the handle.
+ *
+ * Returns: the uffd handle (>= 0) on success, or a negative value on error.
+ */
+int uffd_open(int flags);
+int uffd_query_features(uint64_t *features);
+int uffd_create_fd(uint64_t features, bool non_blocking);
+void uffd_close_fd(int uffd_fd);
+int uffd_register_memory(int uffd_fd, void *addr, uint64_t length,
+ uint64_t mode, uint64_t *ioctls);
+int uffd_unregister_memory(int uffd_fd, void *addr, uint64_t length);
+int uffd_change_protection(int uffd_fd, void *addr, uint64_t length,
+ bool wp, bool dont_wake);
+int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
+ uint64_t length, bool dont_wake);
+int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake);
+int uffd_wakeup(int uffd_fd, void *addr, uint64_t length);
+int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count);
+bool uffd_poll_events(int uffd_fd, int tmo);
+
+#endif /* CONFIG_LINUX */
+
+#endif /* USERFAULTFD_H */
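
An illustrative write-protection flow built from the declarations above; the UFFD_FEATURE_PAGEFAULT_FLAG_WP and UFFDIO_REGISTER_MODE_WP constants come from linux/userfaultfd.h, and real callers do considerably more bookkeeping:

    #include "qemu/osdep.h"
    #include "qemu/userfaultfd.h"

    /* Write-protect a region and return the uffd used to service faults. */
    static int watch_region(void *addr, uint64_t len)
    {
        int uffd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);

        if (uffd < 0) {
            return -1;
        }
        if (uffd_register_memory(uffd, addr, len,
                                 UFFDIO_REGISTER_MODE_WP, NULL) < 0 ||
            uffd_change_protection(uffd, addr, len, true, false) < 0) {
            uffd_close_fd(uffd);
            return -1;
        }
        /* later: uffd_poll_events()/uffd_read_events() to service faults */
        return uffd;
    }
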
diff --git a/include/qemu/uuid.h b/include/qemu/uuid.h
index 9925febfa5..869f84af09 100644
--- a/include/qemu/uuid.h
+++ b/include/qemu/uuid.h
@@ -61,14 +61,27 @@ typedef struct {
(clock_seq_hi_and_reserved), (clock_seq_low), (node0), (node1), (node2),\
(node3), (node4), (node5) }
+/* Normal (network byte order) UUID */
+#define UUID(time_low, time_mid, time_hi_and_version, \
+ clock_seq_hi_and_reserved, clock_seq_low, node0, node1, node2, \
+ node3, node4, node5) \
+ { ((time_low) >> 24) & 0xff, ((time_low) >> 16) & 0xff, \
+ ((time_low) >> 8) & 0xff, (time_low) & 0xff, \
+ ((time_mid) >> 8) & 0xff, (time_mid) & 0xff, \
+ ((time_hi_and_version) >> 8) & 0xff, (time_hi_and_version) & 0xff, \
+ (clock_seq_hi_and_reserved), (clock_seq_low), \
+ (node0), (node1), (node2), (node3), (node4), (node5) \
+ }
+
#define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-" \
"%02hhx%02hhx-%02hhx%02hhx-" \
"%02hhx%02hhx-" \
"%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
-#define UUID_FMT_LEN 36
-
#define UUID_NONE "00000000-0000-0000-0000-000000000000"
+QEMU_BUILD_BUG_ON(sizeof(UUID_NONE) - 1 != 36);
+
+#define UUID_STR_LEN sizeof(UUID_NONE)
void qemu_uuid_generate(QemuUUID *out);
@@ -84,4 +97,6 @@ int qemu_uuid_parse(const char *str, QemuUUID *uuid);
QemuUUID qemu_uuid_bswap(QemuUUID uuid);
+uint32_t qemu_uuid_hash(const void *uuid);
+
#endif
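
A small sketch of the new UUID() initializer macro together with UUID_STR_LEN; it assumes the QemuUUID .data byte array and the existing qemu_uuid_unparse() helper, neither of which is shown in this hunk:

    #include "qemu/osdep.h"
    #include "qemu/uuid.h"

    /* A fixed, network-byte-order UUID built with the new UUID() macro. */
    static const QemuUUID demo_uuid = {
        .data = UUID(0x12345678, 0x9abc, 0xdef0, 0x12, 0x34,
                     0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0)
    };

    static void print_uuid(void)
    {
        char str[UUID_STR_LEN];   /* includes space for the trailing NUL */

        qemu_uuid_unparse(&demo_uuid, str);
        printf("uuid: %s\n", str);
    }
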
diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index 1f057c2b9e..bde9495b25 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -18,11 +18,11 @@ typedef struct QEMUVFIOState QEMUVFIOState;
QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
void qemu_vfio_close(QEMUVFIOState *s);
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova_list);
+ bool temporary, uint64_t *iova_list, Error **errp);
int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
- uint64_t offset, uint64_t size,
+ uint64_t offset, uint64_t size, int prot,
Error **errp);
void qemu_vfio_pci_unmap_bar(QEMUVFIOState *s, int index, void *bar,
uint64_t offset, uint64_t size);
diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h
new file mode 100644
index 0000000000..0417ec0533
--- /dev/null
+++ b/include/qemu/vhost-user-server.h
@@ -0,0 +1,73 @@
+/*
+ * Sharing QEMU devices via vhost-user protocol
+ *
+ * Copyright (c) Coiby Xu <coiby.xu@gmail.com>.
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef VHOST_USER_SERVER_H
+#define VHOST_USER_SERVER_H
+
+#include "subprojects/libvhost-user/libvhost-user.h" /* only for the type definitions */
+#include "io/channel-socket.h"
+#include "io/channel-file.h"
+#include "io/net-listener.h"
+#include "qapi/error.h"
+#include "standard-headers/linux/virtio_blk.h"
+
+/* A kick fd that we monitor on behalf of libvhost-user */
+typedef struct VuFdWatch {
+ VuDev *vu_dev;
+ int fd; /*kick fd*/
+ void *pvt;
+ vu_watch_cb cb;
+ QTAILQ_ENTRY(VuFdWatch) next;
+} VuFdWatch;
+
+/**
+ * VuServer:
+ * A vhost-user server instance with user-defined VuDevIface callbacks.
+ * Vhost-user device backends can be implemented using VuServer. VuDevIface
+ * callbacks and virtqueue kicks run in the given AioContext.
+ */
+typedef struct {
+ QIONetListener *listener;
+ QEMUBH *restart_listener_bh;
+ AioContext *ctx;
+ int max_queues;
+ const VuDevIface *vu_iface;
+
+ unsigned int in_flight; /* atomic */
+
+ /* Protected by ctx lock */
+ bool in_qio_channel_yield;
+ bool wait_idle;
+ bool quiescing;
+ VuDev vu_dev;
+ QIOChannel *ioc; /* The I/O channel with the client */
+ QIOChannelSocket *sioc; /* The underlying data channel with the client */
+ QTAILQ_HEAD(, VuFdWatch) vu_fd_watches;
+
+ Coroutine *co_trip; /* coroutine for processing VhostUserMsg */
+} VuServer;
+
+bool vhost_user_server_start(VuServer *server,
+ SocketAddress *unix_socket,
+ AioContext *ctx,
+ uint16_t max_queues,
+ const VuDevIface *vu_iface,
+ Error **errp);
+
+void vhost_user_server_stop(VuServer *server);
+
+void vhost_user_server_inc_in_flight(VuServer *server);
+void vhost_user_server_dec_in_flight(VuServer *server);
+bool vhost_user_server_has_in_flight(VuServer *server);
+
+void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx);
+void vhost_user_server_detach_aio_context(VuServer *server);
+
+#endif /* VHOST_USER_SERVER_H */
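
A rough sketch of bringing up a VuServer with the API above; my_vu_iface and the listening address are placeholders for backend-specific code:

    #include "qemu/osdep.h"
    #include "qemu/vhost-user-server.h"

    /* Backend callbacks (.get_features, .process_msg, ...) go here. */
    static const VuDevIface my_vu_iface;

    static bool start_backend(VuServer *server, SocketAddress *listen_addr,
                              AioContext *ctx, Error **errp)
    {
        /* VuDevIface callbacks and virtqueue kicks will run in @ctx. */
        return vhost_user_server_start(server, listen_addr, ctx,
                                       1 /* max_queues */, &my_vu_iface, errp);
    }

    static void stop_backend(VuServer *server)
    {
        vhost_user_server_stop(server);
    }
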
diff --git a/include/qemu/win_dump_defs.h b/include/qemu/win_dump_defs.h
index 145096e8ee..73a44e2408 100644
--- a/include/qemu/win_dump_defs.h
+++ b/include/qemu/win_dump_defs.h
@@ -11,11 +11,22 @@
#ifndef QEMU_WIN_DUMP_DEFS_H
#define QEMU_WIN_DUMP_DEFS_H
+typedef struct WinDumpPhyMemRun32 {
+ uint32_t BasePage;
+ uint32_t PageCount;
+} QEMU_PACKED WinDumpPhyMemRun32;
+
typedef struct WinDumpPhyMemRun64 {
uint64_t BasePage;
uint64_t PageCount;
} QEMU_PACKED WinDumpPhyMemRun64;
+typedef struct WinDumpPhyMemDesc32 {
+ uint32_t NumberOfRuns;
+ uint32_t NumberOfPages;
+ WinDumpPhyMemRun32 Run[86];
+} QEMU_PACKED WinDumpPhyMemDesc32;
+
typedef struct WinDumpPhyMemDesc64 {
uint32_t NumberOfRuns;
uint32_t unused;
@@ -33,6 +44,39 @@ typedef struct WinDumpExceptionRecord {
uint64_t ExceptionInformation[15];
} QEMU_PACKED WinDumpExceptionRecord;
+typedef struct WinDumpHeader32 {
+ char Signature[4];
+ char ValidDump[4];
+ uint32_t MajorVersion;
+ uint32_t MinorVersion;
+ uint32_t DirectoryTableBase;
+ uint32_t PfnDatabase;
+ uint32_t PsLoadedModuleList;
+ uint32_t PsActiveProcessHead;
+ uint32_t MachineImageType;
+ uint32_t NumberProcessors;
+ union {
+ struct {
+ uint32_t BugcheckCode;
+ uint32_t BugcheckParameter1;
+ uint32_t BugcheckParameter2;
+ uint32_t BugcheckParameter3;
+ uint32_t BugcheckParameter4;
+ };
+ uint8_t BugcheckData[20];
+ };
+ uint8_t VersionUser[32];
+ uint32_t reserved0;
+ uint32_t KdDebuggerDataBlock;
+ union {
+ WinDumpPhyMemDesc32 PhysicalMemoryBlock;
+ uint8_t PhysicalMemoryBlockBuffer[700];
+ };
+ uint8_t reserved1[3200];
+ uint32_t RequiredDumpSpace;
+ uint8_t reserved2[92];
+} QEMU_PACKED WinDumpHeader32;
+
typedef struct WinDumpHeader64 {
char Signature[4];
char ValidDump[4];
@@ -81,24 +125,48 @@ typedef struct WinDumpHeader64 {
uint8_t reserved[4018];
} QEMU_PACKED WinDumpHeader64;
+typedef union WinDumpHeader {
+ struct {
+ char Signature[4];
+ char ValidDump[4];
+ };
+ WinDumpHeader32 x32;
+ WinDumpHeader64 x64;
+} WinDumpHeader;
+
#define KDBG_OWNER_TAG_OFFSET64 0x10
#define KDBG_MM_PFN_DATABASE_OFFSET64 0xC0
#define KDBG_KI_BUGCHECK_DATA_OFFSET64 0x88
#define KDBG_KI_PROCESSOR_BLOCK_OFFSET64 0x218
#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET64 0x338
+#define KDBG_OWNER_TAG_OFFSET KDBG_OWNER_TAG_OFFSET64
+#define KDBG_MM_PFN_DATABASE_OFFSET KDBG_MM_PFN_DATABASE_OFFSET64
+#define KDBG_KI_BUGCHECK_DATA_OFFSET KDBG_KI_BUGCHECK_DATA_OFFSET64
+#define KDBG_KI_PROCESSOR_BLOCK_OFFSET KDBG_KI_PROCESSOR_BLOCK_OFFSET64
+#define KDBG_OFFSET_PRCB_CONTEXT_OFFSET KDBG_OFFSET_PRCB_CONTEXT_OFFSET64
+
#define VMCOREINFO_ELF_NOTE_HDR_SIZE 24
+#define VMCOREINFO_WIN_DUMP_NOTE_SIZE64 (sizeof(WinDumpHeader64) + \
+ VMCOREINFO_ELF_NOTE_HDR_SIZE)
+#define VMCOREINFO_WIN_DUMP_NOTE_SIZE32 (sizeof(WinDumpHeader32) + \
+ VMCOREINFO_ELF_NOTE_HDR_SIZE)
#define WIN_CTX_X64 0x00100000L
+#define WIN_CTX_X86 0x00010000L
#define WIN_CTX_CTL 0x00000001L
#define WIN_CTX_INT 0x00000002L
#define WIN_CTX_SEG 0x00000004L
#define WIN_CTX_FP 0x00000008L
#define WIN_CTX_DBG 0x00000010L
+#define WIN_CTX_EXT 0x00000020L
+
+#define WIN_CTX64_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP)
+#define WIN_CTX64_ALL (WIN_CTX64_FULL | WIN_CTX_SEG | WIN_CTX_DBG)
-#define WIN_CTX_FULL (WIN_CTX_X64 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_FP)
-#define WIN_CTX_ALL (WIN_CTX_FULL | WIN_CTX_SEG | WIN_CTX_DBG)
+#define WIN_CTX32_FULL (WIN_CTX_X86 | WIN_CTX_CTL | WIN_CTX_INT | WIN_CTX_SEG)
+#define WIN_CTX32_ALL (WIN_CTX32_FULL | WIN_CTX_FP | WIN_CTX_DBG | WIN_CTX_EXT)
#define LIVE_SYSTEM_DUMP 0x00000161
@@ -107,7 +175,41 @@ typedef struct WinM128A {
int64_t high;
} QEMU_ALIGNED(16) WinM128A;
-typedef struct WinContext {
+typedef struct WinContext32 {
+ uint32_t ContextFlags;
+
+ uint32_t Dr0;
+ uint32_t Dr1;
+ uint32_t Dr2;
+ uint32_t Dr3;
+ uint32_t Dr6;
+ uint32_t Dr7;
+
+ uint8_t FloatSave[112];
+
+ uint32_t SegGs;
+ uint32_t SegFs;
+ uint32_t SegEs;
+ uint32_t SegDs;
+
+ uint32_t Edi;
+ uint32_t Esi;
+ uint32_t Ebx;
+ uint32_t Edx;
+ uint32_t Ecx;
+ uint32_t Eax;
+
+ uint32_t Ebp;
+ uint32_t Eip;
+ uint32_t SegCs;
+ uint32_t EFlags;
+ uint32_t Esp;
+ uint32_t SegSs;
+
+ uint8_t ExtendedRegisters[512];
+} QEMU_ALIGNED(16) WinContext32;
+
+typedef struct WinContext64 {
uint64_t PHome[6];
uint32_t ContextFlags;
@@ -174,6 +276,11 @@ typedef struct WinContext {
uint64_t LastBranchFromRip;
uint64_t LastExceptionToRip;
uint64_t LastExceptionFromRip;
-} QEMU_ALIGNED(16) WinContext;
+} QEMU_ALIGNED(16) WinContext64;
+
+typedef union WinContext {
+ WinContext32 x32;
+ WinContext64 x64;
+} WinContext;
#endif /* QEMU_WIN_DUMP_DEFS_H */
diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h
index a83fe8e749..b08a934acc 100644
--- a/include/qemu/xattr.h
+++ b/include/qemu/xattr.h
@@ -22,8 +22,12 @@
#ifdef CONFIG_LIBATTR
# include <attr/xattr.h>
#else
-# define ENOATTR ENODATA
-# include <sys/xattr.h>
+# if !defined(ENOATTR)
+# define ENOATTR ENODATA
+# endif
+# ifndef CONFIG_WIN32
+# include <sys/xattr.h>
+# endif
#endif
#endif
diff --git a/include/qemu/xxhash.h b/include/qemu/xxhash.h
index 076f1f6054..0259bbef18 100644
--- a/include/qemu/xxhash.h
+++ b/include/qemu/xxhash.h
@@ -48,8 +48,8 @@
* xxhash32, customized for input variables that are not guaranteed to be
* contiguous in memory.
*/
-static inline uint32_t
-qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
+static inline uint32_t qemu_xxhash8(uint64_t ab, uint64_t cd, uint64_t ef,
+ uint32_t g, uint32_t h)
{
uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2;
uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2;
@@ -59,6 +59,8 @@ qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
uint32_t b = ab >> 32;
uint32_t c = cd;
uint32_t d = cd >> 32;
+ uint32_t e = ef;
+ uint32_t f = ef >> 32;
uint32_t h32;
v1 += a * PRIME32_2;
@@ -89,6 +91,9 @@ qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
h32 += g * PRIME32_3;
h32 = rol32(h32, 17) * PRIME32_4;
+ h32 += h * PRIME32_3;
+ h32 = rol32(h32, 17) * PRIME32_4;
+
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
@@ -100,23 +105,127 @@ qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
static inline uint32_t qemu_xxhash2(uint64_t ab)
{
- return qemu_xxhash7(ab, 0, 0, 0, 0);
+ return qemu_xxhash8(ab, 0, 0, 0, 0);
}
static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd)
{
- return qemu_xxhash7(ab, cd, 0, 0, 0);
+ return qemu_xxhash8(ab, cd, 0, 0, 0);
}
static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e)
{
- return qemu_xxhash7(ab, cd, e, 0, 0);
+ return qemu_xxhash8(ab, cd, 0, e, 0);
}
static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
uint32_t f)
{
- return qemu_xxhash7(ab, cd, e, f, 0);
+ return qemu_xxhash8(ab, cd, 0, e, f);
+}
+
+static inline uint32_t qemu_xxhash7(uint64_t ab, uint64_t cd, uint64_t ef,
+ uint32_t g)
+{
+ return qemu_xxhash8(ab, cd, ef, g, 0);
+}
+
+/*
+ * Component parts of the XXH64 algorithm from
+ * https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h
+ *
+ * The complete algorithm looks like
+ *
+ * i = 0;
+ * if (len >= 32) {
+ * v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ * v2 = seed + XXH_PRIME64_2;
+ * v3 = seed + 0;
+ * v4 = seed - XXH_PRIME64_1;
+ * do {
+ * v1 = XXH64_round(v1, get64bits(input + i));
+ * v2 = XXH64_round(v2, get64bits(input + i + 8));
+ * v3 = XXH64_round(v3, get64bits(input + i + 16));
+ * v4 = XXH64_round(v4, get64bits(input + i + 24));
+ * } while ((i += 32) <= len);
+ * h64 = XXH64_mergerounds(v1, v2, v3, v4);
+ * } else {
+ * h64 = seed + XXH_PRIME64_5;
+ * }
+ * h64 += len;
+ *
+ * for (; i + 8 <= len; i += 8) {
+ * h64 ^= XXH64_round(0, get64bits(input + i));
+ * h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ * }
+ * for (; i + 4 <= len; i += 4) {
+ * h64 ^= get32bits(input + i) * XXH_PRIME64_1;
+ * h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ * }
+ * for (; i < len; i += 1) {
+ * h64 ^= get8bits(input + i) * XXH_PRIME64_5;
+ * h64 = rol64(h64, 11) * XXH_PRIME64_1;
+ * }
+ *
+ * return XXH64_avalanche(h64)
+ *
+ * Exposing the pieces instead allows for simplified usage when
+ * the length is a known constant and the inputs are in registers.
+ */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL
+
+static inline uint64_t XXH64_round(uint64_t acc, uint64_t input)
+{
+ return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1;
+}
+
+static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val)
+{
+ return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4;
+}
+
+static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2,
+ uint64_t v3, uint64_t v4)
+{
+ uint64_t h64;
+
+ h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
+ h64 = XXH64_mergeround(h64, v1);
+ h64 = XXH64_mergeround(h64, v2);
+ h64 = XXH64_mergeround(h64, v3);
+ h64 = XXH64_mergeround(h64, v4);
+
+ return h64;
+}
+
+static inline uint64_t XXH64_avalanche(uint64_t h64)
+{
+ h64 ^= h64 >> 33;
+ h64 *= XXH_PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= XXH_PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
+
+static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b,
+ uint64_t c, uint64_t d)
+{
+ uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
+ uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
+ uint64_t v3 = QEMU_XXHASH_SEED + 0;
+ uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
+
+ v1 = XXH64_round(v1, a);
+ v2 = XXH64_round(v2, b);
+ v3 = XXH64_round(v3, c);
+ v4 = XXH64_round(v4, d);
+
+ return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4));
}
#endif /* QEMU_XXHASH_H */
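
A sketch of composing the exposed XXH64 pieces for a fixed-size input, here five 64-bit values already in registers; qemu_xxhash64_4() above covers the four-value case, and the tail handling follows the commented algorithm:

    #include "qemu/osdep.h"
    #include "qemu/xxhash.h"

    static inline uint64_t hash_five_u64(uint64_t a, uint64_t b, uint64_t c,
                                         uint64_t d, uint64_t e)
    {
        uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
        uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
        uint64_t v3 = QEMU_XXHASH_SEED + 0;
        uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
        uint64_t h64;

        v1 = XXH64_round(v1, a);
        v2 = XXH64_round(v2, b);
        v3 = XXH64_round(v3, c);
        v4 = XXH64_round(v4, d);

        h64 = XXH64_mergerounds(v1, v2, v3, v4);
        h64 += 40;   /* total input length in bytes */

        /* Fold in the 8-byte tail, as in the "i + 8 <= len" loop above. */
        h64 ^= XXH64_round(0, e);
        h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;

        return XXH64_avalanche(h64);
    }
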
diff --git a/include/qemu/yank.h b/include/qemu/yank.h
new file mode 100644
index 0000000000..3d88af6996
--- /dev/null
+++ b/include/qemu/yank.h
@@ -0,0 +1,87 @@
+/*
+ * QEMU yank feature
+ *
+ * Copyright (c) Lukas Straub <lukasstraub2@web.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef YANK_H
+#define YANK_H
+
+#include "qapi/qapi-types-yank.h"
+
+typedef void (YankFn)(void *opaque);
+
+/**
+ * yank_register_instance: Register a new instance.
+ *
+ * This registers a new instance for yanking. Must be called before any yank
+ * function is registered for this instance.
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ * @errp: Error object.
+ *
+ * Returns true on success or false if an error occurred.
+ */
+bool yank_register_instance(const YankInstance *instance, Error **errp);
+
+/**
+ * yank_unregister_instance: Unregister an instance.
+ *
+ * This unregisters an instance. Must be called only after every yank function
+ * of the instance has been unregistered.
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ */
+void yank_unregister_instance(const YankInstance *instance);
+
+/**
+ * yank_register_function: Register a yank function
+ *
+ * This registers a yank function. All limitations of QMP OOB commands apply
+ * to the yank function as well. See docs/devel/qapi-code-gen.rst under
+ * "An OOB-capable command handler must satisfy the following conditions".
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ * @func: The yank function.
+ * @opaque: Will be passed to the yank function.
+ */
+void yank_register_function(const YankInstance *instance,
+ YankFn *func,
+ void *opaque);
+
+/**
+ * yank_unregister_function: Unregister a yank function
+ *
+ * This unregisters a yank function.
+ *
+ * This function is thread-safe.
+ *
+ * @instance: The instance.
+ * @func: the function that was passed to yank_register_function.
+ * @opaque: the opaque pointer that was passed to yank_register_function.
+ */
+void yank_unregister_function(const YankInstance *instance,
+ YankFn *func,
+ void *opaque);
+
+#define BLOCKDEV_YANK_INSTANCE(the_node_name) (&(YankInstance) { \
+ .type = YANK_INSTANCE_TYPE_BLOCK_NODE, \
+ .u.block_node.node_name = (the_node_name) })
+
+#define CHARDEV_YANK_INSTANCE(the_id) (&(YankInstance) { \
+ .type = YANK_INSTANCE_TYPE_CHARDEV, \
+ .u.chardev.id = (the_id) })
+
+#define MIGRATION_YANK_INSTANCE (&(YankInstance) { \
+ .type = YANK_INSTANCE_TYPE_MIGRATION })
+
+#endif
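
A usage sketch of the yank API for a chardev-like backend; the connect/disconnect functions and the body of the yank callback are illustrative:

    #include "qemu/osdep.h"
    #include "qemu/yank.h"
    #include "qapi/error.h"

    static void my_chardev_yank(void *opaque)
    {
        /* Must satisfy the OOB command restrictions: no blocking, no locks
         * that the main thread might hold, etc.
         * shutdown(fd, SHUT_RDWR) or equivalent would go here. */
    }

    static bool my_chardev_connect(void *dev, char *id, Error **errp)
    {
        if (!yank_register_instance(CHARDEV_YANK_INSTANCE(id), errp)) {
            return false;
        }
        yank_register_function(CHARDEV_YANK_INSTANCE(id), my_chardev_yank, dev);
        return true;
    }

    static void my_chardev_disconnect(void *dev, char *id)
    {
        yank_unregister_function(CHARDEV_YANK_INSTANCE(id), my_chardev_yank, dev);
        yank_unregister_instance(CHARDEV_YANK_INSTANCE(id));
    }
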
diff --git a/include/qom/object.h b/include/qom/object.h
index 0f3a60617c..13d3a655dd 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -27,290 +27,10 @@ typedef struct InterfaceInfo InterfaceInfo;
#define TYPE_OBJECT "object"
-/**
- * SECTION:object.h
- * @title:Base Object Type System
- * @short_description: interfaces for creating new types and objects
- *
- * The QEMU Object Model provides a framework for registering user creatable
- * types and instantiating objects from those types. QOM provides the following
- * features:
- *
- * - System for dynamically registering types
- * - Support for single-inheritance of types
- * - Multiple inheritance of stateless interfaces
- *
- * <example>
- * <title>Creating a minimal type</title>
- * <programlisting>
- * #include "qdev.h"
- *
- * #define TYPE_MY_DEVICE "my-device"
- *
- * // No new virtual functions: we can reuse the typedef for the
- * // superclass.
- * typedef DeviceClass MyDeviceClass;
- * typedef struct MyDevice
- * {
- * DeviceState parent;
- *
- * int reg0, reg1, reg2;
- * } MyDevice;
- *
- * static const TypeInfo my_device_info = {
- * .name = TYPE_MY_DEVICE,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDevice),
- * };
- *
- * static void my_device_register_types(void)
- * {
- * type_register_static(&my_device_info);
- * }
- *
- * type_init(my_device_register_types)
- * </programlisting>
- * </example>
- *
- * In the above example, we create a simple type that is described by #TypeInfo.
- * #TypeInfo describes information about the type including what it inherits
- * from, the instance and class size, and constructor/destructor hooks.
- *
- * Alternatively several static types could be registered using helper macro
- * DEFINE_TYPES()
- *
- * <example>
- * <programlisting>
- * static const TypeInfo device_types_info[] = {
- * {
- * .name = TYPE_MY_DEVICE_A,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDeviceA),
- * },
- * {
- * .name = TYPE_MY_DEVICE_B,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDeviceB),
- * },
- * };
- *
- * DEFINE_TYPES(device_types_info)
- * </programlisting>
- * </example>
- *
- * Every type has an #ObjectClass associated with it. #ObjectClass derivatives
- * are instantiated dynamically but there is only ever one instance for any
- * given type. The #ObjectClass typically holds a table of function pointers
- * for the virtual methods implemented by this type.
- *
- * Using object_new(), a new #Object derivative will be instantiated. You can
- * cast an #Object to a subclass (or base-class) type using
- * object_dynamic_cast(). You typically want to define macro wrappers around
- * OBJECT_CHECK() and OBJECT_CLASS_CHECK() to make it easier to convert to a
- * specific type:
- *
- * <example>
- * <title>Typecasting macros</title>
- * <programlisting>
- * #define MY_DEVICE_GET_CLASS(obj) \
- * OBJECT_GET_CLASS(MyDeviceClass, obj, TYPE_MY_DEVICE)
- * #define MY_DEVICE_CLASS(klass) \
- * OBJECT_CLASS_CHECK(MyDeviceClass, klass, TYPE_MY_DEVICE)
- * #define MY_DEVICE(obj) \
- * OBJECT_CHECK(MyDevice, obj, TYPE_MY_DEVICE)
- * </programlisting>
- * </example>
- *
- * # Class Initialization #
- *
- * Before an object is initialized, the class for the object must be
- * initialized. There is only one class object for all instance objects
- * that is created lazily.
- *
- * Classes are initialized by first initializing any parent classes (if
- * necessary). After the parent class object has initialized, it will be
- * copied into the current class object and any additional storage in the
- * class object is zero filled.
- *
- * The effect of this is that classes automatically inherit any virtual
- * function pointers that the parent class has already initialized. All
- * other fields will be zero filled.
- *
- * Once all of the parent classes have been initialized, #TypeInfo::class_init
- * is called to let the class being instantiated provide default initialize for
- * its virtual functions. Here is how the above example might be modified
- * to introduce an overridden virtual function:
- *
- * <example>
- * <title>Overriding a virtual function</title>
- * <programlisting>
- * #include "qdev.h"
- *
- * void my_device_class_init(ObjectClass *klass, void *class_data)
- * {
- * DeviceClass *dc = DEVICE_CLASS(klass);
- * dc->reset = my_device_reset;
- * }
- *
- * static const TypeInfo my_device_info = {
- * .name = TYPE_MY_DEVICE,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDevice),
- * .class_init = my_device_class_init,
- * };
- * </programlisting>
- * </example>
- *
- * Introducing new virtual methods requires a class to define its own
- * struct and to add a .class_size member to the #TypeInfo. Each method
- * will also have a wrapper function to call it easily:
- *
- * <example>
- * <title>Defining an abstract class</title>
- * <programlisting>
- * #include "qdev.h"
- *
- * typedef struct MyDeviceClass
- * {
- * DeviceClass parent;
- *
- * void (*frobnicate) (MyDevice *obj);
- * } MyDeviceClass;
- *
- * static const TypeInfo my_device_info = {
- * .name = TYPE_MY_DEVICE,
- * .parent = TYPE_DEVICE,
- * .instance_size = sizeof(MyDevice),
- * .abstract = true, // or set a default in my_device_class_init
- * .class_size = sizeof(MyDeviceClass),
- * };
- *
- * void my_device_frobnicate(MyDevice *obj)
- * {
- * MyDeviceClass *klass = MY_DEVICE_GET_CLASS(obj);
- *
- * klass->frobnicate(obj);
- * }
- * </programlisting>
- * </example>
- *
- * # Interfaces #
- *
- * Interfaces allow a limited form of multiple inheritance. Instances are
- * similar to normal types except for the fact that are only defined by
- * their classes and never carry any state. As a consequence, a pointer to
- * an interface instance should always be of incomplete type in order to be
- * sure it cannot be dereferenced. That is, you should define the
- * 'typedef struct SomethingIf SomethingIf' so that you can pass around
- * 'SomethingIf *si' arguments, but not define a 'struct SomethingIf { ... }'.
- * The only things you can validly do with a 'SomethingIf *' are to pass it as
- * an argument to a method on its corresponding SomethingIfClass, or to
- * dynamically cast it to an object that implements the interface.
- *
- * # Methods #
- *
- * A <emphasis>method</emphasis> is a function within the namespace scope of
- * a class. It usually operates on the object instance by passing it as a
- * strongly-typed first argument.
- * If it does not operate on an object instance, it is dubbed
- * <emphasis>class method</emphasis>.
- *
- * Methods cannot be overloaded. That is, the #ObjectClass and method name
- * uniquely identity the function to be called; the signature does not vary
- * except for trailing varargs.
- *
- * Methods are always <emphasis>virtual</emphasis>. Overriding a method in
- * #TypeInfo.class_init of a subclass leads to any user of the class obtained
- * via OBJECT_GET_CLASS() accessing the overridden function.
- * The original function is not automatically invoked. It is the responsibility
- * of the overriding class to determine whether and when to invoke the method
- * being overridden.
- *
- * To invoke the method being overridden, the preferred solution is to store
- * the original value in the overriding class before overriding the method.
- * This corresponds to |[ {super,base}.method(...) ]| in Java and C#
- * respectively; this frees the overriding class from hardcoding its parent
- * class, which someone might choose to change at some point.
- *
- * <example>
- * <title>Overriding a virtual method</title>
- * <programlisting>
- * typedef struct MyState MyState;
- *
- * typedef void (*MyDoSomething)(MyState *obj);
- *
- * typedef struct MyClass {
- * ObjectClass parent_class;
- *
- * MyDoSomething do_something;
- * } MyClass;
- *
- * static void my_do_something(MyState *obj)
- * {
- * // do something
- * }
- *
- * static void my_class_init(ObjectClass *oc, void *data)
- * {
- * MyClass *mc = MY_CLASS(oc);
- *
- * mc->do_something = my_do_something;
- * }
- *
- * static const TypeInfo my_type_info = {
- * .name = TYPE_MY,
- * .parent = TYPE_OBJECT,
- * .instance_size = sizeof(MyState),
- * .class_size = sizeof(MyClass),
- * .class_init = my_class_init,
- * };
- *
- * typedef struct DerivedClass {
- * MyClass parent_class;
- *
- * MyDoSomething parent_do_something;
- * } DerivedClass;
- *
- * static void derived_do_something(MyState *obj)
- * {
- * DerivedClass *dc = DERIVED_GET_CLASS(obj);
- *
- * // do something here
- * dc->parent_do_something(obj);
- * // do something else here
- * }
- *
- * static void derived_class_init(ObjectClass *oc, void *data)
- * {
- * MyClass *mc = MY_CLASS(oc);
- * DerivedClass *dc = DERIVED_CLASS(oc);
- *
- * dc->parent_do_something = mc->do_something;
- * mc->do_something = derived_do_something;
- * }
- *
- * static const TypeInfo derived_type_info = {
- * .name = TYPE_DERIVED,
- * .parent = TYPE_MY,
- * .class_size = sizeof(DerivedClass),
- * .class_init = derived_class_init,
- * };
- * </programlisting>
- * </example>
- *
- * Alternatively, object_class_by_name() can be used to obtain the class and
- * its non-overridden methods for a specific type. This would correspond to
- * |[ MyClass::method(...) ]| in C++.
- *
- * The first example of such a QOM method was #CPUClass.reset,
- * another example is #DeviceClass.realize.
- */
-
-
typedef struct ObjectProperty ObjectProperty;
/**
- * ObjectPropertyAccessor:
+ * typedef ObjectPropertyAccessor:
* @obj: the object that owns the property
* @v: the visitor that contains the property data
* @name: the name of the property
@@ -326,7 +46,7 @@ typedef void (ObjectPropertyAccessor)(Object *obj,
Error **errp);
/**
- * ObjectPropertyResolve:
+ * typedef ObjectPropertyResolve:
* @obj: the object that owns the property
* @opaque: the opaque registered with the property
* @part: the name of the property
@@ -345,7 +65,7 @@ typedef Object *(ObjectPropertyResolve)(Object *obj,
const char *part);
/**
- * ObjectPropertyRelease:
+ * typedef ObjectPropertyRelease:
* @obj: the object that owns the property
* @name: the name of the property
* @opaque: the opaque registered with the property
@@ -357,7 +77,7 @@ typedef void (ObjectPropertyRelease)(Object *obj,
void *opaque);
/**
- * ObjectPropertyInit:
+ * typedef ObjectPropertyInit:
* @obj: the object that owns the property
* @prop: the property to set
*
@@ -380,7 +100,7 @@ struct ObjectProperty
};
/**
- * ObjectUnparent:
+ * typedef ObjectUnparent:
* @obj: the object that is being removed from the composition tree
*
* Called when an object is being removed from the QOM composition tree.
@@ -389,7 +109,7 @@ struct ObjectProperty
typedef void (ObjectUnparent)(Object *obj);
/**
- * ObjectFree:
+ * typedef ObjectFree:
* @obj: the object being freed
*
* Called when an object's last reference is removed.
@@ -399,14 +119,14 @@ typedef void (ObjectFree)(void *obj);
#define OBJECT_CLASS_CAST_CACHE 4
/**
- * ObjectClass:
+ * struct ObjectClass:
*
* The base for all classes. The only thing that #ObjectClass contains is an
* integer type handle.
*/
struct ObjectClass
{
- /*< private >*/
+ /* private: */
Type type;
GSList *interfaces;
@@ -419,7 +139,7 @@ struct ObjectClass
};
/**
- * Object:
+ * struct Object:
*
* The base for all objects. The first member of this object is a pointer to
* a #ObjectClass. Since C guarantees that the first member of a structure
@@ -432,7 +152,7 @@ struct ObjectClass
*/
struct Object
{
- /*< private >*/
+ /* private: */
ObjectClass *class;
ObjectFree *free;
GHashTable *properties;
@@ -441,12 +161,285 @@ struct Object
};
/**
- * TypeInfo:
+ * DECLARE_INSTANCE_CHECKER:
+ * @InstanceType: instance struct name
+ * @OBJ_NAME: the object name in uppercase with underscore separators
+ * @TYPENAME: type name
+ *
+ * Direct usage of this macro should be avoided, and the complete
+ * OBJECT_DECLARE_TYPE macro is recommended instead.
+ *
+ * This macro will provide the instance type cast functions for a
+ * QOM type.
+ */
+#define DECLARE_INSTANCE_CHECKER(InstanceType, OBJ_NAME, TYPENAME) \
+ static inline G_GNUC_UNUSED InstanceType * \
+ OBJ_NAME(const void *obj) \
+ { return OBJECT_CHECK(InstanceType, obj, TYPENAME); }
+
+/**
+ * DECLARE_CLASS_CHECKERS:
+ * @ClassType: class struct name
+ * @OBJ_NAME: the object name in uppercase with underscore separators
+ * @TYPENAME: type name
+ *
+ * Direct usage of this macro should be avoided, and the complete
+ * OBJECT_DECLARE_TYPE macro is recommended instead.
+ *
+ * This macro will provide the class type cast functions for a
+ * QOM type.
+ */
+#define DECLARE_CLASS_CHECKERS(ClassType, OBJ_NAME, TYPENAME) \
+ static inline G_GNUC_UNUSED ClassType * \
+ OBJ_NAME##_GET_CLASS(const void *obj) \
+ { return OBJECT_GET_CLASS(ClassType, obj, TYPENAME); } \
+ \
+ static inline G_GNUC_UNUSED ClassType * \
+ OBJ_NAME##_CLASS(const void *klass) \
+ { return OBJECT_CLASS_CHECK(ClassType, klass, TYPENAME); }
+
+/**
+ * DECLARE_OBJ_CHECKERS:
+ * @InstanceType: instance struct name
+ * @ClassType: class struct name
+ * @OBJ_NAME: the object name in uppercase with underscore separators
+ * @TYPENAME: type name
+ *
+ * Direct usage of this macro should be avoided, and the complete
+ * OBJECT_DECLARE_TYPE macro is recommended instead.
+ *
+ * This macro will provide the three standard type cast functions for a
+ * QOM type.
+ */
+#define DECLARE_OBJ_CHECKERS(InstanceType, ClassType, OBJ_NAME, TYPENAME) \
+ DECLARE_INSTANCE_CHECKER(InstanceType, OBJ_NAME, TYPENAME) \
+ \
+ DECLARE_CLASS_CHECKERS(ClassType, OBJ_NAME, TYPENAME)
+
+/**
+ * OBJECT_DECLARE_TYPE:
+ * @InstanceType: instance struct name
+ * @ClassType: class struct name
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ *
+ * This macro is typically used in a header file, and will:
+ *
+ * - create the typedefs for the object and class structs
+ * - register the type for use with g_autoptr
+ * - provide three standard type cast functions
+ *
+ * The object struct and class struct need to be declared manually.
+ */
+#define OBJECT_DECLARE_TYPE(InstanceType, ClassType, MODULE_OBJ_NAME) \
+ typedef struct InstanceType InstanceType; \
+ typedef struct ClassType ClassType; \
+ \
+ G_DEFINE_AUTOPTR_CLEANUP_FUNC(InstanceType, object_unref) \
+ \
+ DECLARE_OBJ_CHECKERS(InstanceType, ClassType, \
+ MODULE_OBJ_NAME, TYPE_##MODULE_OBJ_NAME)
+
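As an illustration (not part of this patch; TYPE_MY_DEVICE, MyDevice and MyDeviceClass are hypothetical names), a header using OBJECT_DECLARE_TYPE might be sketched as follows, with both structs declared by hand as the documentation requires:

    #define TYPE_MY_DEVICE "my-device"
    OBJECT_DECLARE_TYPE(MyDevice, MyDeviceClass, MY_DEVICE)

    struct MyDevice {
        Object parent_obj;              /* must be first */
        int counter;
    };

    struct MyDeviceClass {
        ObjectClass parent_class;
        void (*frob)(MyDevice *dev);    /* virtual method slot */
    };

This expands to the MyDevice/MyDeviceClass typedefs, g_autoptr() support, and the MY_DEVICE(), MY_DEVICE_CLASS() and MY_DEVICE_GET_CLASS() cast helpers.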
+/**
+ * OBJECT_DECLARE_SIMPLE_TYPE:
+ * @InstanceType: instance struct name
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ *
+ * This does the same as OBJECT_DECLARE_TYPE(), but with no class struct
+ * declared.
+ *
+ * This macro should be used unless the class struct needs to have
+ * virtual methods declared.
+ */
+#define OBJECT_DECLARE_SIMPLE_TYPE(InstanceType, MODULE_OBJ_NAME) \
+ typedef struct InstanceType InstanceType; \
+ \
+ G_DEFINE_AUTOPTR_CLEANUP_FUNC(InstanceType, object_unref) \
+ \
+ DECLARE_INSTANCE_CHECKER(InstanceType, MODULE_OBJ_NAME, TYPE_##MODULE_OBJ_NAME)
+
+
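A similar sketch (hypothetical names) for the simple case with no class struct:

    #define TYPE_MY_BACKEND "my-backend"
    OBJECT_DECLARE_SIMPLE_TYPE(MyBackend, MY_BACKEND)

    struct MyBackend {
        Object parent_obj;
        char *path;
    };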
+/**
+ * DO_OBJECT_DEFINE_TYPE_EXTENDED:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ * @ABSTRACT: boolean flag to indicate whether the object can be instantiated
+ * @CLASS_SIZE: size of the type's class
+ * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces
+ *
+ * This is the base macro used to implement all the OBJECT_DEFINE_*
+ * macros. It should never be used directly in a source file.
+ */
+#define DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME, \
+ ABSTRACT, CLASS_SIZE, ...) \
+ static void \
+ module_obj_name##_finalize(Object *obj); \
+ static void \
+ module_obj_name##_class_init(ObjectClass *oc, void *data); \
+ static void \
+ module_obj_name##_init(Object *obj); \
+ \
+ static const TypeInfo module_obj_name##_info = { \
+ .parent = TYPE_##PARENT_MODULE_OBJ_NAME, \
+ .name = TYPE_##MODULE_OBJ_NAME, \
+ .instance_size = sizeof(ModuleObjName), \
+ .instance_align = __alignof__(ModuleObjName), \
+ .instance_init = module_obj_name##_init, \
+ .instance_finalize = module_obj_name##_finalize, \
+ .class_size = CLASS_SIZE, \
+ .class_init = module_obj_name##_class_init, \
+ .abstract = ABSTRACT, \
+ .interfaces = (InterfaceInfo[]) { __VA_ARGS__ } , \
+ }; \
+ \
+ static void \
+ module_obj_name##_register_types(void) \
+ { \
+ type_register_static(&module_obj_name##_info); \
+ } \
+ type_init(module_obj_name##_register_types);
+
+/**
+ * OBJECT_DEFINE_TYPE_EXTENDED:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ * @ABSTRACT: boolean flag to indicate whether the object can be instantiated
+ * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces
+ *
+ * This macro is typically used in a source file, and will:
+ *
+ * - declare prototypes for _finalize, _class_init and _init methods
+ * - declare the TypeInfo struct instance
+ * - provide the constructor to register the type
+ *
+ * After using this macro, implementations of the _finalize, _class_init,
+ * and _init methods need to be written. Any of these can be zero-line
+ * no-op implementations if no special logic is required for a given type.
+ *
+ * This macro should rarely be used; one of the more specialized
+ * macros is usually a better choice.
+ */
+#define OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ ABSTRACT, ...) \
+ DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ ABSTRACT, sizeof(ModuleObjName##Class), \
+ __VA_ARGS__)
+
+/**
+ * OBJECT_DEFINE_TYPE:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a specialization of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable
+ * for the common case of a non-abstract type, without any interfaces.
+ */
+#define OBJECT_DEFINE_TYPE(ModuleObjName, module_obj_name, MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME) \
+ OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ false, { NULL })
+
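Continuing the hypothetical MyDevice example above, the source-file side might be sketched like this; the three functions must be provided because the macro emits prototypes for them:

    /* my-device.c */
    OBJECT_DEFINE_TYPE(MyDevice, my_device, MY_DEVICE, OBJECT)

    static void my_device_init(Object *obj)
    {
        MyDevice *dev = MY_DEVICE(obj);
        dev->counter = 0;
    }

    static void my_device_finalize(Object *obj)
    {
        /* nothing to release in this sketch */
    }

    static void my_device_class_init(ObjectClass *oc, void *data)
    {
        MyDeviceClass *mdc = MY_DEVICE_CLASS(oc);
        mdc->frob = NULL;   /* subclasses may override */
    }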
+/**
+ * OBJECT_DEFINE_TYPE_WITH_INTERFACES:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ * @...: list of initializers for "InterfaceInfo" to declare implemented interfaces
+ *
+ * This is a specialization of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable
+ * for the common case of a non-abstract type, with one or more implemented
+ * interfaces.
+ *
+ * Note when passing the list of interfaces, be sure to include the final
+ * NULL entry, e.g. { TYPE_USER_CREATABLE }, { NULL }
+ */
+#define OBJECT_DEFINE_TYPE_WITH_INTERFACES(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME, ...) \
+ OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ false, __VA_ARGS__)
+
+/**
+ * OBJECT_DEFINE_ABSTRACT_TYPE:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a specialization of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable
+ * for defining an abstract type, without any interfaces.
+ */
+#define OBJECT_DEFINE_ABSTRACT_TYPE(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME) \
+ OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ true, { NULL })
+
+/**
+ * OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a variant of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable for
+ * the case of a non-abstract type, with interfaces, and with no requirement
+ * for a class struct.
+ */
+#define OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ModuleObjName, \
+ module_obj_name, \
+ MODULE_OBJ_NAME, \
+ PARENT_MODULE_OBJ_NAME, ...) \
+ DO_OBJECT_DEFINE_TYPE_EXTENDED(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, \
+ false, 0, __VA_ARGS__)
+
+/**
+ * OBJECT_DEFINE_SIMPLE_TYPE:
+ * @ModuleObjName: the object name with initial caps
+ * @module_obj_name: the object name in lowercase with underscore separators
+ * @MODULE_OBJ_NAME: the object name in uppercase with underscore separators
+ * @PARENT_MODULE_OBJ_NAME: the parent object name in uppercase with underscore
+ * separators
+ *
+ * This is a variant of OBJECT_DEFINE_TYPE_EXTENDED, which is suitable for
+ * the common case of a non-abstract type, without any interfaces, and with
+ * no requirement for a class struct. If you declared your type with
+ * OBJECT_DECLARE_SIMPLE_TYPE then this is probably the right choice for
+ * defining it.
+ */
+#define OBJECT_DEFINE_SIMPLE_TYPE(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME) \
+ OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(ModuleObjName, module_obj_name, \
+ MODULE_OBJ_NAME, PARENT_MODULE_OBJ_NAME, { NULL })
+
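For the hypothetical MyBackend type declared with OBJECT_DECLARE_SIMPLE_TYPE above, the matching definition could be sketched as below; the _init, _finalize and _class_init functions are still required even though there is no class struct:

    OBJECT_DEFINE_SIMPLE_TYPE(MyBackend, my_backend, MY_BACKEND, OBJECT)

    static void my_backend_init(Object *obj)
    {
    }

    static void my_backend_finalize(Object *obj)
    {
        g_free(MY_BACKEND(obj)->path);
    }

    static void my_backend_class_init(ObjectClass *oc, void *data)
    {
    }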
+/**
+ * struct TypeInfo:
* @name: The name of the type.
* @parent: The name of the parent type.
* @instance_size: The size of the object (derivative of #Object). If
* @instance_size is 0, then the size of the object will be the size of the
* parent object.
+ * @instance_align: The required alignment of the object. If @instance_align
+ * is 0, then normal malloc alignment is sufficient; if non-zero, then we
+ * must use qemu_memalign for allocation.
* @instance_init: This function is called to initialize an object. The parent
* class will have already been initialized so the type is only responsible
* for initializing its own members.
@@ -484,6 +477,7 @@ struct TypeInfo
const char *parent;
size_t instance_size;
+ size_t instance_align;
void (*instance_init)(Object *obj);
void (*instance_post_init)(Object *obj);
void (*instance_finalize)(Object *obj);
@@ -563,7 +557,7 @@ struct TypeInfo
OBJECT_CLASS_CHECK(class, object_get_class(OBJECT(obj)), name)
/**
- * InterfaceInfo:
+ * struct InterfaceInfo:
* @type: The name of the interface.
*
* The information associated with an interface.
@@ -573,7 +567,7 @@ struct InterfaceInfo {
};
/**
- * InterfaceClass:
+ * struct InterfaceClass:
* @parent_class: the base class
*
* The class for all interfaces. Subclasses of this class should only add
@@ -582,7 +576,7 @@ struct InterfaceInfo {
struct InterfaceClass
{
ObjectClass parent_class;
- /*< private >*/
+ /* private: */
ObjectClass *concrete_class;
Type interface_type;
};
@@ -654,27 +648,25 @@ Object *object_new(const char *typename);
* object will be marked complete once all the properties have been
* processed.
*
- * <example>
- * <title>Creating an object with properties</title>
- * <programlisting>
- * Error *err = NULL;
- * Object *obj;
- *
- * obj = object_new_with_props(TYPE_MEMORY_BACKEND_FILE,
- * object_get_objects_root(),
- * "hostmem0",
- * &err,
- * "share", "yes",
- * "mem-path", "/dev/shm/somefile",
- * "prealloc", "yes",
- * "size", "1048576",
- * NULL);
- *
- * if (!obj) {
- * error_reportf_err(err, "Cannot create memory backend: ");
- * }
- * </programlisting>
- * </example>
+ * .. code-block:: c
+ * :caption: Creating an object with properties
+ *
+ * Error *err = NULL;
+ * Object *obj;
+ *
+ * obj = object_new_with_props(TYPE_MEMORY_BACKEND_FILE,
+ * object_get_objects_root(),
+ * "hostmem0",
+ * &err,
+ * "share", "yes",
+ * "mem-path", "/dev/shm/somefile",
+ * "prealloc", "yes",
+ * "size", "1048576",
+ * NULL);
+ *
+ * if (!obj) {
+ * error_reportf_err(err, "Cannot create memory backend: ");
+ * }
*
* The returned object will have one stable reference maintained
* for as long as it is present in the object hierarchy.
@@ -685,7 +677,7 @@ Object *object_new_with_props(const char *typename,
Object *parent,
const char *id,
Error **errp,
- ...) QEMU_SENTINEL;
+ ...) G_GNUC_NULL_TERMINATED;
/**
* object_new_with_propv:
@@ -707,7 +699,8 @@ bool object_apply_global_props(Object *obj, const GPtrArray *props,
Error **errp);
void object_set_machine_compat_props(GPtrArray *compat_props);
void object_set_accelerator_compat_props(GPtrArray *compat_props);
-void object_register_sugar_prop(const char *driver, const char *prop, const char *value);
+void object_register_sugar_prop(const char *driver, const char *prop,
+ const char *value, bool optional);
void object_apply_compat_props(Object *obj);
/**
@@ -723,30 +716,28 @@ void object_apply_compat_props(Object *obj);
* strings. The propname of %NULL indicates the end of the property
* list.
*
- * <example>
- * <title>Update an object's properties</title>
- * <programlisting>
- * Error *err = NULL;
- * Object *obj = ...get / create object...;
- *
- * if (!object_set_props(obj,
- * &err,
- * "share", "yes",
- * "mem-path", "/dev/shm/somefile",
- * "prealloc", "yes",
- * "size", "1048576",
- * NULL)) {
- * error_reportf_err(err, "Cannot set properties: ");
- * }
- * </programlisting>
- * </example>
+ * .. code-block:: c
+ * :caption: Update an object's properties
+ *
+ * Error *err = NULL;
+ * Object *obj = ...get / create object...;
+ *
+ * if (!object_set_props(obj,
+ * &err,
+ * "share", "yes",
+ * "mem-path", "/dev/shm/somefile",
+ * "prealloc", "yes",
+ * "size", "1048576",
+ * NULL)) {
+ * error_reportf_err(err, "Cannot set properties: ");
+ * }
*
* The returned object will have one stable reference maintained
* for as long as it is present in the object hierarchy.
*
* Returns: %true on success, %false on error.
*/
-bool object_set_props(Object *obj, Error **errp, ...) QEMU_SENTINEL;
+bool object_set_props(Object *obj, Error **errp, ...) G_GNUC_NULL_TERMINATED;
/**
* object_set_propv:
@@ -798,7 +789,7 @@ void object_initialize(void *obj, size_t size, const char *typename);
bool object_initialize_child_with_props(Object *parentobj,
const char *propname,
void *childobj, size_t size, const char *type,
- Error **errp, ...) QEMU_SENTINEL;
+ Error **errp, ...) G_GNUC_NULL_TERMINATED;
/**
* object_initialize_child_with_propsv:
@@ -827,10 +818,11 @@ bool object_initialize_child_with_propsv(Object *parentobj,
* object.
* @type: The name of the type of the object to instantiate.
*
- * This is like
- * object_initialize_child_with_props(parent, propname,
- * child, sizeof(*child), type,
- * &error_abort, NULL)
+ * This is like::
+ *
+ * object_initialize_child_with_props(parent, propname,
+ * child, sizeof(*child), type,
+ * &error_abort, NULL)
*/
#define object_initialize_child(parent, propname, child, type) \
object_initialize_child_internal((parent), (propname), \
@@ -853,6 +845,11 @@ Object *object_dynamic_cast(Object *obj, const char *typename);
/**
* object_dynamic_cast_assert:
+ * @obj: The object to cast.
+ * @typename: The @typename to cast to.
+ * @file: Source code file where function was called
+ * @line: Source code line where function was called
+ * @func: Name of function where this function was called
*
* See object_dynamic_cast() for a description of the parameters of this
* function. The only difference in behavior is that this function asserts
@@ -926,15 +923,41 @@ static void do_qemu_init_ ## type_array(void) \
type_init(do_qemu_init_ ## type_array)
/**
+ * type_print_class_properties:
+ * @type: a QOM class name
+ *
+ * Print the class properties of the given type to stdout or the monitor.
+ * Return whether the class was found.
+ */
+bool type_print_class_properties(const char *type);
+
+/**
+ * object_set_properties_from_keyval:
+ * @obj: a QOM object
+ * @qdict: a dictionary with the properties to be set
+ * @from_json: true if leaf values of @qdict are typed, false if they
+ * are strings
+ * @errp: pointer to error object
+ *
+ * For each key in the dictionary, parse the value string if needed,
+ * then set the corresponding property in @obj.
+ */
+void object_set_properties_from_keyval(Object *obj, const QDict *qdict,
+ bool from_json, Error **errp);
+
+/**
* object_class_dynamic_cast_assert:
* @klass: The #ObjectClass to attempt to cast.
* @typename: The QOM typename of the class to cast to.
+ * @file: Source code file where function was called
+ * @line: Source code line where function was called
+ * @func: Name of function where this function was called
*
* See object_class_dynamic_cast() for a description of the parameters
* of this function. The only difference in behavior is that this function
* asserts instead of returning #NULL on failure if QOM cast debugging is
* enabled. This function is not meant to be called directly, but only through
- * the wrapper macros OBJECT_CLASS_CHECK and INTERFACE_CHECK.
+ * the wrapper macro OBJECT_CLASS_CHECK.
*/
ObjectClass *object_class_dynamic_cast_assert(ObjectClass *klass,
const char *typename,
@@ -1035,7 +1058,7 @@ GSList *object_class_get_list_sorted(const char *implements_type,
* as its reference count is greater than zero.
* Returns: @obj
*/
-Object *object_ref(Object *obj);
+Object *object_ref(void *obj);
/**
* object_unref:
@@ -1044,7 +1067,7 @@ Object *object_ref(Object *obj);
* Decrease the reference count of a object. A object cannot be freed as long
* as its reference count is greater than zero.
*/
-void object_unref(Object *obj);
+void object_unref(void *obj);
/**
* object_property_try_add:
@@ -1080,6 +1103,23 @@ ObjectProperty *object_property_try_add(Object *obj, const char *name,
* object_property_add:
* Same as object_property_try_add() with @errp hardcoded to
* &error_abort.
+ *
+ * @obj: the object to add a property to
+ * @name: the name of the property. This can contain any character except for
+ * a forward slash. In general, you should use hyphens '-' instead of
+ * underscores '_' when naming properties.
+ * @type: the type name of the property. This namespace is pretty loosely
+ * defined. Sub namespaces are constructed by using a prefix followed by
+ * angle brackets. For instance, the type 'virtio-net-pci' in the
+ * 'link' namespace would be 'link<virtio-net-pci>'.
+ * @get: The getter to be called to read a property. If this is NULL, then
+ * the property cannot be read.
+ * @set: the setter to be called to write a property. If this is NULL,
+ * then the property cannot be written.
+ * @release: called when the property is removed from the object. This is
+ * meant to allow a property to free its opaque upon object
+ * destruction. This may be NULL.
+ * @opaque: an opaque pointer to pass to the callbacks for the property
*/
ObjectProperty *object_property_add(Object *obj, const char *name,
const char *type,
@@ -1116,6 +1156,14 @@ void object_property_set_default_bool(ObjectProperty *prop, bool value);
void object_property_set_default_str(ObjectProperty *prop, const char *value);
/**
+ * object_property_set_default_list:
+ * @prop: the property to set
+ *
+ * Set the property default value to be an empty list.
+ */
+void object_property_set_default_list(ObjectProperty *prop);
+
+/**
* object_property_set_default_int:
* @prop: the property to set
* @value: the value to be written to the property
@@ -1137,14 +1185,52 @@ void object_property_set_default_uint(ObjectProperty *prop, uint64_t value);
* object_property_find:
* @obj: the object
* @name: the name of the property
+ *
+ * Look up a property for an object.
+ *
+ * Return its #ObjectProperty if found, or NULL.
+ */
+ObjectProperty *object_property_find(Object *obj, const char *name);
+
+/**
+ * object_property_find_err:
+ * @obj: the object
+ * @name: the name of the property
* @errp: returns an error if this function fails
*
- * Look up a property for an object and return its #ObjectProperty if found.
+ * Look up a property for an object.
+ *
+ * Return its #ObjectProperty if found, or NULL.
*/
-ObjectProperty *object_property_find(Object *obj, const char *name,
- Error **errp);
-ObjectProperty *object_class_property_find(ObjectClass *klass, const char *name,
- Error **errp);
+ObjectProperty *object_property_find_err(Object *obj,
+ const char *name,
+ Error **errp);
+
+/**
+ * object_class_property_find:
+ * @klass: the object class
+ * @name: the name of the property
+ *
+ * Look up a property for an object class.
+ *
+ * Return its #ObjectProperty if found, or NULL.
+ */
+ObjectProperty *object_class_property_find(ObjectClass *klass,
+ const char *name);
+
+/**
+ * object_class_property_find_err:
+ * @klass: the object class
+ * @name: the name of the property
+ * @errp: returns an error if this function fails
+ *
+ * Look up a property for an object class.
+ *
+ * Return its #ObjectProperty if found, or NULL.
+ */
+ObjectProperty *object_class_property_find_err(ObjectClass *klass,
+ const char *name,
+ Error **errp);
typedef struct ObjectPropertyIterator {
ObjectClass *nextclass;
@@ -1153,6 +1239,7 @@ typedef struct ObjectPropertyIterator {
/**
* object_property_iter_init:
+ * @iter: the iterator instance
* @obj: the object
*
* Initializes an iterator for traversing all properties
@@ -1163,24 +1250,23 @@ typedef struct ObjectPropertyIterator {
*
* Typical usage pattern would be
*
- * <example>
- * <title>Using object property iterators</title>
- * <programlisting>
- * ObjectProperty *prop;
- * ObjectPropertyIterator iter;
+ * .. code-block:: c
+ * :caption: Using object property iterators
*
- * object_property_iter_init(&iter, obj);
- * while ((prop = object_property_iter_next(&iter))) {
- * ... do something with prop ...
- * }
- * </programlisting>
- * </example>
+ * ObjectProperty *prop;
+ * ObjectPropertyIterator iter;
+ *
+ * object_property_iter_init(&iter, obj);
+ * while ((prop = object_property_iter_next(&iter))) {
+ * ... do something with prop ...
+ * }
*/
void object_property_iter_init(ObjectPropertyIterator *iter,
Object *obj);
/**
* object_class_property_iter_init:
+ * @iter: the iterator instance
* @klass: the class
*
* Initializes an iterator for traversing all properties
@@ -1228,6 +1314,7 @@ bool object_property_get(Object *obj, const char *name, Visitor *v,
/**
* object_property_set_str:
+ * @obj: the object
* @name: the name of the property
* @value: the value to be written to the property
* @errp: returns an error if this function fails
@@ -1254,6 +1341,7 @@ char *object_property_get_str(Object *obj, const char *name,
/**
* object_property_set_link:
+ * @obj: the object
* @name: the name of the property
* @value: the value to be written to the property
* @errp: returns an error if this function fails
@@ -1261,7 +1349,7 @@ char *object_property_get_str(Object *obj, const char *name,
* Writes an object's canonical path to a property.
*
* If the link property was created with
- * <code>OBJ_PROP_LINK_STRONG</code> bit, the old target object is
+ * the %OBJ_PROP_LINK_STRONG bit, the old target object is
* unreferenced, and a reference is added to the new target object.
*
* Returns: %true on success, %false on failure.
@@ -1284,6 +1372,7 @@ Object *object_property_get_link(Object *obj, const char *name,
/**
* object_property_set_bool:
+ * @obj: the object
* @name: the name of the property
* @value: the value to be written to the property
* @errp: returns an error if this function fails
@@ -1301,7 +1390,7 @@ bool object_property_set_bool(Object *obj, const char *name,
* @name: the name of the property
* @errp: returns an error if this function fails
*
- * Returns: the value of the property, converted to a boolean, or NULL if
+ * Returns: the value of the property, converted to a boolean, or false if
* an error occurs (including when the property value is not a bool).
*/
bool object_property_get_bool(Object *obj, const char *name,
@@ -1309,6 +1398,7 @@ bool object_property_get_bool(Object *obj, const char *name,
/**
* object_property_set_int:
+ * @obj: the object
* @name: the name of the property
* @value: the value to be written to the property
* @errp: returns an error if this function fails
@@ -1326,7 +1416,7 @@ bool object_property_set_int(Object *obj, const char *name,
* @name: the name of the property
* @errp: returns an error if this function fails
*
- * Returns: the value of the property, converted to an integer, or negative if
+ * Returns: the value of the property, converted to an integer, or -1 if
* an error occurs (including when the property value is not an integer).
*/
int64_t object_property_get_int(Object *obj, const char *name,
@@ -1334,6 +1424,7 @@ int64_t object_property_get_int(Object *obj, const char *name,
/**
* object_property_set_uint:
+ * @obj: the object
* @name: the name of the property
* @value: the value to be written to the property
* @errp: returns an error if this function fails
@@ -1364,9 +1455,9 @@ uint64_t object_property_get_uint(Object *obj, const char *name,
* @typename: the name of the enum data type
* @errp: returns an error if this function fails
*
- * Returns: the value of the property, converted to an integer, or
- * undefined if an error occurs (including when the property value is not
- * an enum).
+ * Returns: the value of the property, converted to an integer (which
+ * can't be negative), or -1 on error (including when the property
+ * value is not an enum).
*/
int object_property_get_enum(Object *obj, const char *name,
const char *typename, Error **errp);
@@ -1457,6 +1548,7 @@ Object *object_get_internal_root(void);
/**
* object_get_canonical_path_component:
+ * @obj: the object
*
* Returns: The final component in the object's canonical path. The canonical
* path is the path within the composition tree starting from the root.
@@ -1466,6 +1558,7 @@ const char *object_get_canonical_path_component(const Object *obj);
/**
* object_get_canonical_path:
+ * @obj: the object
*
* Returns: The canonical path for a object, newly allocated. This is
* the path within the composition tree starting from the root. Use
@@ -1520,6 +1613,31 @@ Object *object_resolve_path_type(const char *path, const char *typename,
bool *ambiguous);
/**
+ * object_resolve_type_unambiguous:
+ * @typename: the type to look for
+ * @errp: pointer to error object
+ *
+ * Return the only object in the QOM tree of type @typename.
+ * If no match or more than one match is found, an error is
+ * returned.
+ *
+ * Returns: The matched object, or NULL on failure.
+ */
+Object *object_resolve_type_unambiguous(const char *typename, Error **errp);
+
+/**
+ * object_resolve_path_at:
+ * @parent: the object in which to resolve the path
+ * @path: the path to resolve
+ *
+ * This is like object_resolve_path(), except paths not starting with
+ * a slash are relative to @parent.
+ *
+ * Returns: The resolved object or NULL on path lookup failure.
+ */
+Object *object_resolve_path_at(Object *parent, const char *path);
+
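A brief usage sketch (the "dram" child name is hypothetical):

    Object *root = object_get_objects_root();
    Object *dram = object_resolve_path_at(root, "dram");   /* relative lookup */
    if (!dram) {
        /* no such child under root */
    }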
+/**
* object_resolve_path_component:
* @parent: the object in which to resolve the path
* @part: the component to resolve.
@@ -1555,6 +1673,10 @@ ObjectProperty *object_property_try_add_child(Object *obj, const char *name,
/**
* object_property_add_child:
+ * @obj: the object to add a property to
+ * @name: the name of the property
+ * @child: the child object
+ *
* Same as object_property_try_add_child() with @errp hardcoded to
* &error_abort
*/
@@ -1572,13 +1694,17 @@ typedef enum {
/**
* object_property_allow_set_link:
+ * @obj: the object to add a property to
+ * @name: the name of the property
+ * @child: the child object
+ * @errp: pointer to error object
*
* The default implementation of the object_property_add_link() check()
* callback function. It allows the link property to be set and never returns
* an error.
*/
-void object_property_allow_set_link(const Object *, const char *,
- Object *, Error **);
+void object_property_allow_set_link(const Object *obj, const char *name,
+ Object *child, Error **errp);
/**
* object_property_add_link:
@@ -1595,16 +1721,16 @@ void object_property_allow_set_link(const Object *, const char *,
*
* Links form the graph in the object model.
*
- * The <code>@check()</code> callback is invoked when
+ * The @check() callback is invoked when
* object_property_set_link() is called and can raise an error to prevent the
- * link being set. If <code>@check</code> is NULL, the property is read-only
+ * link being set. If @check is NULL, the property is read-only
* and cannot be set.
*
* Ownership of the pointer that @child points to is transferred to the
- * link property. The reference count for <code>*@child</code> is
+ * link property. The reference count for *@child is
 * managed by the property from the time the function returns until the
* property is deleted with object_property_del(). If the
- * <code>@flags</code> <code>OBJ_PROP_LINK_STRONG</code> bit is set,
+ * @flags %OBJ_PROP_LINK_STRONG bit is set,
* the reference count is decremented when the property is deleted or
* modified.
*
@@ -1672,6 +1798,7 @@ ObjectProperty *object_class_property_add_bool(ObjectClass *klass,
* @obj: the object to add a property to
* @name: the name of the property
* @typename: the name of the enum data type
+ * @lookup: enum value name lookup table
* @get: the getter or %NULL if the property is write-only.
* @set: the setter or %NULL if the property is read-only
*
@@ -1814,7 +1941,7 @@ ObjectProperty *object_class_property_add_uint64_ptr(ObjectClass *klass,
* Add an alias for a property on an object. This function will add a property
* of the same type as the forwarded property.
*
- * The caller must ensure that <code>@target_obj</code> stays alive as long as
+ * The caller must ensure that @target_obj stays alive as long as
* this property exists. In the case of a child object or an alias on the same
* object this will be the case. For aliases to other objects the caller is
* responsible for taking a reference.
diff --git a/include/qom/object_interfaces.h b/include/qom/object_interfaces.h
index 7035829337..02b11a7ef0 100644
--- a/include/qom/object_interfaces.h
+++ b/include/qom/object_interfaces.h
@@ -2,16 +2,14 @@
#define OBJECT_INTERFACES_H
#include "qom/object.h"
+#include "qapi/qapi-types-qom.h"
#include "qapi/visitor.h"
#define TYPE_USER_CREATABLE "user-creatable"
-#define USER_CREATABLE_CLASS(klass) \
- OBJECT_CLASS_CHECK(UserCreatableClass, (klass), \
- TYPE_USER_CREATABLE)
-#define USER_CREATABLE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(UserCreatableClass, (obj), \
- TYPE_USER_CREATABLE)
+typedef struct UserCreatableClass UserCreatableClass;
+DECLARE_CLASS_CHECKERS(UserCreatableClass, USER_CREATABLE,
+ TYPE_USER_CREATABLE)
#define USER_CREATABLE(obj) \
INTERFACE_CHECK(UserCreatable, (obj), \
TYPE_USER_CREATABLE)
@@ -40,14 +38,14 @@ typedef struct UserCreatable UserCreatable;
* object's type implements USER_CREATABLE interface and needs
* complete() callback to be called.
*/
-typedef struct UserCreatableClass {
+struct UserCreatableClass {
/* <private> */
InterfaceClass parent_class;
/* <public> */
void (*complete)(UserCreatable *uc, Error **errp);
bool (*can_be_deleted)(UserCreatable *uc);
-} UserCreatableClass;
+};
/**
* user_creatable_complete:
@@ -90,77 +88,72 @@ Object *user_creatable_add_type(const char *type, const char *id,
Visitor *v, Error **errp);
/**
- * user_creatable_add_dict:
- * @qdict: the object definition
- * @keyval: if true, use a keyval visitor for processing @qdict (i.e.
- * assume that all @qdict values are strings); otherwise, use
- * the normal QObject visitor (i.e. assume all @qdict values
- * have the QType expected by the QOM object type)
+ * user_creatable_add_qapi:
+ * @options: the object definition
* @errp: if an error occurs, a pointer to an area to store the error
*
- * Create an instance of the user creatable object that is defined by
- * @qdict. The object type is taken from the QDict key 'qom-type', its
- * ID from the key 'id'. The remaining entries in @qdict are used to
- * initialize the object properties.
- *
- * Returns: %true on success, %false on failure.
+ * Create an instance of the user creatable object according to the
+ * options passed in @options, as described in the QAPI schema documentation.
*/
-bool user_creatable_add_dict(QDict *qdict, bool keyval, Error **errp);
+void user_creatable_add_qapi(ObjectOptions *options, Error **errp);
/**
- * user_creatable_add_opts:
- * @opts: the object definition
+ * user_creatable_parse_str:
+ * @str: the object definition string as passed on the command line
* @errp: if an error occurs, a pointer to an area to store the error
*
- * Create an instance of the user creatable object whose type
- * is defined in @opts by the 'qom-type' option, placing it
- * in the object composition tree with name provided by the
- * 'id' field. The remaining options in @opts are used to
- * initialize the object properties.
+ * Parses the option for the user creatable object with a keyval parser and
+ * implicit key 'qom-type', converting the result to ObjectOptions.
*
- * Returns: the newly created object or NULL on error
+ * If a help option is given, print help instead.
+ *
+ * Returns: ObjectOptions on success, NULL when an error occurred (in which
+ * case *errp is set) or when help was printed (*errp is not set).
*/
-Object *user_creatable_add_opts(QemuOpts *opts, Error **errp);
-
+ObjectOptions *user_creatable_parse_str(const char *str, Error **errp);
/**
- * user_creatable_add_opts_predicate:
- * @type: the QOM type to be added
+ * user_creatable_add_from_str:
+ * @str: the object definition string as passed on the command line
+ * @errp: if an error occurs, a pointer to an area to store the error
*
- * A callback function to determine whether an object
- * of type @type should be created. Instances of this
- * callback should be passed to user_creatable_add_opts_foreach
+ * Create an instance of the user creatable object by parsing @str
+ * with a keyval parser and implicit key 'qom-type', converting the
+ * result to ObjectOptions and calling into qmp_object_add().
+ *
+ * If a help option is given, print help instead.
+ *
+ * Returns: true when an object was successfully created, false when an error
+ * occurred (in which case *errp is set) or when help was printed (*errp is not set).
*/
-typedef bool (*user_creatable_add_opts_predicate)(const char *type);
+bool user_creatable_add_from_str(const char *str, Error **errp);
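An illustrative caller sketch (the option string is hypothetical; error_report_err() is assumed for reporting):

    Error *err = NULL;

    if (!user_creatable_add_from_str("memory-backend-ram,id=mem0,size=1G", &err)) {
        if (err) {
            error_report_err(err);   /* creation failed */
        }
        /* err == NULL means help was printed */
    }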
/**
- * user_creatable_add_opts_foreach:
- * @opaque: a user_creatable_add_opts_predicate callback or NULL
- * @opts: options to create
- * @errp: unused
+ * user_creatable_process_cmdline:
+ * @cmdline: the object definition string as passed on the command line
*
- * An iterator callback to be used in conjunction with
- * the qemu_opts_foreach() method for creating a list of
- * objects from a set of QemuOpts
+ * Create an instance of the user creatable object by parsing @cmdline
+ * with a keyval parser and implicit key 'qom-type', converting the
+ * result to ObjectOptions and calling into qmp_object_add().
*
- * The @opaque parameter can be passed a user_creatable_add_opts_predicate
- * callback to filter which types of object are created during iteration.
- * When it fails, report the error.
+ * If a help option is given, print help instead and exit.
*
- * Returns: 0 on success, -1 when an error was reported.
+ * This function is only meant to be called during command line parsing.
+ * It exits the process on failure or after printing help.
*/
-int user_creatable_add_opts_foreach(void *opaque,
- QemuOpts *opts, Error **errp);
+void user_creatable_process_cmdline(const char *cmdline);
/**
* user_creatable_print_help:
* @type: the QOM type to be added
* @opts: options to create
*
- * Prints help if requested in @opts.
+ * Prints help if requested in @type or @opts. Note that if @type is neither
+ * "help"/"?" nor a valid user creatable type, no help will be printed
+ * regardless of @opts.
*
- * Returns: true if @opts contained a help option and help was printed, false
- * if no help option was found.
+ * Returns: true if a help option was found and help was printed, false
+ * otherwise.
*/
bool user_creatable_print_help(const char *type, QemuOpts *opts);
@@ -184,11 +177,4 @@ bool user_creatable_del(const char *id, Error **errp);
*/
void user_creatable_cleanup(void);
-/**
- * qmp_object_add:
- *
- * QMP command handler for object-add. See the QAPI schema for documentation.
- */
-void qmp_object_add(QDict *qdict, QObject **ret_data, Error **errp);
-
#endif
diff --git a/include/scsi/constants.h b/include/scsi/constants.h
index 874176019e..9b98451912 100644
--- a/include/scsi/constants.h
+++ b/include/scsi/constants.h
@@ -218,21 +218,25 @@
#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
#define TYPE_RBC 0x0e /* Simplified Direct-Access Device */
#define TYPE_OSD 0x11 /* Object-storage Device */
+#define TYPE_ZBC 0x14 /* Host-managed Zoned SCSI Device */
#define TYPE_WLUN 0x1e /* Well known LUN */
#define TYPE_NOT_PRESENT 0x1f
#define TYPE_INACTIVE 0x20
#define TYPE_NO_LUN 0x7f
/* Mode page codes for mode sense/set */
+#define MODE_PAGE_VENDOR_SPECIFIC 0x00
#define MODE_PAGE_R_W_ERROR 0x01
#define MODE_PAGE_HD_GEOMETRY 0x04
#define MODE_PAGE_FLEXIBLE_DISK_GEOMETRY 0x05
#define MODE_PAGE_CACHING 0x08
#define MODE_PAGE_AUDIO_CTL 0x0e
+#define MODE_PAGE_CONTROL 0x0a
#define MODE_PAGE_POWER 0x1a
#define MODE_PAGE_FAULT_FAIL 0x1c
#define MODE_PAGE_TO_PROTECT 0x1d
#define MODE_PAGE_CAPABILITIES 0x2a
+#define MODE_PAGE_APPLE_VENDOR 0x30
#define MODE_PAGE_ALLS 0x3f
/* Not in Mt. Fuji, but in ATAPI 2.6 -- deprecated now in favor
* of MODE_PAGE_SENSE_POWER */
diff --git a/include/scsi/pr-manager.h b/include/scsi/pr-manager.h
index 6ad5fd1ff7..45de28d354 100644
--- a/include/scsi/pr-manager.h
+++ b/include/scsi/pr-manager.h
@@ -5,37 +5,32 @@
#include "qapi/visitor.h"
#include "qom/object_interfaces.h"
#include "block/aio.h"
-#include "qemu/coroutine.h"
#define TYPE_PR_MANAGER "pr-manager"
-#define PR_MANAGER_CLASS(klass) \
- OBJECT_CLASS_CHECK(PRManagerClass, (klass), TYPE_PR_MANAGER)
-#define PR_MANAGER_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PRManagerClass, (obj), TYPE_PR_MANAGER)
-#define PR_MANAGER(obj) \
- OBJECT_CHECK(PRManager, (obj), TYPE_PR_MANAGER)
+OBJECT_DECLARE_TYPE(PRManager, PRManagerClass,
+ PR_MANAGER)
struct sg_io_hdr;
-typedef struct PRManager {
+struct PRManager {
/* <private> */
Object parent;
-} PRManager;
+};
/**
* PRManagerClass:
* @parent_class: the base class
* @run: callback invoked in thread pool context
*/
-typedef struct PRManagerClass {
+struct PRManagerClass {
/* <private> */
ObjectClass parent_class;
/* <public> */
int (*run)(PRManager *pr_mgr, int fd, struct sg_io_hdr *hdr);
bool (*is_connected)(PRManager *pr_mgr);
-} PRManagerClass;
+};
bool pr_manager_is_connected(PRManager *pr_mgr);
int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd,
diff --git a/include/scsi/utils.h b/include/scsi/utils.h
index fbc5588279..d5c8efa16e 100644
--- a/include/scsi/utils.h
+++ b/include/scsi/utils.h
@@ -16,6 +16,22 @@ enum SCSIXferMode {
SCSI_XFER_TO_DEV, /* WRITE, MODE_SELECT, ... */
};
+enum SCSIHostStatus {
+ SCSI_HOST_OK,
+ SCSI_HOST_NO_LUN,
+ SCSI_HOST_BUSY,
+ SCSI_HOST_TIME_OUT,
+ SCSI_HOST_BAD_RESPONSE,
+ SCSI_HOST_ABORTED,
+ SCSI_HOST_ERROR = 0x07,
+ SCSI_HOST_RESET = 0x08,
+    SCSI_HOST_TRANSPORT_DISRUPTED = 0x0e,
+ SCSI_HOST_TARGET_FAILURE = 0x10,
+ SCSI_HOST_RESERVATION_ERROR = 0x11,
+ SCSI_HOST_ALLOCATION_FAILURE = 0x12,
+ SCSI_HOST_MEDIUM_ERROR = 0x13,
+};
+
typedef struct SCSICommand {
uint8_t buf[SCSI_CMD_BUF_SIZE];
int len;
@@ -57,6 +73,8 @@ extern const struct SCSISense sense_code_LBA_OUT_OF_RANGE;
extern const struct SCSISense sense_code_INVALID_FIELD;
/* Illegal request, Invalid field in parameter list */
extern const struct SCSISense sense_code_INVALID_PARAM;
+/* Illegal request, Invalid value in parameter list */
+extern const struct SCSISense sense_code_INVALID_PARAM_VALUE;
/* Illegal request, Parameter list length error */
extern const struct SCSISense sense_code_INVALID_PARAM_LEN;
/* Illegal request, LUN not supported */
@@ -121,16 +139,9 @@ int scsi_cdb_length(uint8_t *buf);
#ifdef CONFIG_LINUX
#define SG_ERR_DRIVER_TIMEOUT 0x06
#define SG_ERR_DRIVER_SENSE 0x08
-
-#define SG_ERR_DID_OK 0x00
-#define SG_ERR_DID_NO_CONNECT 0x01
-#define SG_ERR_DID_BUS_BUSY 0x02
-#define SG_ERR_DID_TIME_OUT 0x03
-
-#define SG_ERR_DRIVER_SENSE 0x08
-
-int sg_io_sense_from_errno(int errno_value, struct sg_io_hdr *io_hdr,
- SCSISense *sense);
#endif
+int scsi_sense_from_errno(int errno_value, SCSISense *sense);
+int scsi_sense_from_host_status(uint8_t host_status, SCSISense *sense);
+
#endif
diff --git a/include/semihosting/common-semi.h b/include/semihosting/common-semi.h
new file mode 100644
index 0000000000..0a91db7c41
--- /dev/null
+++ b/include/semihosting/common-semi.h
@@ -0,0 +1,39 @@
+/*
+ * Semihosting support for systems modeled on the Arm "Angel"
+ * semihosting syscalls design. This includes Arm and RISC-V processors
+ *
+ * Copyright (c) 2005, 2007 CodeSourcery.
+ * Copyright (c) 2019 Linaro
+ * Written by Paul Brook.
+ *
+ * Copyright © 2020 by Keith Packard <keithp@keithp.com>
+ * Adapted for systems other than ARM, including RISC-V, by Keith Packard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * ARM Semihosting is documented in:
+ * Semihosting for AArch32 and AArch64 Release 2.0
+ * https://static.docs.arm.com/100863/0200/semihosting.pdf
+ *
+ * RISC-V Semihosting is documented in:
+ * RISC-V Semihosting
+ * https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
+ */
+
+#ifndef COMMON_SEMI_H
+#define COMMON_SEMI_H
+
+void do_common_semihosting(CPUState *cs);
+
+#endif /* COMMON_SEMI_H */
diff --git a/include/semihosting/console.h b/include/semihosting/console.h
new file mode 100644
index 0000000000..bd78e5f03f
--- /dev/null
+++ b/include/semihosting/console.h
@@ -0,0 +1,59 @@
+/*
+ * Semihosting Console
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SEMIHOST_CONSOLE_H
+#define SEMIHOST_CONSOLE_H
+
+#include "cpu.h"
+
+/**
+ * qemu_semihosting_console_read:
+ * @cs: CPUState
+ * @buf: host buffer
+ * @len: buffer size
+ *
+ * Receive at least one character from the debug console. As this call may
+ * block if no data is available, we suspend the CPU and re-execute the
+ * instruction when data arrives. Therefore two conditions must be met:
+ *
+ * - CPUState is synchronized before calling this function
+ * - pc is only updated once the character is successfully returned
+ *
+ * Returns: number of characters read, OR cpu_loop_exit!
+ */
+int qemu_semihosting_console_read(CPUState *cs, void *buf, int len);
+
+/**
+ * qemu_semihosting_console_write:
+ * @buf: host buffer
+ * @len: buffer size
+ *
+ * Write len bytes from buf to the debug console.
+ *
+ * Returns: number of bytes written -- this should only ever be short
+ * on some sort of i/o error.
+ */
+int qemu_semihosting_console_write(void *buf, int len);
+
+/*
+ * qemu_semihosting_console_block_until_ready:
+ * @cs: CPUState
+ *
+ * If no data is available we suspend the CPU and will re-execute the
+ * instruction when data is available.
+ */
+void qemu_semihosting_console_block_until_ready(CPUState *cs);
+
+/**
+ * qemu_semihosting_console_ready:
+ *
+ * Return true if characters are available for read; does not block.
+ */
+bool qemu_semihosting_console_ready(void);
+
+#endif /* SEMIHOST_CONSOLE_H */
diff --git a/include/semihosting/guestfd.h b/include/semihosting/guestfd.h
new file mode 100644
index 0000000000..3d426fedab
--- /dev/null
+++ b/include/semihosting/guestfd.h
@@ -0,0 +1,91 @@
+/*
+ * Hosted file support for semihosting syscalls.
+ *
+ * Copyright (c) 2005, 2007 CodeSourcery.
+ * Copyright (c) 2019 Linaro
+ * Copyright © 2020 by Keith Packard <keithp@keithp.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SEMIHOSTING_GUESTFD_H
+#define SEMIHOSTING_GUESTFD_H
+
+typedef enum GuestFDType {
+ GuestFDUnused = 0,
+ GuestFDHost,
+ GuestFDGDB,
+ GuestFDStatic,
+ GuestFDConsole,
+} GuestFDType;
+
+/*
+ * Guest file descriptors are integer indexes into an array of
+ * these structures (we will dynamically resize as necessary).
+ */
+typedef struct GuestFD {
+ GuestFDType type;
+ union {
+ int hostfd;
+ struct {
+ const uint8_t *data;
+ size_t len;
+ size_t off;
+ } staticfile;
+ };
+} GuestFD;
+
+/*
+ * For ARM semihosting, we have a separate structure for routing
+ * data for the console which is outside the guest fd address space.
+ */
+extern GuestFD console_in_gf;
+extern GuestFD console_out_gf;
+
+/**
+ * alloc_guestfd:
+ *
+ * Allocate an unused GuestFD index. The associated guestfd index
+ * will still be GuestFDUnused until it is initialized.
+ */
+int alloc_guestfd(void);
+
+/**
+ * dealloc_guestfd:
+ * @guestfd: GuestFD index
+ *
+ * Deallocate a GuestFD index. The associated GuestFD structure
+ * will be recycled for a subsequent allocation.
+ */
+void dealloc_guestfd(int guestfd);
+
+/**
+ * get_guestfd:
+ * @guestfd: GuestFD index
+ *
+ * Return the GuestFD structure associated with an initialized @guestfd,
+ * or NULL if it has not been allocated, or hasn't been initialized.
+ */
+GuestFD *get_guestfd(int guestfd);
+
+/**
+ * associate_guestfd:
+ * @guestfd: GuestFD index
+ * @hostfd: host file descriptor
+ *
+ * Initialize the GuestFD for @guestfd to GuestFDHost using @hostfd.
+ */
+void associate_guestfd(int guestfd, int hostfd);
+
+/**
+ * staticfile_guestfd:
+ * @guestfd: GuestFD index
+ * @data: data to be read
+ * @len: length of @data
+ *
+ * Initialize the GuestFD for @guestfd to GuestFDStatic.
+ * The @len bytes at @data will be returned to the guest on reads.
+ */
+void staticfile_guestfd(int guestfd, const uint8_t *data, size_t len);
+
+#endif /* SEMIHOSTING_GUESTFD_H */
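A sketch of the typical allocate/initialize flow (the host-side open() call and path are hypothetical):

    int hostfd = open("/tmp/guest.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
    int guestfd = alloc_guestfd();          /* still GuestFDUnused */

    associate_guestfd(guestfd, hostfd);     /* now GuestFDHost */
    /* ... hand guestfd back to the guest; on close: */
    dealloc_guestfd(guestfd);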
diff --git a/include/hw/semihosting/semihost.h b/include/semihosting/semihost.h
index b8ce5117ae..97d2a2ba99 100644
--- a/include/hw/semihosting/semihost.h
+++ b/include/semihosting/semihost.h
@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -27,7 +27,7 @@ typedef enum SemihostingTarget {
} SemihostingTarget;
#ifdef CONFIG_USER_ONLY
-static inline bool semihosting_enabled(void)
+static inline bool semihosting_enabled(bool is_user)
{
return true;
}
@@ -51,27 +51,25 @@ static inline const char *semihosting_get_cmdline(void)
{
return NULL;
}
-
-static inline Chardev *semihosting_get_chardev(void)
-{
- return NULL;
-}
-static inline void qemu_semihosting_console_init(void)
-{
-}
#else /* !CONFIG_USER_ONLY */
-bool semihosting_enabled(void);
+/**
+ * semihosting_enabled:
+ * @is_user: true if guest code is in usermode (i.e. not privileged)
+ *
+ * Return true if guest code is allowed to make semihosting calls.
+ */
+bool semihosting_enabled(bool is_user);
SemihostingTarget semihosting_get_target(void);
const char *semihosting_get_arg(int i);
int semihosting_get_argc(void);
const char *semihosting_get_cmdline(void);
void semihosting_arg_fallback(const char *file, const char *cmd);
-Chardev *semihosting_get_chardev(void);
/* for vl.c hooks */
void qemu_semihosting_enable(void);
-int qemu_semihosting_config_options(const char *opt);
-void qemu_semihosting_connect_chardevs(void);
-void qemu_semihosting_console_init(void);
+int qemu_semihosting_config_options(const char *optstr);
+void qemu_semihosting_chardev_init(void);
+void qemu_semihosting_console_init(Chardev *);
#endif /* CONFIG_USER_ONLY */
+void qemu_semihosting_guestfd_init(void);
#endif /* SEMIHOST_H */
diff --git a/include/semihosting/syscalls.h b/include/semihosting/syscalls.h
new file mode 100644
index 0000000000..3a5ec229eb
--- /dev/null
+++ b/include/semihosting/syscalls.h
@@ -0,0 +1,75 @@
+/*
+ * Syscall implementations for semihosting.
+ *
+ * Copyright (c) 2022 Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef SEMIHOSTING_SYSCALLS_H
+#define SEMIHOSTING_SYSCALLS_H
+
+/*
+ * Argument loading from the guest is performed by the caller;
+ * results are returned via the 'complete' callback.
+ *
+ * String operands are in address/len pairs. The len argument may be 0
+ * (when the semihosting ABI does not already provide the length),
+ * or non-zero (where it should include the terminating zero).
+ */
+
+typedef struct GuestFD GuestFD;
+
+void semihost_sys_open(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong fname, target_ulong fname_len,
+ int gdb_flags, int mode);
+
+void semihost_sys_close(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd);
+
+void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, target_ulong buf, target_ulong len);
+
+void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb complete,
+ GuestFD *gf, target_ulong buf, target_ulong len);
+
+void semihost_sys_write(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, target_ulong buf, target_ulong len);
+
+void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete,
+ GuestFD *gf, target_ulong buf, target_ulong len);
+
+void semihost_sys_lseek(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, int64_t off, int gdb_whence);
+
+void semihost_sys_isatty(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd);
+
+void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb,
+ gdb_syscall_complete_cb flen_cb,
+ int fd, target_ulong fstat_addr);
+
+void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, target_ulong addr);
+
+void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong fname, target_ulong fname_len,
+ target_ulong addr);
+
+void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong fname, target_ulong fname_len);
+
+void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong oname, target_ulong oname_len,
+ target_ulong nname, target_ulong nname_len);
+
+void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong cmd, target_ulong cmd_len);
+
+void semihost_sys_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete,
+ target_ulong tv_addr, target_ulong tz_addr);
+
+void semihost_sys_poll_one(CPUState *cs, gdb_syscall_complete_cb complete,
+ int fd, GIOCondition cond, int timeout);
+
+#endif /* SEMIHOSTING_SYSCALLS_H */
diff --git a/include/semihosting/uaccess.h b/include/semihosting/uaccess.h
new file mode 100644
index 0000000000..3963eafc3e
--- /dev/null
+++ b/include/semihosting/uaccess.h
@@ -0,0 +1,63 @@
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+
+#ifndef SEMIHOSTING_UACCESS_H
+#define SEMIHOSTING_UACCESS_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include semihosting/uaccess.h from user emulation
+#endif
+
+#include "cpu.h"
+
+#define get_user_u64(val, addr) \
+ ({ uint64_t val_ = 0; \
+ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
+ &val_, sizeof(val_), 0); \
+ (val) = tswap64(val_); ret_; })
+
+#define get_user_u32(val, addr) \
+ ({ uint32_t val_ = 0; \
+ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
+ &val_, sizeof(val_), 0); \
+ (val) = tswap32(val_); ret_; })
+
+#define get_user_u8(val, addr) \
+ ({ uint8_t val_ = 0; \
+ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \
+ &val_, sizeof(val_), 0); \
+ (val) = val_; ret_; })
+
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+#define put_user_u64(val, addr) \
+ ({ uint64_t val_ = tswap64(val); \
+ cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); })
+
+#define put_user_u32(val, addr) \
+ ({ uint32_t val_ = tswap32(val); \
+ cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); })
+
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+void *uaccess_lock_user(CPUArchState *env, target_ulong addr,
+ target_ulong len, bool copy);
+#define lock_user(type, p, len, copy) uaccess_lock_user(env, p, len, copy)
+
+char *uaccess_lock_user_string(CPUArchState *env, target_ulong addr);
+#define lock_user_string(p) uaccess_lock_user_string(env, p)
+
+void uaccess_unlock_user(CPUArchState *env, void *p,
+ target_ulong addr, target_ulong len);
+#define unlock_user(s, args, len) uaccess_unlock_user(env, s, args, len)
+
+ssize_t uaccess_strlen_user(CPUArchState *env, target_ulong addr);
+#define target_strlen(p) uaccess_strlen_user(env, p)
+
+#endif /* SEMIHOSTING_UACCESS_H */
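A sketch of how these accessors are typically used in a semihosting handler (assumes a CPUArchState *env is in scope, which the macros reference implicitly; the helper name is hypothetical):

    static bool read_arg_pair(CPUArchState *env, target_ulong args,
                              target_ulong *addr, target_ulong *len)
    {
        uint32_t a, l;

        /* get_user_* evaluates non-zero if the guest memory access failed */
        if (get_user_ual(a, args) || get_user_ual(l, args + 4)) {
            return false;
        }
        *addr = a;
        *len = l;
        return true;
    }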
diff --git a/include/standard-headers/asm-m68k/bootinfo-mac.h b/include/standard-headers/asm-m68k/bootinfo-mac.h
new file mode 100644
index 0000000000..449928cfcb
--- /dev/null
+++ b/include/standard-headers/asm-m68k/bootinfo-mac.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H
+#define _UAPI_ASM_M68K_BOOTINFO_MAC_H
+
+
+ /*
+ * Macintosh-specific tags (all __be32)
+ */
+
+#define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */
+#define BI_MAC_VADDR 0x8001 /* Mac video base address */
+#define BI_MAC_VDEPTH 0x8002 /* Mac video depth */
+#define BI_MAC_VROW 0x8003 /* Mac video rowbytes */
+#define BI_MAC_VDIM 0x8004 /* Mac video dimensions */
+#define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */
+#define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */
+#define BI_MAC_BTIME 0x8007 /* Mac boot time */
+#define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */
+#define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */
+#define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */
+#define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */
+
+
+ /*
+ * Macintosh hardware profile data - unused, see macintosh.h for
+ * reasonable type values
+ */
+
+#define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */
+#define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */
+#define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */
+#define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */
+#define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */
+#define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */
+#define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */
+#define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */
+#define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */
+#define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */
+#define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */
+#define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */
+#define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */
+#define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE) */
+#define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */
+#define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */
+#define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */
+#define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */
+
+
+ /*
+ * Macintosh Gestalt numbers (BI_MAC_MODEL)
+ */
+
+#define MAC_MODEL_II 6
+#define MAC_MODEL_IIX 7
+#define MAC_MODEL_IICX 8
+#define MAC_MODEL_SE30 9
+#define MAC_MODEL_IICI 11
+#define MAC_MODEL_IIFX 13 /* And well numbered it is too */
+#define MAC_MODEL_IISI 18
+#define MAC_MODEL_LC 19
+#define MAC_MODEL_Q900 20
+#define MAC_MODEL_PB170 21
+#define MAC_MODEL_Q700 22
+#define MAC_MODEL_CLII 23 /* aka: P200 */
+#define MAC_MODEL_PB140 25
+#define MAC_MODEL_Q950 26 /* aka: WGS95 */
+#define MAC_MODEL_LCIII 27 /* aka: P450 */
+#define MAC_MODEL_PB210 29
+#define MAC_MODEL_C650 30
+#define MAC_MODEL_PB230 32
+#define MAC_MODEL_PB180 33
+#define MAC_MODEL_PB160 34
+#define MAC_MODEL_Q800 35 /* aka: WGS80 */
+#define MAC_MODEL_Q650 36
+#define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */
+#define MAC_MODEL_PB250 38
+#define MAC_MODEL_IIVI 44
+#define MAC_MODEL_P600 45 /* aka: P600CD */
+#define MAC_MODEL_IIVX 48
+#define MAC_MODEL_CCL 49 /* aka: P250 */
+#define MAC_MODEL_PB165C 50
+#define MAC_MODEL_C610 52 /* aka: WGS60 */
+#define MAC_MODEL_Q610 53
+#define MAC_MODEL_PB145 54 /* aka: PB145B */
+#define MAC_MODEL_P520 56 /* aka: LC520 */
+#define MAC_MODEL_C660 60
+#define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */
+#define MAC_MODEL_PB180C 71
+#define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */
+#define MAC_MODEL_PB270C 77
+#define MAC_MODEL_Q840 78
+#define MAC_MODEL_P550 80 /* aka: LC550, P560 */
+#define MAC_MODEL_CCLII 83 /* aka: P275 */
+#define MAC_MODEL_PB165 84
+#define MAC_MODEL_PB190 85 /* aka: PB190CS */
+#define MAC_MODEL_TV 88
+#define MAC_MODEL_P475 89 /* aka: LC475, P476 */
+#define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */
+#define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */
+#define MAC_MODEL_Q605 94
+#define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */
+#define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */
+#define MAC_MODEL_P588 99 /* aka: LC580, P580 */
+#define MAC_MODEL_PB280 102
+#define MAC_MODEL_PB280C 103
+#define MAC_MODEL_PB150 115
+
+
+ /*
+ * Latest Macintosh bootinfo version
+ */
+
+#define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */
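
For illustration only (not part of the patch): a hedged sketch of emitting one of the tags above as a bootinfo record, using struct bi_record from asm-m68k/bootinfo.h further down in this diff. cpu_to_be16/cpu_to_be32 stand in for whatever byte-swapping helpers the real bootinfo builder uses.

    static uint8_t *example_put_mac_model(uint8_t *p, uint32_t gestalt_id)
    {
        struct bi_record *rec = (struct bi_record *)p;
        uint16_t size = sizeof(*rec) + sizeof(uint32_t);

        rec->tag     = cpu_to_be16(BI_MAC_MODEL);
        rec->size    = cpu_to_be16(size);            /* record size includes the header */
        rec->data[0] = cpu_to_be32(gestalt_id);      /* e.g. MAC_MODEL_Q800 */
        return p + size;
    }
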
diff --git a/include/standard-headers/asm-m68k/bootinfo-virt.h b/include/standard-headers/asm-m68k/bootinfo-virt.h
new file mode 100644
index 0000000000..75ac6bbd7d
--- /dev/null
+++ b/include/standard-headers/asm-m68k/bootinfo-virt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+** asm/bootinfo-virt.h -- Virtual-m68k-specific boot information definitions
+*/
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_VIRT_H
+#define _UAPI_ASM_M68K_BOOTINFO_VIRT_H
+
+#define BI_VIRT_QEMU_VERSION 0x8000
+#define BI_VIRT_GF_PIC_BASE 0x8001
+#define BI_VIRT_GF_RTC_BASE 0x8002
+#define BI_VIRT_GF_TTY_BASE 0x8003
+#define BI_VIRT_VIRTIO_BASE 0x8004
+#define BI_VIRT_CTRL_BASE 0x8005
+
+/* No longer used -- replaced with BI_RNG_SEED -- but don't reuse this index:
+ * #define BI_VIRT_RNG_SEED 0x8006 */
+
+#define VIRT_BOOTI_VERSION MK_BI_VERSION(2, 0)
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_VIRT_H */
diff --git a/include/standard-headers/asm-m68k/bootinfo.h b/include/standard-headers/asm-m68k/bootinfo.h
new file mode 100644
index 0000000000..b7a8dd2514
--- /dev/null
+++ b/include/standard-headers/asm-m68k/bootinfo.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
+ *
+ * Copyright 1992 by Greg Harp
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_M68K_BOOTINFO_H
+#define _UAPI_ASM_M68K_BOOTINFO_H
+
+
+ /*
+ * Bootinfo definitions
+ *
+ * This is an easily parsable and extendable structure containing all
+ * information to be passed from the bootstrap to the kernel.
+ *
+ * This way I hope to keep all future changes back/forwards compatible.
+ * Thus, keep your fingers crossed...
+ *
+ * This structure is copied right after the kernel by the bootstrap
+ * routine.
+ */
+
+struct bi_record {
+ uint16_t tag; /* tag ID */
+ uint16_t size; /* size of record (in bytes) */
+ uint32_t data[0]; /* data */
+};
+
+
+struct mem_info {
+ uint32_t addr; /* physical address of memory chunk */
+ uint32_t size; /* length of memory chunk (in bytes) */
+};
+
+
+ /*
+ * Tag Definitions
+ *
+ * Machine independent tags start counting from 0x0000
+ * Machine dependent tags start counting from 0x8000
+ */
+
+#define BI_LAST 0x0000 /* last record (sentinel) */
+#define BI_MACHTYPE 0x0001 /* machine type (uint32_t) */
+#define BI_CPUTYPE 0x0002 /* cpu type (uint32_t) */
+#define BI_FPUTYPE 0x0003 /* fpu type (uint32_t) */
+#define BI_MMUTYPE 0x0004 /* mmu type (uint32_t) */
+#define BI_MEMCHUNK 0x0005 /* memory chunk address and size */
+ /* (struct mem_info) */
+#define BI_RAMDISK 0x0006 /* ramdisk address and size */
+ /* (struct mem_info) */
+#define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */
+ /* (string) */
+/*
+ * A random seed used to initialize the RNG. Record format:
+ *
+ * - length [ 2 bytes, 16-bit big endian ]
+ * - seed data [ `length` bytes, padded to preserve 4-byte struct alignment ]
+ */
+#define BI_RNG_SEED 0x0008
+
+ /*
+ * Linux/m68k Architectures (BI_MACHTYPE)
+ */
+
+#define MACH_AMIGA 1
+#define MACH_ATARI 2
+#define MACH_MAC 3
+#define MACH_APOLLO 4
+#define MACH_SUN3 5
+#define MACH_MVME147 6
+#define MACH_MVME16x 7
+#define MACH_BVME6000 8
+#define MACH_HP300 9
+#define MACH_Q40 10
+#define MACH_SUN3X 11
+#define MACH_M54XX 12
+#define MACH_M5441X 13
+#define MACH_VIRT 14
+
+
+ /*
+ * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE)
+ *
+ * Note: we may rely on the following equalities:
+ *
+ * CPU_68020 == MMU_68851
+ * CPU_68030 == MMU_68030
+ * CPU_68040 == FPU_68040 == MMU_68040
+ * CPU_68060 == FPU_68060 == MMU_68060
+ */
+
+#define CPUB_68020 0
+#define CPUB_68030 1
+#define CPUB_68040 2
+#define CPUB_68060 3
+#define CPUB_COLDFIRE 4
+
+#define CPU_68020 (1 << CPUB_68020)
+#define CPU_68030 (1 << CPUB_68030)
+#define CPU_68040 (1 << CPUB_68040)
+#define CPU_68060 (1 << CPUB_68060)
+#define CPU_COLDFIRE (1 << CPUB_COLDFIRE)
+
+#define FPUB_68881 0
+#define FPUB_68882 1
+#define FPUB_68040 2 /* Internal FPU */
+#define FPUB_68060 3 /* Internal FPU */
+#define FPUB_SUNFPA 4 /* Sun-3 FPA */
+#define FPUB_COLDFIRE 5 /* ColdFire FPU */
+
+#define FPU_68881 (1 << FPUB_68881)
+#define FPU_68882 (1 << FPUB_68882)
+#define FPU_68040 (1 << FPUB_68040)
+#define FPU_68060 (1 << FPUB_68060)
+#define FPU_SUNFPA (1 << FPUB_SUNFPA)
+#define FPU_COLDFIRE (1 << FPUB_COLDFIRE)
+
+#define MMUB_68851 0
+#define MMUB_68030 1 /* Internal MMU */
+#define MMUB_68040 2 /* Internal MMU */
+#define MMUB_68060 3 /* Internal MMU */
+#define MMUB_APOLLO 4 /* Custom Apollo */
+#define MMUB_SUN3 5 /* Custom Sun-3 */
+#define MMUB_COLDFIRE 6 /* Internal MMU */
+
+#define MMU_68851 (1 << MMUB_68851)
+#define MMU_68030 (1 << MMUB_68030)
+#define MMU_68040 (1 << MMUB_68040)
+#define MMU_68060 (1 << MMUB_68060)
+#define MMU_SUN3 (1 << MMUB_SUN3)
+#define MMU_APOLLO (1 << MMUB_APOLLO)
+#define MMU_COLDFIRE (1 << MMUB_COLDFIRE)
+
+
+ /*
+ * Stuff for bootinfo interface versioning
+ *
+ * At the start of kernel code, a 'struct bootversion' is located.
+ * bootstrap checks for a matching version of the interface before booting
+ * a kernel, to avoid user confusion if kernel and bootstrap don't work
+ * together :-)
+ *
+ * If incompatible changes are made to the bootinfo interface, the major
+ * number below should be stepped (and the minor reset to 0) for the
+ * appropriate machine. If a change is backward-compatible, the minor
+ * should be stepped. "Backwards-compatible" means that booting will work,
+ * but certain features may not.
+ */
+
+#define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */
+#define MK_BI_VERSION(major, minor) (((major) << 16) + (minor))
+#define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff)
+#define BI_VERSION_MINOR(v) ((v) & 0xffff)
+
+struct bootversion {
+ uint16_t branch;
+ uint32_t magic;
+ struct {
+ uint32_t machtype;
+ uint32_t version;
+ } machversions[0];
+} QEMU_PACKED;
+
+
+#endif /* _UAPI_ASM_M68K_BOOTINFO_H */
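
For illustration only (not part of the patch): a hedged sketch of a consumer walking the record list that the bootstrap places after the kernel. The list ends with a BI_LAST sentinel and the fields are big-endian on m68k, so be16_to_cpu/be32_to_cpu are assumed conversion helpers.

    static void example_walk_bootinfo(const uint8_t *p)
    {
        for (;;) {
            const struct bi_record *rec = (const struct bi_record *)p;
            uint16_t tag  = be16_to_cpu(rec->tag);
            uint16_t size = be16_to_cpu(rec->size);

            if (tag == BI_LAST) {
                break;                                      /* sentinel record */
            }
            if (tag == BI_MACHTYPE) {
                uint32_t machtype = be32_to_cpu(rec->data[0]);  /* e.g. MACH_VIRT */
                (void)machtype;
            }
            p += size;                                      /* size covers the header too */
        }
    }
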
diff --git a/include/standard-headers/asm-x86/bootparam.h b/include/standard-headers/asm-x86/bootparam.h
index 072e2ed546..b582a105c0 100644
--- a/include/standard-headers/asm-x86/bootparam.h
+++ b/include/standard-headers/asm-x86/bootparam.h
@@ -2,19 +2,7 @@
#ifndef _ASM_X86_BOOTPARAM_H
#define _ASM_X86_BOOTPARAM_H
-/* setup_data/setup_indirect types */
-#define SETUP_NONE 0
-#define SETUP_E820_EXT 1
-#define SETUP_DTB 2
-#define SETUP_PCI 3
-#define SETUP_EFI 4
-#define SETUP_APPLE_PROPERTIES 5
-#define SETUP_JAILHOUSE 6
-
-#define SETUP_INDIRECT (1<<31)
-
-/* SETUP_INDIRECT | max(SETUP_*) */
-#define SETUP_TYPE_MAX (SETUP_INDIRECT | SETUP_JAILHOUSE)
+#include "standard-headers/asm-x86/setup_data.h"
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -36,6 +24,7 @@
#define XLF_EFI_KEXEC (1<<4)
#define XLF_5LEVEL (1<<5)
#define XLF_5LEVEL_ENABLED (1<<6)
+#define XLF_MEM_ENCRYPTION (1<<7)
#endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/include/standard-headers/asm-x86/kvm_para.h b/include/standard-headers/asm-x86/kvm_para.h
index 07877d3295..9a011d20f0 100644
--- a/include/standard-headers/asm-x86/kvm_para.h
+++ b/include/standard-headers/asm-x86/kvm_para.h
@@ -8,6 +8,7 @@
* should be used to determine that a VM is running under KVM.
*/
#define KVM_CPUID_SIGNATURE 0x40000000
+#define KVM_SIGNATURE "KVMKVMKVM\0\0\0"
/* This CPUID returns two feature bitmaps in eax, edx. Before enabling
* a particular paravirtualization, the appropriate feature bit should
@@ -32,6 +33,9 @@
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
+#define KVM_FEATURE_MSI_EXT_DEST_ID 15
+#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
+#define KVM_FEATURE_MIGRATION_CONTROL 17
#define KVM_HINTS_REALTIME 0
@@ -53,6 +57,7 @@
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
+#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
struct kvm_steal_time {
uint64_t steal;
@@ -87,8 +92,18 @@ struct kvm_clock_pairing {
#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)
/* MSR_KVM_ASYNC_PF_INT */
-#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)
+#define KVM_ASYNC_PF_VEC_MASK __GENMASK(7, 0)
+/* MSR_KVM_MIGRATION_CONTROL */
+#define KVM_MIGRATION_READY (1 << 0)
+
+/* KVM_HC_MAP_GPA_RANGE */
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0)
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1)
+#define KVM_MAP_GPA_RANGE_ENC_STAT(n) (n << 4)
+#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1)
+#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
@@ -127,7 +142,6 @@ struct kvm_vcpu_pv_apf_data {
uint32_t token;
uint8_t pad[56];
- uint32_t enabled;
};
#define KVM_PV_EOI_BIT 0
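
For illustration only (not part of the patch): how the new KVM_HC_MAP_GPA_RANGE attribute bits compose. A guest that wants to mark a 2 MiB range as encrypted would pass a flags value like the one built here; issuing the hypercall itself is out of scope.

    static inline uint64_t example_map_gpa_range_flags(void)
    {
        /* bits [1:0] select the page size, bit 4 carries the encryption state */
        return KVM_MAP_GPA_RANGE_PAGE_SZ_2M | KVM_MAP_GPA_RANGE_ENCRYPTED;
    }
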
diff --git a/include/standard-headers/asm-x86/setup_data.h b/include/standard-headers/asm-x86/setup_data.h
new file mode 100644
index 0000000000..09355f54c5
--- /dev/null
+++ b/include/standard-headers/asm-x86/setup_data.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_X86_SETUP_DATA_H
+#define _ASM_X86_SETUP_DATA_H
+
+/* setup_data/setup_indirect types */
+#define SETUP_NONE 0
+#define SETUP_E820_EXT 1
+#define SETUP_DTB 2
+#define SETUP_PCI 3
+#define SETUP_EFI 4
+#define SETUP_APPLE_PROPERTIES 5
+#define SETUP_JAILHOUSE 6
+#define SETUP_CC_BLOB 7
+#define SETUP_IMA 8
+#define SETUP_RNG_SEED 9
+#define SETUP_ENUM_MAX SETUP_RNG_SEED
+
+#define SETUP_INDIRECT (1<<31)
+#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
+
+#ifndef __ASSEMBLY__
+
+#include "standard-headers/linux/types.h"
+
+/* extensible setup data list node */
+struct setup_data {
+ uint64_t next;
+ uint32_t type;
+ uint32_t len;
+ uint8_t data[];
+};
+
+/* extensible setup indirect data node */
+struct setup_indirect {
+ uint32_t type;
+ uint32_t reserved; /* Reserved, must be set to zero. */
+ uint64_t len;
+ uint64_t addr;
+};
+
+/*
+ * The E820 memory region entry of the boot protocol ABI:
+ */
+struct boot_e820_entry {
+ uint64_t addr;
+ uint64_t size;
+ uint32_t type;
+} QEMU_PACKED;
+
+/*
+ * The boot loader is passing platform information via this Jailhouse-specific
+ * setup data structure.
+ */
+struct jailhouse_setup_data {
+ struct {
+ uint16_t version;
+ uint16_t compatible_version;
+ } QEMU_PACKED hdr;
+ struct {
+ uint16_t pm_timer_address;
+ uint16_t num_cpus;
+ uint64_t pci_mmconfig_base;
+ uint32_t tsc_khz;
+ uint32_t apic_khz;
+ uint8_t standard_ioapic;
+ uint8_t cpu_ids[255];
+ } QEMU_PACKED v1;
+ struct {
+ uint32_t flags;
+ } QEMU_PACKED v2;
+} QEMU_PACKED;
+
+/*
+ * IMA buffer setup data information from the previous kernel during kexec
+ */
+struct ima_setup_data {
+ uint64_t addr;
+ uint64_t size;
+} QEMU_PACKED;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SETUP_DATA_H */
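
For illustration only (not part of the patch): setup_data entries form a singly linked list chained through guest-physical addresses in the next field. A hedged sketch of a consumer; map_phys() is a hypothetical helper that turns a physical address into a readable pointer.

    extern const void *map_phys(uint64_t pa);   /* hypothetical address-mapping helper */

    static void example_walk_setup_data(uint64_t pa)
    {
        while (pa) {
            const struct setup_data *sd = map_phys(pa);

            if (sd->type == SETUP_RNG_SEED) {
                /* sd->data[0 .. sd->len - 1] holds the seed bytes */
            }
            pa = sd->next;                      /* 0 terminates the list */
        }
    }
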
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
deleted file mode 100644
index a5a1c8234e..0000000000
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PVRDMA_DEV_API_H__
-#define __PVRDMA_DEV_API_H__
-
-#include "standard-headers/linux/types.h"
-
-#include "pvrdma_verbs.h"
-
-/*
- * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
- * These macros allow us to check for different features if necessary.
- */
-
-#define PVRDMA_ROCEV1_VERSION 17
-#define PVRDMA_ROCEV2_VERSION 18
-#define PVRDMA_PPN64_VERSION 19
-#define PVRDMA_QPHANDLE_VERSION 20
-#define PVRDMA_VERSION PVRDMA_QPHANDLE_VERSION
-
-#define PVRDMA_BOARD_ID 1
-#define PVRDMA_REV_ID 1
-
-/*
- * Masks and accessors for page directory, which is a two-level lookup:
- * page directory -> page table -> page. Only one directory for now, but we
- * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
- * gigabyte for memory regions and so forth.
- */
-
-#define PVRDMA_PDIR_SHIFT 18
-#define PVRDMA_PTABLE_SHIFT 9
-#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
-#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
-#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff)
-#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512)
-#define PVRDMA_MAX_FAST_REG_PAGES 128
-
-/*
- * Max MSI-X vectors.
- */
-
-#define PVRDMA_MAX_INTERRUPTS 3
-
-/* Register offsets within PCI resource on BAR1. */
-#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */
-#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */
-#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */
-#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */
-#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */
-#define PVRDMA_REG_ERR 0x14 /* R: Device error. */
-#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */
-#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */
-#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */
-#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */
-
-/* Object flags. */
-#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */
-#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. */
-#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */
-#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */
-
-/*
- * Atomic operation capability (masked versions are extended atomic
- * operations.
- */
-
-#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */
-#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */
-#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */
-#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */
-
-/*
- * Base Memory Management Extension flags to support Fast Reg Memory Regions
- * and Fast Reg Work Requests. Each flag represents a verb operation and we
- * must support all of them to qualify for the BMME device cap.
- */
-
-#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */
-#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */
-#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */
-
-/*
- * GID types. The interpretation of the gid_types bit field in the device
- * capabilities will depend on the device mode. For now, the device only
- * supports RoCE as mode, so only the different GID types for RoCE are
- * defined.
- */
-
-#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
-#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)
-
-/*
- * Version checks. This checks whether each version supports specific
- * capabilities from the device.
- */
-
-#define PVRDMA_IS_VERSION17(_dev) \
- (_dev->dsr_version == PVRDMA_ROCEV1_VERSION && \
- _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
-
-#define PVRDMA_IS_VERSION18(_dev) \
- (_dev->dsr_version >= PVRDMA_ROCEV2_VERSION && \
- (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 || \
- _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)) \
-
-#define PVRDMA_SUPPORTED(_dev) \
- ((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) && \
- (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))
-
-/*
- * Get capability values based on device version.
- */
-
-#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
- ((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)
-
-enum pvrdma_pci_resource {
- PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
- PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
- PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */
- PVRDMA_PCI_RESOURCE_LAST, /* Last. */
-};
-
-enum pvrdma_device_ctl {
- PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
- PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
- PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
-};
-
-enum pvrdma_intr_vector {
- PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */
- PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */
- PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */
- /* Additional CQ notification vectors. */
-};
-
-enum pvrdma_intr_cause {
- PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE),
- PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC),
- PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
-};
-
-enum pvrdma_gos_bits {
- PVRDMA_GOS_BITS_UNK, /* Unknown. */
- PVRDMA_GOS_BITS_32, /* 32-bit. */
- PVRDMA_GOS_BITS_64, /* 64-bit. */
-};
-
-enum pvrdma_gos_type {
- PVRDMA_GOS_TYPE_UNK, /* Unknown. */
- PVRDMA_GOS_TYPE_LINUX, /* Linux. */
-};
-
-enum pvrdma_device_mode {
- PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */
- PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */
- PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */
-};
-
-struct pvrdma_gos_info {
- uint32_t gos_bits:2; /* W: PVRDMA_GOS_BITS_ */
- uint32_t gos_type:4; /* W: PVRDMA_GOS_TYPE_ */
- uint32_t gos_ver:16; /* W: Guest OS version. */
- uint32_t gos_misc:10; /* W: Other. */
- uint32_t pad; /* Pad to 8-byte alignment. */
-};
-
-struct pvrdma_device_caps {
- uint64_t fw_ver; /* R: Query device. */
- uint64_t node_guid;
- uint64_t sys_image_guid;
- uint64_t max_mr_size;
- uint64_t page_size_cap;
- uint64_t atomic_arg_sizes; /* EX verbs. */
- uint32_t ex_comp_mask; /* EX verbs. */
- uint32_t device_cap_flags2; /* EX verbs. */
- uint32_t max_fa_bit_boundary; /* EX verbs. */
- uint32_t log_max_atomic_inline_arg; /* EX verbs. */
- uint32_t vendor_id;
- uint32_t vendor_part_id;
- uint32_t hw_ver;
- uint32_t max_qp;
- uint32_t max_qp_wr;
- uint32_t device_cap_flags;
- uint32_t max_sge;
- uint32_t max_sge_rd;
- uint32_t max_cq;
- uint32_t max_cqe;
- uint32_t max_mr;
- uint32_t max_pd;
- uint32_t max_qp_rd_atom;
- uint32_t max_ee_rd_atom;
- uint32_t max_res_rd_atom;
- uint32_t max_qp_init_rd_atom;
- uint32_t max_ee_init_rd_atom;
- uint32_t max_ee;
- uint32_t max_rdd;
- uint32_t max_mw;
- uint32_t max_raw_ipv6_qp;
- uint32_t max_raw_ethy_qp;
- uint32_t max_mcast_grp;
- uint32_t max_mcast_qp_attach;
- uint32_t max_total_mcast_qp_attach;
- uint32_t max_ah;
- uint32_t max_fmr;
- uint32_t max_map_per_fmr;
- uint32_t max_srq;
- uint32_t max_srq_wr;
- uint32_t max_srq_sge;
- uint32_t max_uar;
- uint32_t gid_tbl_len;
- uint16_t max_pkeys;
- uint8_t local_ca_ack_delay;
- uint8_t phys_port_cnt;
- uint8_t mode; /* PVRDMA_DEVICE_MODE_ */
- uint8_t atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
- uint8_t bmme_flags; /* FRWR Mem Mgmt Extensions */
- uint8_t gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
- uint32_t max_fast_reg_page_list_len;
-};
-
-struct pvrdma_ring_page_info {
- uint32_t num_pages; /* Num pages incl. header. */
- uint32_t reserved; /* Reserved. */
- uint64_t pdir_dma; /* Page directory PA. */
-};
-
-#pragma pack(push, 1)
-
-struct pvrdma_device_shared_region {
- uint32_t driver_version; /* W: Driver version. */
- uint32_t pad; /* Pad to 8-byte align. */
- struct pvrdma_gos_info gos_info; /* W: Guest OS information. */
- uint64_t cmd_slot_dma; /* W: Command slot address. */
- uint64_t resp_slot_dma; /* W: Response slot address. */
- struct pvrdma_ring_page_info async_ring_pages;
- /* W: Async ring page info. */
- struct pvrdma_ring_page_info cq_ring_pages;
- /* W: CQ ring page info. */
- union {
- uint32_t uar_pfn; /* W: UAR pageframe. */
- uint64_t uar_pfn64; /* W: 64-bit UAR page frame. */
- };
- struct pvrdma_device_caps caps; /* R: Device capabilities. */
-};
-
-#pragma pack(pop)
-
-/* Event types. Currently a 1:1 mapping with enum ib_event. */
-enum pvrdma_eqe_type {
- PVRDMA_EVENT_CQ_ERR,
- PVRDMA_EVENT_QP_FATAL,
- PVRDMA_EVENT_QP_REQ_ERR,
- PVRDMA_EVENT_QP_ACCESS_ERR,
- PVRDMA_EVENT_COMM_EST,
- PVRDMA_EVENT_SQ_DRAINED,
- PVRDMA_EVENT_PATH_MIG,
- PVRDMA_EVENT_PATH_MIG_ERR,
- PVRDMA_EVENT_DEVICE_FATAL,
- PVRDMA_EVENT_PORT_ACTIVE,
- PVRDMA_EVENT_PORT_ERR,
- PVRDMA_EVENT_LID_CHANGE,
- PVRDMA_EVENT_PKEY_CHANGE,
- PVRDMA_EVENT_SM_CHANGE,
- PVRDMA_EVENT_SRQ_ERR,
- PVRDMA_EVENT_SRQ_LIMIT_REACHED,
- PVRDMA_EVENT_QP_LAST_WQE_REACHED,
- PVRDMA_EVENT_CLIENT_REREGISTER,
- PVRDMA_EVENT_GID_CHANGE,
-};
-
-/* Event queue element. */
-struct pvrdma_eqe {
- uint32_t type; /* Event type. */
- uint32_t info; /* Handle, other. */
-};
-
-/* CQ notification queue element. */
-struct pvrdma_cqne {
- uint32_t info; /* Handle */
-};
-
-enum {
- PVRDMA_CMD_FIRST,
- PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
- PVRDMA_CMD_QUERY_PKEY,
- PVRDMA_CMD_CREATE_PD,
- PVRDMA_CMD_DESTROY_PD,
- PVRDMA_CMD_CREATE_MR,
- PVRDMA_CMD_DESTROY_MR,
- PVRDMA_CMD_CREATE_CQ,
- PVRDMA_CMD_RESIZE_CQ,
- PVRDMA_CMD_DESTROY_CQ,
- PVRDMA_CMD_CREATE_QP,
- PVRDMA_CMD_MODIFY_QP,
- PVRDMA_CMD_QUERY_QP,
- PVRDMA_CMD_DESTROY_QP,
- PVRDMA_CMD_CREATE_UC,
- PVRDMA_CMD_DESTROY_UC,
- PVRDMA_CMD_CREATE_BIND,
- PVRDMA_CMD_DESTROY_BIND,
- PVRDMA_CMD_CREATE_SRQ,
- PVRDMA_CMD_MODIFY_SRQ,
- PVRDMA_CMD_QUERY_SRQ,
- PVRDMA_CMD_DESTROY_SRQ,
- PVRDMA_CMD_MAX,
-};
-
-enum {
- PVRDMA_CMD_FIRST_RESP = (1 << 31),
- PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
- PVRDMA_CMD_QUERY_PKEY_RESP,
- PVRDMA_CMD_CREATE_PD_RESP,
- PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
- PVRDMA_CMD_CREATE_MR_RESP,
- PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
- PVRDMA_CMD_CREATE_CQ_RESP,
- PVRDMA_CMD_RESIZE_CQ_RESP,
- PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
- PVRDMA_CMD_CREATE_QP_RESP,
- PVRDMA_CMD_MODIFY_QP_RESP,
- PVRDMA_CMD_QUERY_QP_RESP,
- PVRDMA_CMD_DESTROY_QP_RESP,
- PVRDMA_CMD_CREATE_UC_RESP,
- PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
- PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
- PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
- PVRDMA_CMD_CREATE_SRQ_RESP,
- PVRDMA_CMD_MODIFY_SRQ_RESP,
- PVRDMA_CMD_QUERY_SRQ_RESP,
- PVRDMA_CMD_DESTROY_SRQ_RESP,
- PVRDMA_CMD_MAX_RESP,
-};
-
-struct pvrdma_cmd_hdr {
- uint64_t response; /* Key for response lookup. */
- uint32_t cmd; /* PVRDMA_CMD_ */
- uint32_t reserved; /* Reserved. */
-};
-
-struct pvrdma_cmd_resp_hdr {
- uint64_t response; /* From cmd hdr. */
- uint32_t ack; /* PVRDMA_CMD_XXX_RESP */
- uint8_t err; /* Error. */
- uint8_t reserved[3]; /* Reserved. */
-};
-
-struct pvrdma_cmd_query_port {
- struct pvrdma_cmd_hdr hdr;
- uint8_t port_num;
- uint8_t reserved[7];
-};
-
-struct pvrdma_cmd_query_port_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_port_attr attrs;
-};
-
-struct pvrdma_cmd_query_pkey {
- struct pvrdma_cmd_hdr hdr;
- uint8_t port_num;
- uint8_t index;
- uint8_t reserved[6];
-};
-
-struct pvrdma_cmd_query_pkey_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint16_t pkey;
- uint8_t reserved[6];
-};
-
-struct pvrdma_cmd_create_uc {
- struct pvrdma_cmd_hdr hdr;
- union {
- uint32_t pfn; /* UAR page frame number */
- uint64_t pfn64; /* 64-bit UAR page frame number */
- };
-};
-
-struct pvrdma_cmd_create_uc_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t ctx_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_uc {
- struct pvrdma_cmd_hdr hdr;
- uint32_t ctx_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_pd {
- struct pvrdma_cmd_hdr hdr;
- uint32_t ctx_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_pd_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t pd_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_pd {
- struct pvrdma_cmd_hdr hdr;
- uint32_t pd_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_mr {
- struct pvrdma_cmd_hdr hdr;
- uint64_t start;
- uint64_t length;
- uint64_t pdir_dma;
- uint32_t pd_handle;
- uint32_t access_flags;
- uint32_t flags;
- uint32_t nchunks;
-};
-
-struct pvrdma_cmd_create_mr_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t mr_handle;
- uint32_t lkey;
- uint32_t rkey;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_mr {
- struct pvrdma_cmd_hdr hdr;
- uint32_t mr_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_cq {
- struct pvrdma_cmd_hdr hdr;
- uint64_t pdir_dma;
- uint32_t ctx_handle;
- uint32_t cqe;
- uint32_t nchunks;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_cq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t cq_handle;
- uint32_t cqe;
-};
-
-struct pvrdma_cmd_resize_cq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t cq_handle;
- uint32_t cqe;
-};
-
-struct pvrdma_cmd_resize_cq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t cqe;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_cq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t cq_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_srq {
- struct pvrdma_cmd_hdr hdr;
- uint64_t pdir_dma;
- uint32_t pd_handle;
- uint32_t nchunks;
- struct pvrdma_srq_attr attrs;
- uint8_t srq_type;
- uint8_t reserved[7];
-};
-
-struct pvrdma_cmd_create_srq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t srqn;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_modify_srq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t srq_handle;
- uint32_t attr_mask;
- struct pvrdma_srq_attr attrs;
-};
-
-struct pvrdma_cmd_query_srq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t srq_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_query_srq_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_srq_attr attrs;
-};
-
-struct pvrdma_cmd_destroy_srq {
- struct pvrdma_cmd_hdr hdr;
- uint32_t srq_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_qp {
- struct pvrdma_cmd_hdr hdr;
- uint64_t pdir_dma;
- uint32_t pd_handle;
- uint32_t send_cq_handle;
- uint32_t recv_cq_handle;
- uint32_t srq_handle;
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
- uint32_t lkey;
- uint32_t access_flags;
- uint16_t total_chunks;
- uint16_t send_chunks;
- uint16_t max_atomic_arg;
- uint8_t sq_sig_all;
- uint8_t qp_type;
- uint8_t is_srq;
- uint8_t reserved[3];
-};
-
-struct pvrdma_cmd_create_qp_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t qpn;
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
-};
-
-struct pvrdma_cmd_create_qp_resp_v2 {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t qpn;
- uint32_t qp_handle;
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
-};
-
-struct pvrdma_cmd_modify_qp {
- struct pvrdma_cmd_hdr hdr;
- uint32_t qp_handle;
- uint32_t attr_mask;
- struct pvrdma_qp_attr attrs;
-};
-
-struct pvrdma_cmd_query_qp {
- struct pvrdma_cmd_hdr hdr;
- uint32_t qp_handle;
- uint32_t attr_mask;
-};
-
-struct pvrdma_cmd_query_qp_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_qp_attr attrs;
-};
-
-struct pvrdma_cmd_destroy_qp {
- struct pvrdma_cmd_hdr hdr;
- uint32_t qp_handle;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_destroy_qp_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- uint32_t events_reported;
- uint8_t reserved[4];
-};
-
-struct pvrdma_cmd_create_bind {
- struct pvrdma_cmd_hdr hdr;
- uint32_t mtu;
- uint32_t vlan;
- uint32_t index;
- uint8_t new_gid[16];
- uint8_t gid_type;
- uint8_t reserved[3];
-};
-
-struct pvrdma_cmd_destroy_bind {
- struct pvrdma_cmd_hdr hdr;
- uint32_t index;
- uint8_t dest_gid[16];
- uint8_t reserved[4];
-};
-
-union pvrdma_cmd_req {
- struct pvrdma_cmd_hdr hdr;
- struct pvrdma_cmd_query_port query_port;
- struct pvrdma_cmd_query_pkey query_pkey;
- struct pvrdma_cmd_create_uc create_uc;
- struct pvrdma_cmd_destroy_uc destroy_uc;
- struct pvrdma_cmd_create_pd create_pd;
- struct pvrdma_cmd_destroy_pd destroy_pd;
- struct pvrdma_cmd_create_mr create_mr;
- struct pvrdma_cmd_destroy_mr destroy_mr;
- struct pvrdma_cmd_create_cq create_cq;
- struct pvrdma_cmd_resize_cq resize_cq;
- struct pvrdma_cmd_destroy_cq destroy_cq;
- struct pvrdma_cmd_create_qp create_qp;
- struct pvrdma_cmd_modify_qp modify_qp;
- struct pvrdma_cmd_query_qp query_qp;
- struct pvrdma_cmd_destroy_qp destroy_qp;
- struct pvrdma_cmd_create_bind create_bind;
- struct pvrdma_cmd_destroy_bind destroy_bind;
- struct pvrdma_cmd_create_srq create_srq;
- struct pvrdma_cmd_modify_srq modify_srq;
- struct pvrdma_cmd_query_srq query_srq;
- struct pvrdma_cmd_destroy_srq destroy_srq;
-};
-
-union pvrdma_cmd_resp {
- struct pvrdma_cmd_resp_hdr hdr;
- struct pvrdma_cmd_query_port_resp query_port_resp;
- struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
- struct pvrdma_cmd_create_uc_resp create_uc_resp;
- struct pvrdma_cmd_create_pd_resp create_pd_resp;
- struct pvrdma_cmd_create_mr_resp create_mr_resp;
- struct pvrdma_cmd_create_cq_resp create_cq_resp;
- struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
- struct pvrdma_cmd_create_qp_resp create_qp_resp;
- struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
- struct pvrdma_cmd_query_qp_resp query_qp_resp;
- struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
- struct pvrdma_cmd_create_srq_resp create_srq_resp;
- struct pvrdma_cmd_query_srq_resp query_srq_resp;
-};
-
-#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
deleted file mode 100644
index acd4c8346d..0000000000
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PVRDMA_RING_H__
-#define __PVRDMA_RING_H__
-
-#include "standard-headers/linux/types.h"
-
-#define PVRDMA_INVALID_IDX -1 /* Invalid index. */
-
-struct pvrdma_ring {
- int prod_tail; /* Producer tail. */
- int cons_head; /* Consumer head. */
-};
-
-struct pvrdma_ring_state {
- struct pvrdma_ring tx; /* Tx ring. */
- struct pvrdma_ring rx; /* Rx ring. */
-};
-
-static inline int pvrdma_idx_valid(uint32_t idx, uint32_t max_elems)
-{
- /* Generates fewer instructions than a less-than. */
- return (idx & ~((max_elems << 1) - 1)) == 0;
-}
-
-static inline int32_t pvrdma_idx(int *var, uint32_t max_elems)
-{
- const unsigned int idx = atomic_read(var);
-
- if (pvrdma_idx_valid(idx, max_elems))
- return idx & (max_elems - 1);
- return PVRDMA_INVALID_IDX;
-}
-
-static inline void pvrdma_idx_ring_inc(int *var, uint32_t max_elems)
-{
- uint32_t idx = atomic_read(var) + 1; /* Increment. */
-
- idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */
- atomic_set(var, idx);
-}
-
-static inline int32_t pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
- uint32_t max_elems, uint32_t *out_tail)
-{
- const uint32_t tail = atomic_read(&r->prod_tail);
- const uint32_t head = atomic_read(&r->cons_head);
-
- if (pvrdma_idx_valid(tail, max_elems) &&
- pvrdma_idx_valid(head, max_elems)) {
- *out_tail = tail & (max_elems - 1);
- return tail != (head ^ max_elems);
- }
- return PVRDMA_INVALID_IDX;
-}
-
-static inline int32_t pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
- uint32_t max_elems, uint32_t *out_head)
-{
- const uint32_t tail = atomic_read(&r->prod_tail);
- const uint32_t head = atomic_read(&r->cons_head);
-
- if (pvrdma_idx_valid(tail, max_elems) &&
- pvrdma_idx_valid(head, max_elems)) {
- *out_head = head & (max_elems - 1);
- return tail != head;
- }
- return PVRDMA_INVALID_IDX;
-}
-
-#endif /* __PVRDMA_RING_H__ */
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
deleted file mode 100644
index 1677208a41..0000000000
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PVRDMA_VERBS_H__
-#define __PVRDMA_VERBS_H__
-
-#include "standard-headers/linux/types.h"
-
-union pvrdma_gid {
- uint8_t raw[16];
- struct {
- uint64_t subnet_prefix;
- uint64_t interface_id;
- } global;
-};
-
-enum pvrdma_link_layer {
- PVRDMA_LINK_LAYER_UNSPECIFIED,
- PVRDMA_LINK_LAYER_INFINIBAND,
- PVRDMA_LINK_LAYER_ETHERNET,
-};
-
-enum pvrdma_mtu {
- PVRDMA_MTU_256 = 1,
- PVRDMA_MTU_512 = 2,
- PVRDMA_MTU_1024 = 3,
- PVRDMA_MTU_2048 = 4,
- PVRDMA_MTU_4096 = 5,
-};
-
-static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
-{
- switch (mtu) {
- case PVRDMA_MTU_256: return 256;
- case PVRDMA_MTU_512: return 512;
- case PVRDMA_MTU_1024: return 1024;
- case PVRDMA_MTU_2048: return 2048;
- case PVRDMA_MTU_4096: return 4096;
- default: return -1;
- }
-}
-
-static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
-{
- switch (mtu) {
- case 256: return PVRDMA_MTU_256;
- case 512: return PVRDMA_MTU_512;
- case 1024: return PVRDMA_MTU_1024;
- case 2048: return PVRDMA_MTU_2048;
- case 4096:
- default: return PVRDMA_MTU_4096;
- }
-}
-
-enum pvrdma_port_state {
- PVRDMA_PORT_NOP = 0,
- PVRDMA_PORT_DOWN = 1,
- PVRDMA_PORT_INIT = 2,
- PVRDMA_PORT_ARMED = 3,
- PVRDMA_PORT_ACTIVE = 4,
- PVRDMA_PORT_ACTIVE_DEFER = 5,
-};
-
-enum pvrdma_port_cap_flags {
- PVRDMA_PORT_SM = 1 << 1,
- PVRDMA_PORT_NOTICE_SUP = 1 << 2,
- PVRDMA_PORT_TRAP_SUP = 1 << 3,
- PVRDMA_PORT_OPT_IPD_SUP = 1 << 4,
- PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5,
- PVRDMA_PORT_SL_MAP_SUP = 1 << 6,
- PVRDMA_PORT_MKEY_NVRAM = 1 << 7,
- PVRDMA_PORT_PKEY_NVRAM = 1 << 8,
- PVRDMA_PORT_LED_INFO_SUP = 1 << 9,
- PVRDMA_PORT_SM_DISABLED = 1 << 10,
- PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
- PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
- PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
- PVRDMA_PORT_CM_SUP = 1 << 16,
- PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17,
- PVRDMA_PORT_REINIT_SUP = 1 << 18,
- PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19,
- PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20,
- PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21,
- PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
- PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23,
- PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24,
- PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25,
- PVRDMA_PORT_IP_BASED_GIDS = 1 << 26,
- PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS,
-};
-
-enum pvrdma_port_width {
- PVRDMA_WIDTH_1X = 1,
- PVRDMA_WIDTH_4X = 2,
- PVRDMA_WIDTH_8X = 4,
- PVRDMA_WIDTH_12X = 8,
-};
-
-static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
-{
- switch (width) {
- case PVRDMA_WIDTH_1X: return 1;
- case PVRDMA_WIDTH_4X: return 4;
- case PVRDMA_WIDTH_8X: return 8;
- case PVRDMA_WIDTH_12X: return 12;
- default: return -1;
- }
-}
-
-enum pvrdma_port_speed {
- PVRDMA_SPEED_SDR = 1,
- PVRDMA_SPEED_DDR = 2,
- PVRDMA_SPEED_QDR = 4,
- PVRDMA_SPEED_FDR10 = 8,
- PVRDMA_SPEED_FDR = 16,
- PVRDMA_SPEED_EDR = 32,
-};
-
-struct pvrdma_port_attr {
- enum pvrdma_port_state state;
- enum pvrdma_mtu max_mtu;
- enum pvrdma_mtu active_mtu;
- uint32_t gid_tbl_len;
- uint32_t port_cap_flags;
- uint32_t max_msg_sz;
- uint32_t bad_pkey_cntr;
- uint32_t qkey_viol_cntr;
- uint16_t pkey_tbl_len;
- uint16_t lid;
- uint16_t sm_lid;
- uint8_t lmc;
- uint8_t max_vl_num;
- uint8_t sm_sl;
- uint8_t subnet_timeout;
- uint8_t init_type_reply;
- uint8_t active_width;
- uint8_t active_speed;
- uint8_t phys_state;
- uint8_t reserved[2];
-};
-
-struct pvrdma_global_route {
- union pvrdma_gid dgid;
- uint32_t flow_label;
- uint8_t sgid_index;
- uint8_t hop_limit;
- uint8_t traffic_class;
- uint8_t reserved;
-};
-
-struct pvrdma_grh {
- uint32_t version_tclass_flow;
- uint16_t paylen;
- uint8_t next_hdr;
- uint8_t hop_limit;
- union pvrdma_gid sgid;
- union pvrdma_gid dgid;
-};
-
-enum pvrdma_ah_flags {
- PVRDMA_AH_GRH = 1,
-};
-
-enum pvrdma_rate {
- PVRDMA_RATE_PORT_CURRENT = 0,
- PVRDMA_RATE_2_5_GBPS = 2,
- PVRDMA_RATE_5_GBPS = 5,
- PVRDMA_RATE_10_GBPS = 3,
- PVRDMA_RATE_20_GBPS = 6,
- PVRDMA_RATE_30_GBPS = 4,
- PVRDMA_RATE_40_GBPS = 7,
- PVRDMA_RATE_60_GBPS = 8,
- PVRDMA_RATE_80_GBPS = 9,
- PVRDMA_RATE_120_GBPS = 10,
- PVRDMA_RATE_14_GBPS = 11,
- PVRDMA_RATE_56_GBPS = 12,
- PVRDMA_RATE_112_GBPS = 13,
- PVRDMA_RATE_168_GBPS = 14,
- PVRDMA_RATE_25_GBPS = 15,
- PVRDMA_RATE_100_GBPS = 16,
- PVRDMA_RATE_200_GBPS = 17,
- PVRDMA_RATE_300_GBPS = 18,
-};
-
-struct pvrdma_ah_attr {
- struct pvrdma_global_route grh;
- uint16_t dlid;
- uint16_t vlan_id;
- uint8_t sl;
- uint8_t src_path_bits;
- uint8_t static_rate;
- uint8_t ah_flags;
- uint8_t port_num;
- uint8_t dmac[6];
- uint8_t reserved;
-};
-
-enum pvrdma_cq_notify_flags {
- PVRDMA_CQ_SOLICITED = 1 << 0,
- PVRDMA_CQ_NEXT_COMP = 1 << 1,
- PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED |
- PVRDMA_CQ_NEXT_COMP,
- PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2,
-};
-
-struct pvrdma_qp_cap {
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t max_recv_sge;
- uint32_t max_inline_data;
- uint32_t reserved;
-};
-
-enum pvrdma_sig_type {
- PVRDMA_SIGNAL_ALL_WR,
- PVRDMA_SIGNAL_REQ_WR,
-};
-
-enum pvrdma_qp_type {
- PVRDMA_QPT_SMI,
- PVRDMA_QPT_GSI,
- PVRDMA_QPT_RC,
- PVRDMA_QPT_UC,
- PVRDMA_QPT_UD,
- PVRDMA_QPT_RAW_IPV6,
- PVRDMA_QPT_RAW_ETHERTYPE,
- PVRDMA_QPT_RAW_PACKET = 8,
- PVRDMA_QPT_XRC_INI = 9,
- PVRDMA_QPT_XRC_TGT,
- PVRDMA_QPT_MAX,
-};
-
-enum pvrdma_qp_create_flags {
- PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0,
- PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
-};
-
-enum pvrdma_qp_attr_mask {
- PVRDMA_QP_STATE = 1 << 0,
- PVRDMA_QP_CUR_STATE = 1 << 1,
- PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
- PVRDMA_QP_ACCESS_FLAGS = 1 << 3,
- PVRDMA_QP_PKEY_INDEX = 1 << 4,
- PVRDMA_QP_PORT = 1 << 5,
- PVRDMA_QP_QKEY = 1 << 6,
- PVRDMA_QP_AV = 1 << 7,
- PVRDMA_QP_PATH_MTU = 1 << 8,
- PVRDMA_QP_TIMEOUT = 1 << 9,
- PVRDMA_QP_RETRY_CNT = 1 << 10,
- PVRDMA_QP_RNR_RETRY = 1 << 11,
- PVRDMA_QP_RQ_PSN = 1 << 12,
- PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
- PVRDMA_QP_ALT_PATH = 1 << 14,
- PVRDMA_QP_MIN_RNR_TIMER = 1 << 15,
- PVRDMA_QP_SQ_PSN = 1 << 16,
- PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
- PVRDMA_QP_PATH_MIG_STATE = 1 << 18,
- PVRDMA_QP_CAP = 1 << 19,
- PVRDMA_QP_DEST_QPN = 1 << 20,
- PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN,
-};
-
-enum pvrdma_qp_state {
- PVRDMA_QPS_RESET,
- PVRDMA_QPS_INIT,
- PVRDMA_QPS_RTR,
- PVRDMA_QPS_RTS,
- PVRDMA_QPS_SQD,
- PVRDMA_QPS_SQE,
- PVRDMA_QPS_ERR,
-};
-
-enum pvrdma_mig_state {
- PVRDMA_MIG_MIGRATED,
- PVRDMA_MIG_REARM,
- PVRDMA_MIG_ARMED,
-};
-
-enum pvrdma_mw_type {
- PVRDMA_MW_TYPE_1 = 1,
- PVRDMA_MW_TYPE_2 = 2,
-};
-
-struct pvrdma_srq_attr {
- uint32_t max_wr;
- uint32_t max_sge;
- uint32_t srq_limit;
- uint32_t reserved;
-};
-
-struct pvrdma_qp_attr {
- enum pvrdma_qp_state qp_state;
- enum pvrdma_qp_state cur_qp_state;
- enum pvrdma_mtu path_mtu;
- enum pvrdma_mig_state path_mig_state;
- uint32_t qkey;
- uint32_t rq_psn;
- uint32_t sq_psn;
- uint32_t dest_qp_num;
- uint32_t qp_access_flags;
- uint16_t pkey_index;
- uint16_t alt_pkey_index;
- uint8_t en_sqd_async_notify;
- uint8_t sq_draining;
- uint8_t max_rd_atomic;
- uint8_t max_dest_rd_atomic;
- uint8_t min_rnr_timer;
- uint8_t port_num;
- uint8_t timeout;
- uint8_t retry_cnt;
- uint8_t rnr_retry;
- uint8_t alt_port_num;
- uint8_t alt_timeout;
- uint8_t reserved[5];
- struct pvrdma_qp_cap cap;
- struct pvrdma_ah_attr ah_attr;
- struct pvrdma_ah_attr alt_ah_attr;
-};
-
-enum pvrdma_send_flags {
- PVRDMA_SEND_FENCE = 1 << 0,
- PVRDMA_SEND_SIGNALED = 1 << 1,
- PVRDMA_SEND_SOLICITED = 1 << 2,
- PVRDMA_SEND_INLINE = 1 << 3,
- PVRDMA_SEND_IP_CSUM = 1 << 4,
- PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM,
-};
-
-enum pvrdma_access_flags {
- PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0,
- PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1,
- PVRDMA_ACCESS_REMOTE_READ = 1 << 2,
- PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3,
- PVRDMA_ACCESS_MW_BIND = 1 << 4,
- PVRDMA_ZERO_BASED = 1 << 5,
- PVRDMA_ACCESS_ON_DEMAND = 1 << 6,
- PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND,
-};
-
-#endif /* __PVRDMA_VERBS_H__ */
diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h
index 909a66753c..b72917073d 100644
--- a/include/standard-headers/drm/drm_fourcc.h
+++ b/include/standard-headers/drm/drm_fourcc.h
@@ -53,16 +53,52 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
- * modifier is specific to the modifer being used. For example, some modifiers
+ * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias, otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ * These users mustn't need to reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations, stakeholders for all relevant driver
+ * stacks should approve additions.
*/
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
@@ -74,12 +110,42 @@ extern "C" {
#define DRM_FORMAT_INVALID 0
/* color index */
+#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
+#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
+#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-/* 8 bpp Red */
+/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
+
+/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
+
+/* 1 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
+
+/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
-/* 16 bpp Red */
+/* 10 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
+/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@@ -143,6 +209,13 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
@@ -154,6 +227,12 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+/*
+ * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
+ * of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -161,7 +240,9 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
@@ -235,6 +316,14 @@ extern "C" {
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian
+ * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
+ */
+#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
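As a minimal illustrative sketch (not part of the header; the helper name is hypothetical and <stdint.h> is assumed), the packed layout described above — five bytes holding four 10-bit luma samples, [39:0] Y3:Y2:Y1:Y0 little endian — can be unpacked in C like this:

#include <stdint.h>

static inline void nv15_unpack_y4(const uint8_t *p, uint16_t y[4])
{
    /* Assemble the 40-bit little-endian group [39:0] Y3:Y2:Y1:Y0. */
    uint64_t v = (uint64_t)p[0] | ((uint64_t)p[1] << 8) | ((uint64_t)p[2] << 16) |
                 ((uint64_t)p[3] << 24) | ((uint64_t)p[4] << 32);
    for (int i = 0; i < 4; i++)
        y[i] = (v >> (10 * i)) & 0x3ff;   /* each sample is 10 bits wide */
}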
/*
* 2 plane YCbCr MSB aligned
@@ -264,6 +353,29 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
+/* 3 plane non-subsampled (444) YCbCr
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cb plane, [15:0] Cb:x [10:6] little endian
+ * index 2: Cr plane, [15:0] Cr:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0')
+
+/* 3 plane non-subsampled (444) YCrCb
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cr plane, [15:0] Cr:x [10:6] little endian
+ * index 2: Cb plane, [15:0] Cb:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
+
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
@@ -297,7 +409,6 @@ extern "C" {
*/
/* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
@@ -308,11 +419,18 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
+#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
#define fourcc_mod_code(vendor, val) \
((((uint64_t)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
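A brief illustration of the new vendor helpers (a sketch only; the variable names are made up): they read the vendor byte from bits 63:56 of a modifier and compare it against a DRM_FORMAT_MOD_VENDOR_* token.

uint64_t mod = fourcc_mod_code(ALLWINNER, 1);              /* DRM_FORMAT_MOD_ALLWINNER_TILED */
uint8_t vendor = fourcc_mod_get_vendor(mod);               /* 0x09 == DRM_FORMAT_MOD_VENDOR_ALLWINNER */
int is_allwinner = fourcc_mod_is_vendor(mod, ALLWINNER);   /* non-zero */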
@@ -322,8 +440,33 @@ extern "C" {
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
+ *
+ * Generic modifier names:
+ *
+ * DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names
+ * for layouts which are common across multiple vendors. To preserve
+ * compatibility, in cases where a vendor-specific definition already exists and
+ * a generic name for it is desired, the common name is a purely symbolic alias
+ * and must use the same numerical value as the original definition.
+ *
+ * Note that generic names should only be used for modifiers which describe
+ * generic layouts (such as pixel re-ordering), which may have
+ * independently-developed support across multiple vendors.
+ *
+ * In future cases where a generic layout is identified before merging with a
+ * vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor
+ * 'NONE' could be considered. This should only be for obvious, exceptional
+ * cases to avoid polluting the 'GENERIC' namespace with modifiers which only
+ * apply to a single vendor.
+ *
+ * Generic names should not be used for cases where multiple hardware vendors
+ * have implementations of the same standardised compression scheme (such as
+ * AFBC). In those cases, all implementations should use the same format
+ * modifier(s), reflecting the vendor of the standard.
*/
+#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE
+
/*
* Invalid Modifier
*
@@ -343,6 +486,16 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit; instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
/* Intel framebuffer modifiers */
/*
@@ -386,7 +539,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -440,6 +593,115 @@ extern "C" {
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha values, each stored
+ * in 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
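The 256-bit clear color block described above can be pictured as the struct below; this is purely illustrative (a hypothetical layout sketch for readability, not a definition used by i915 or by this header):

struct gen12_ccs_cc_clear_color {   /* hypothetical name, 32 bytes total */
    uint32_t raw_rgba[4];     /* raw clear color R, G, B, A, 32 bits each (3d engine) */
    uint32_t converted_lo;    /* lower converted clear color value (consumed by DE) */
    uint32_t converted_hi;    /* higher converted clear color value, when applicable */
    uint64_t discard_depth;   /* color discard enable / depth clear value valid (ignored by DE) */
};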
+/*
+ * Intel Tile 4 layout
+ *
+ * This is a tiled layout using 4KB tiles in a row-major layout. It has the same
+ * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
+ * only differs from Tile Y at the 256B granularity in between. At this
+ * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
+ * of 64B x 8 rows.
+ */
+#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 media compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
+ * pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths. The
+ * clear color is stored at plane index 1 and the pitch should be 64 bytes
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * for details.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represent the Raw Clear Color Red, Green, Blue and Alpha values, each stored
+ * in 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -476,6 +738,28 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+/*
+ * Qualcomm Tiled Format
+ *
+ * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned with 4k(bytes).
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
+
+/*
+ * Qualcomm Alternate Tiled Format
+ *
+ * Alternate tiled format typically only used within GMEM.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
+
+
/* Vivante framebuffer modifiers */
/*
@@ -516,6 +800,35 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+/*
+ * Vivante TS (tile-status) buffer modifiers. They can be combined with all of
+ * the color buffer tiling modifiers defined above. When TS is present it's a
+ * separate buffer containing the clear/compression status of each tile. The
+ * modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer
+ * tile size in bytes covered by one entry in the status buffer and s is the
+ * number of status bits per entry.
+ * We reserve the top 8 bits of the Vivante modifier space for tile status
+ * clear/compression modifiers, as future cores might add some more TS layout
+ * variations.
+ */
+#define VIVANTE_MOD_TS_64_4 (1ULL << 48)
+#define VIVANTE_MOD_TS_64_2 (2ULL << 48)
+#define VIVANTE_MOD_TS_128_4 (3ULL << 48)
+#define VIVANTE_MOD_TS_256_4 (4ULL << 48)
+#define VIVANTE_MOD_TS_MASK (0xfULL << 48)
+
+/*
+ * Vivante compression modifiers. Those depend on a TS modifier being present
+ * as the TS bits get reinterpreted as compression tags instead of simple
+ * clear markers when compression is enabled.
+ */
+#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52)
+#define VIVANTE_MOD_COMP_MASK (0xfULL << 52)
+
+/* Masking out the extension bits will yield the base modifier. */
+#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \
+ VIVANTE_MOD_COMP_MASK)
+
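For illustration only (variable names made up): a tile-status and compression extension is OR-ed onto one of the base tiling modifiers above, and masking with VIVANTE_MOD_EXT_MASK recovers the base layout.

uint64_t mod  = DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED |
                VIVANTE_MOD_TS_64_4 | VIVANTE_MOD_COMP_DEC400;
uint64_t base = mod & ~VIVANTE_MOD_EXT_MASK;  /* back to the split super-tiled base modifier */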
/* NVIDIA frame buffer modifiers */
/*
@@ -728,6 +1041,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
@@ -785,10 +1102,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
*/
/*
- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
- * modifiers) denote the category for modifiers. Currently we have only two
- * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen
- * different categories.
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
*/
#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
fourcc_mod_code(ARM, ((uint64_t)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
@@ -891,6 +1208,121 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
*/
#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
+/* AFBC uncompressed storage mode
+ *
+ * Indicates that the buffer is using AFBC uncompressed storage mode.
+ * In this mode all superblock payloads in the buffer use the uncompressed
+ * storage mode, which is usually only used for data which cannot be compressed.
+ * The buffer layout is the same as for AFBC buffers without USM set, this only
+ * affects the storage mode of the individual superblocks. Note that even a
+ * buffer without USM set may use uncompressed storage mode for some or all
+ * superblocks, USM just guarantees it for all.
+ */
+#define AFBC_FORMAT_MOD_USM (1ULL << 12)
+
+/*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size Plane Alignment
+ * ---------------- ---------------
+ * 16 bytes 1024 bytes
+ * 24 bytes 512 bytes
+ * 32 bytes 2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout Paging Tile Width Paging Tile Height
+ * ------ ----------------- ------------------
+ * SCAN 16 coding units 4 coding units
+ * ROT 8 coding units 8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane Layout Coding Unit Width Coding Unit Height
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 SCAN 16 samples 4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 ROT 8 samples 8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 2 DONT CARE 8 samples 4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 3 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * ----------------------------- --------- ----------------- ------------------
+ * 4 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates if the buffer uses the scanline-optimised layout
+ * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
+
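A sketch of how these pieces combine (the variable name is made up): an AFRC modifier for a semi-planar YUV buffer with 16-byte luma and 24-byte chroma coding units in the scanline-optimised layout.

uint64_t mod = DRM_FORMAT_MOD_ARM_AFRC(
        AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) |
        AFRC_FORMAT_MOD_CU_SIZE_P12(AFRC_FORMAT_MOD_CU_SIZE_24) |
        AFRC_FORMAT_MOD_LAYOUT_SCAN);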
/*
* Arm 16x16 Block U-Interleaved modifier
*
@@ -915,6 +1347,222 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier)
*/
#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
+/*
+ * Amlogic Video Framebuffer Compression modifiers
+ *
+ * Amlogic uses a proprietary lossless image compression protocol and format
+ * for their hardware video codec accelerators, either video decoders or
+ * video input encoders.
+ *
+ * It considerably reduces memory bandwidth while writing and reading
+ * frames in memory.
+ *
+ * The underlying storage is considered to be 3 components, 8-bit or 10-bit
+ * per component YCbCr 420, single plane:
+ * - DRM_FORMAT_YUV420_8BIT
+ * - DRM_FORMAT_YUV420_10BIT
+ *
+ * The first 8 bits of the mode defines the layout, then the following 8 bits
+ * defines the options changing the layout.
+ *
+ * Not all combinations are valid, and different SoCs may support different
+ * combinations of layout and options.
+ */
+#define __fourcc_mod_amlogic_layout_mask 0xff
+#define __fourcc_mod_amlogic_options_shift 8
+#define __fourcc_mod_amlogic_options_mask 0xff
+
+#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ fourcc_mod_code(AMLOGIC, \
+ ((__layout) & __fourcc_mod_amlogic_layout_mask) | \
+ (((__options) & __fourcc_mod_amlogic_options_mask) \
+ << __fourcc_mod_amlogic_options_shift))
+
+/* Amlogic FBC Layouts */
+
+/*
+ * Amlogic FBC Basic Layout
+ *
+ * The basic layout is composed of:
+ * - a body content organized in 64x32 superblocks with 4096 bytes per
+ * superblock in default mode.
+ * - a 32 bytes per 128x64 header block
+ *
+ * This layout is transferable between Amlogic SoCs supporting this modifier.
+ */
+#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL)
+
+/*
+ * Amlogic FBC Scatter Memory layout
+ *
+ * Indicates the header contains IOMMU references to the compressed
+ * frames content to optimize memory access and layout.
+ *
+ * In this mode, only the header memory address is needed, thus the
+ * content memory organization is tied to the current producer
+ * execution and cannot be saved/dumped neither transferrable between
+ * Amlogic SoCs supporting this modifier.
+ *
+ * Due to the nature of the layout, these buffers are not expected to
+ * be accessible by the user-space clients, but only accessible by the
+ * hardware producers and consumers.
+ *
+ * The user-space clients should expect a failure while trying to mmap
+ * the DMA-BUF handle returned by the producer.
+ */
+#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL)
+
+/* Amlogic FBC Layout Options Bit Mask */
+
+/*
+ * Amlogic FBC Memory Saving mode
+ *
+ * Indicates the storage is packed when the pixel size is a multiple of word
+ * boundaries, i.e. 8-bit content should be stored in this mode to save
+ * allocated memory.
+ *
+ * This mode reduces body layout to 3072 bytes per 64x32 superblock with
+ * the basic layout and 3200 bytes per 64x32 superblock combined with
+ * the scatter layout.
+ */
+#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
+
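Putting the Amlogic pieces together (a sketch only; the variable name is made up), a basic-layout modifier with the memory-saving option for an 8-bit buffer looks like:

uint64_t mod = DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC,
                                          AMLOGIC_FBC_OPTION_MEM_SAVING);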
+/*
+ * AMD modifiers
+ *
+ * Memory layout:
+ *
+ * without DCC:
+ * - main surface
+ *
+ * with DCC & without DCC_RETILE:
+ * - main surface in plane 0
+ * - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set)
+ *
+ * with DCC & DCC_RETILE:
+ * - main surface in plane 0
+ * - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned)
+ * - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned)
+ *
+ * For multi-plane formats the above surfaces get merged into one plane for
+ * each format plane, based on the required alignment only.
+ *
+ * Bits Parameter Notes
+ * ----- ------------------------ ---------------------------------------------
+ *
+ * 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_*
+ * 12:8 TILE Values are AMD_FMT_MOD_TILE_<version>_*
+ * 13 DCC
+ * 14 DCC_RETILE
+ * 15 DCC_PIPE_ALIGN
+ * 16 DCC_INDEPENDENT_64B
+ * 17 DCC_INDEPENDENT_128B
+ * 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_*
+ * 20 DCC_CONSTANT_ENCODE
+ * 23:21 PIPE_XOR_BITS Only for some chips
+ * 26:24 BANK_XOR_BITS Only for some chips
+ * 29:27 PACKERS Only for some chips
+ * 32:30 RB Only for some chips
+ * 35:33 PIPE Only for some chips
+ * 55:36 - Reserved for future use, must be zero
+ */
+#define AMD_FMT_MOD fourcc_mod_code(AMD, 0)
+
+#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD)
+
+/* Reserve 0 for GFX8 and older */
+#define AMD_FMT_MOD_TILE_VER_GFX9 1
+#define AMD_FMT_MOD_TILE_VER_GFX10 2
+#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+#define AMD_FMT_MOD_TILE_VER_GFX11 4
+
+/*
+ * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+ * version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_S 9
+
+/*
+ * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+ * GFX9 as canonical version.
+ */
+#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
+#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+
+#define AMD_FMT_MOD_DCC_BLOCK_64B 0
+#define AMD_FMT_MOD_DCC_BLOCK_128B 1
+#define AMD_FMT_MOD_DCC_BLOCK_256B 2
+
+#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0
+#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF
+#define AMD_FMT_MOD_TILE_SHIFT 8
+#define AMD_FMT_MOD_TILE_MASK 0x1F
+
+/* Whether DCC compression is enabled. */
+#define AMD_FMT_MOD_DCC_SHIFT 13
+#define AMD_FMT_MOD_DCC_MASK 0x1
+
+/*
+ * Whether to include two DCC surfaces, one which is rb & pipe aligned, and
+ * one which is not-aligned.
+ */
+#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14
+#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1
+
+/* Only set if DCC_RETILE = false */
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15
+#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1
+
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16
+#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17
+#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18
+#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
+
+/*
+ * DCC supports embedding some clear colors directly in the DCC surface.
+ * However, on older GPUs the rendering HW ignores the embedded clear color
+ * and prefers the driver provided color. This necessitates doing a fastclear
+ * eliminate operation before a process transfers control.
+ *
+ * If this bit is set that means the fastclear eliminate is not needed for these
+ * embeddable colors.
+ */
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
+
+/*
+ * The below fields are for accounting for per GPU differences. These are only
+ * relevant for GFX9 and later and if the tile field is *_X/_T.
+ *
+ * PIPE_XOR_BITS = always needed
+ * BANK_XOR_BITS = only for TILE_VER_GFX9
+ * PACKERS = only for TILE_VER_GFX10_RBPLUS
+ * RB = only for TILE_VER_GFX9 & DCC
+ * PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
+ */
+#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
+#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
+#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_PACKERS_SHIFT 27
+#define AMD_FMT_MOD_PACKERS_MASK 0x7
+#define AMD_FMT_MOD_RB_SHIFT 30
+#define AMD_FMT_MOD_RB_MASK 0x7
+#define AMD_FMT_MOD_PIPE_SHIFT 33
+#define AMD_FMT_MOD_PIPE_MASK 0x7
+
+#define AMD_FMT_MOD_SET(field, value) \
+ ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
+#define AMD_FMT_MOD_GET(field, value) \
+ (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+#define AMD_FMT_MOD_CLEAR(field) \
+ (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+
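An illustration of the SET/GET/CLEAR helpers (a sketch; the field values are chosen arbitrarily and the variable names are made up):

uint64_t mod = AMD_FMT_MOD |
               AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
               AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
               AMD_FMT_MOD_SET(DCC, 1);
uint64_t tile  = AMD_FMT_MOD_GET(TILE, mod);     /* == AMD_FMT_MOD_TILE_GFX9_64K_S */
uint64_t plain = mod & AMD_FMT_MOD_CLEAR(DCC);   /* same modifier with the DCC bit cleared */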
#if defined(__cplusplus)
}
#endif
diff --git a/include/standard-headers/linux/const.h b/include/standard-headers/linux/const.h
new file mode 100644
index 0000000000..1eb84b5087
--- /dev/null
+++ b/include/standard-headers/linux/const.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* const.h: Macros for dealing with constants. */
+
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
+
+/* Some constant macros are used in both assembler and
+ * C code. Therefore we cannot annotate them always with
+ * 'UL' and other type specifiers unilaterally. We
+ * use the following macros to deal with this.
+ *
+ * Similarly, _AT() will cast an expression with a type in C, but
+ * leave it unchanged in asm.
+ */
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y) X
+#define _AT(T,X) X
+#else
+#define __AC(X,Y) (X##Y)
+#define _AC(X,Y) __AC(X,Y)
+#define _AT(T,X) ((T)(X))
+#endif
+
+#define _UL(x) (_AC(x, UL))
+#define _ULL(x) (_AC(x, ULL))
+
+#define _BITUL(x) (_UL(1) << (x))
+#define _BITULL(x) (_ULL(1) << (x))
+
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#endif /* _LINUX_CONST_H */
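For reference (illustrative only; MY_BASE and MY_FLAG are made-up names), in C these helpers expand as follows:

#define MY_BASE  _AC(0x1000, UL)   /* 0x1000UL in C, plain 0x1000 in assembly */
#define MY_FLAG  _BITUL(3)         /* (1UL << 3) */
/* __ALIGN_KERNEL(100, 64) rounds 100 up to the next 64-byte boundary: 128. */
/* __KERNEL_DIV_ROUND_UP(10, 4) == 3. */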
diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h
index fd8d2cccfe..01503784d2 100644
--- a/include/standard-headers/linux/ethtool.h
+++ b/include/standard-headers/linux/ethtool.h
@@ -16,7 +16,7 @@
#include "net/eth.h"
-#include "standard-headers/linux/kernel.h"
+#include "standard-headers/linux/const.h"
#include "standard-headers/linux/types.h"
#include "standard-headers/linux/if_ether.h"
@@ -26,6 +26,14 @@
* have the same layout for 32-bit and 64-bit userland.
*/
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
/**
* struct ethtool_cmd - DEPRECATED, link control and status
* This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
* and other link features that the link partner advertised
* through autonegotiation; 0 if unknown or not applicable.
* Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* The link speed in Mbps is split between @speed and @speed_hi. Use
* the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -150,11 +159,14 @@ static inline uint32_t ethtool_cmd_speed(const struct ethtool_cmd *ep)
* in its bus driver structure (e.g. pci_driver::name). Must
* not be an empty string.
* @version: Driver version string; may be an empty string
- * @fw_version: Firmware version string; may be an empty string
- * @erom_version: Expansion ROM version string; may be an empty string
+ * @fw_version: Firmware version string; driver defined; may be an
+ * empty string
+ * @erom_version: Expansion ROM version string; driver defined; may be
+ * an empty string
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
* @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
* %ETHTOOL_SPFLAGS commands; also the number of strings in the
* %ETH_SS_PRIV_FLAGS set
@@ -169,10 +181,6 @@ static inline uint32_t ethtool_cmd_speed(const struct ethtool_cmd *ep)
*
* Users can use the %ETHTOOL_GSSET_INFO command to get the number of
* strings in any string set (from Linux 2.6.34).
- *
- * Drivers should set at most @driver, @version, @fw_version and
- * @bus_info in their get_drvinfo() implementation. The ethtool
- * core fills in the other fields using other driver operations.
*/
struct ethtool_drvinfo {
uint32_t cmd;
@@ -221,9 +229,10 @@ enum tunable_id {
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ ETHTOOL_TX_COPYBREAK_BUF_SIZE,
/*
* Add your fresh new tunable attribute above and remember to update
- * tunable_strings[] in net/core/ethtool.c
+ * tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_TUNABLE_COUNT,
};
@@ -246,7 +255,7 @@ struct ethtool_tunable {
uint32_t id;
uint32_t type_id;
uint32_t len;
- void *data[0];
+ void *data[];
};
#define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff
@@ -287,7 +296,7 @@ enum phy_tunable_id {
ETHTOOL_PHY_EDPD,
/*
* Add your fresh new phy tunable attribute above and remember to update
- * phy_tunable_strings[] in net/core/ethtool.c
+ * phy_tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_PHY_TUNABLE_COUNT,
};
@@ -311,7 +320,7 @@ struct ethtool_regs {
uint32_t cmd;
uint32_t version;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
/**
@@ -337,7 +346,7 @@ struct ethtool_eeprom {
uint32_t magic;
uint32_t offset;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
/**
@@ -356,6 +365,7 @@ struct ethtool_eeprom {
* @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
* its tx lpi (after reaching 'idle' state). Effective only when eee
* was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
*/
struct ethtool_eee {
uint32_t cmd;
@@ -374,6 +384,7 @@ struct ethtool_eee {
* @cmd: %ETHTOOL_GMODULEINFO
* @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
* @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* This structure is used to return the information to
* properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,6 +590,70 @@ struct ethtool_pauseparam {
uint32_t tx_pause;
};
+/* Link extended state */
+enum ethtool_link_ext_state {
+ ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_STATE_NO_CABLE,
+ ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
+ ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+ ETHTOOL_LINK_EXT_STATE_MODULE,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
+enum ethtool_link_ext_substate_autoneg {
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+ */
+enum ethtool_link_ext_substate_link_training {
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+ */
+enum ethtool_link_ext_substate_link_logical_mismatch {
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+ */
+enum ethtool_link_ext_substate_bad_signal_integrity {
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
+enum ethtool_link_ext_substate_cable_issue {
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
+};
+
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_MODULE. */
+enum ethtool_link_ext_substate_module {
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY = 1,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -591,6 +666,7 @@ struct ethtool_pauseparam {
* now deprecated
* @ETH_SS_FEATURES: Device feature names
 * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
* @ETH_SS_LINK_MODES: link mode names
@@ -599,6 +675,14 @@ struct ethtool_pauseparam {
* @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags
* @ETH_SS_TS_TX_TYPES: timestamping Tx types
* @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
+ * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ * @ETH_SS_STATS_STD: standardized stats
+ * @ETH_SS_STATS_ETH_PHY: names of IEEE 802.3 PHY statistics
+ * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
+ * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
+ * @ETH_SS_STATS_RMON: names of RMON statistics
+ *
+ * @ETH_SS_COUNT: number of defined string sets
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -616,12 +700,129 @@ enum ethtool_stringset {
ETH_SS_SOF_TIMESTAMPING,
ETH_SS_TS_TX_TYPES,
ETH_SS_TS_RX_FILTERS,
+ ETH_SS_UDP_TUNNEL_TYPES,
+ ETH_SS_STATS_STD,
+ ETH_SS_STATS_ETH_PHY,
+ ETH_SS_STATS_ETH_MAC,
+ ETH_SS_STATS_ETH_CTRL,
+ ETH_SS_STATS_RMON,
/* add new constants above here */
ETH_SS_COUNT
};
/**
+ * enum ethtool_mac_stats_src - source of ethtool MAC statistics
+ * @ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ * if device supports a MAC merge layer, this retrieves the aggregate
+ * statistics of the eMAC and pMAC. Otherwise, it retrieves just the
+ * statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_EMAC:
+ * if device supports a MM layer, this retrieves the eMAC statistics.
+ * Otherwise, it retrieves the statistics of the single (express) MAC.
+ * @ETHTOOL_MAC_STATS_SRC_PMAC:
+ * if device supports a MM layer, this retrieves the pMAC statistics.
+ */
+enum ethtool_mac_stats_src {
+ ETHTOOL_MAC_STATS_SRC_AGGREGATE,
+ ETHTOOL_MAC_STATS_SRC_EMAC,
+ ETHTOOL_MAC_STATS_SRC_PMAC,
+};
+
+/**
+ * enum ethtool_module_power_mode_policy - plug-in module power mode policy
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO: Module is transitioned by the host
+ * to high power mode when the first port using it is put administratively
+ * up and to low power mode when the last port using it is put
+ * administratively down.
+ */
+enum ethtool_module_power_mode_policy {
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH = 1,
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO,
+};
+
+/**
+ * enum ethtool_module_power_mode - plug-in module power mode
+ * @ETHTOOL_MODULE_POWER_MODE_LOW: Module is in low power mode.
+ * @ETHTOOL_MODULE_POWER_MODE_HIGH: Module is in high power mode.
+ */
+enum ethtool_module_power_mode {
+ ETHTOOL_MODULE_POWER_MODE_LOW = 1,
+ ETHTOOL_MODULE_POWER_MODE_HIGH,
+};
+
+/**
+ * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN: state of PoDL PSE functions are
+ * unknown
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: PoDL PSE functions are disabled
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: PoDL PSE functions are enabled
+ */
+enum ethtool_podl_pse_admin_state {
+ ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED,
+};
+
+/**
+ * enum ethtool_podl_pse_pw_d_status - power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN: PoDL PSE
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED: "The enumeration “disabled” is
+ * asserted true when the PoDL PSE state diagram variable mr_pse_enable is
+ * false"
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING: "The enumeration “searching” is
+ * asserted true when either of the PSE state diagram variables
+ * pi_detecting or pi_classifying is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING: "The enumeration “deliveringPower”
+ * is asserted true when the PoDL PSE state diagram variable pi_powered is
+ * true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP: "The enumeration “sleep” is asserted
+ * true when the PoDL PSE state diagram variable pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE: "The enumeration “idle” is asserted true
+ * when the logical combination of the PoDL PSE state diagram variables
+ * pi_prebiased*!pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR: "The enumeration “error” is asserted
+ * true when the PoDL PSE state diagram variable overload_held is true."
+ */
+enum ethtool_podl_pse_pw_d_status {
+ ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR,
+};
+
+/**
+ * enum ethtool_mm_verify_status - status of MAC Merge Verify function
+ * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN:
+ * verification status is unknown
+ * @ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ * the 802.3 Verify State diagram is in the state INIT_VERIFICATION
+ * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ * the Verify State diagram is in the state VERIFICATION_IDLE,
+ * SEND_VERIFY or WAIT_FOR_RESPONSE
+ * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ * indicates that the Verify State diagram is in the state VERIFIED
+ * @ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ * the Verify State diagram is in the state VERIFY_FAIL
+ * @ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ * verification of preemption operation is disabled
+ */
+enum ethtool_mm_verify_status {
+ ETHTOOL_MM_VERIFY_STATUS_UNKNOWN,
+ ETHTOOL_MM_VERIFY_STATUS_INITIAL,
+ ETHTOOL_MM_VERIFY_STATUS_VERIFYING,
+ ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED,
+ ETHTOOL_MM_VERIFY_STATUS_FAILED,
+ ETHTOOL_MM_VERIFY_STATUS_DISABLED,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -637,12 +838,13 @@ struct ethtool_gstrings {
uint32_t cmd;
uint32_t string_set;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
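The switch from data[0] to a C99 flexible array member does not change the binary layout; user space still allocates the fixed header plus the trailing data in one block. A hypothetical caller (sketch only; n_strings is made up, <stdlib.h> is assumed, and error handling plus the ioctl call are omitted) might do:

size_t n_strings = 8;
struct ethtool_gstrings *gs = calloc(1, sizeof(*gs) + n_strings * ETH_GSTRING_LEN);
gs->cmd = ETHTOOL_GSTRINGS;
gs->string_set = ETH_SS_STATS;
/* ...issue the request; the kernel fills in gs->len and gs->data[]... */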
/**
* struct ethtool_sset_info - string set information
* @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
* @sset_mask: On entry, a bitmask of string sets to query, with bits
* numbered according to &enum ethtool_stringset. On return, a
* bitmask of those string sets queried that are supported.
@@ -661,7 +863,7 @@ struct ethtool_sset_info {
uint32_t cmd;
uint32_t reserved;
uint64_t sset_mask;
- uint32_t data[0];
+ uint32_t data[];
};
/**
@@ -687,6 +889,7 @@ enum ethtool_test_flags {
* @flags: A bitmask of flags from &enum ethtool_test_flags. Some
* flags may be set by the user on entry; others may be set by
* the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
* @len: On return, the number of test results
* @data: Array of test results
*
@@ -700,7 +903,7 @@ struct ethtool_test {
uint32_t flags;
uint32_t reserved;
uint32_t len;
- uint64_t data[0];
+ uint64_t data[];
};
/**
@@ -717,7 +920,7 @@ struct ethtool_test {
struct ethtool_stats {
uint32_t cmd;
uint32_t n_stats;
- uint64_t data[0];
+ uint64_t data[];
};
/**
@@ -734,7 +937,7 @@ struct ethtool_stats {
struct ethtool_perm_addr {
uint32_t cmd;
uint32_t size;
- uint8_t data[0];
+ uint8_t data[];
};
/* boolean flags controlling per-interface behavior characteristics.
@@ -887,6 +1090,7 @@ union ethtool_flow_union {
* @vlan_etype: VLAN EtherType
* @vlan_tci: VLAN tag control information
* @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
*
* Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
* is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1022,7 +1226,7 @@ struct ethtool_rxnfc {
uint32_t rule_cnt;
uint32_t rss_context;
};
- uint32_t rule_locs[0];
+ uint32_t rule_locs[];
};
@@ -1042,7 +1246,7 @@ struct ethtool_rxnfc {
struct ethtool_rxfh_indir {
uint32_t cmd;
uint32_t size;
- uint32_t ring_index[0];
+ uint32_t ring_index[];
};
/**
@@ -1062,7 +1266,10 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd: Reserved for future extensions.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size uint32_t elements, followed by hash key of @key_size
* bytes.
@@ -1080,9 +1287,10 @@ struct ethtool_rxfh {
uint32_t indir_size;
uint32_t key_size;
uint8_t hfunc;
- uint8_t rsvd8[3];
+ uint8_t input_xfrm;
+ uint8_t rsvd8[2];
uint32_t rsvd32;
- uint32_t rss_config[0];
+ uint32_t rss_config[];
};
#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
@@ -1167,7 +1375,7 @@ struct ethtool_dump {
uint32_t version;
uint32_t flag;
uint32_t len;
- uint8_t data[0];
+ uint8_t data[];
};
#define ETH_FW_DUMP_DISABLE 0
@@ -1199,7 +1407,7 @@ struct ethtool_get_features_block {
struct ethtool_gfeatures {
uint32_t cmd;
uint32_t size;
- struct ethtool_get_features_block features[0];
+ struct ethtool_get_features_block features[];
};
/**
@@ -1221,7 +1429,7 @@ struct ethtool_set_features_block {
struct ethtool_sfeatures {
uint32_t cmd;
uint32_t size;
- struct ethtool_set_features_block features[0];
+ struct ethtool_set_features_block features[];
};
/**
@@ -1230,7 +1438,9 @@ struct ethtool_sfeatures {
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
*
* The bits in the 'tx_types' and 'rx_filters' fields correspond to
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1304,15 +1514,33 @@ struct ethtool_per_queue_op {
};
/**
- * struct ethtool_fecparam - Ethernet forward error correction(fec) parameters
+ * struct ethtool_fecparam - Ethernet Forward Error Correction parameters
* @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
- * @active_fec: FEC mode which is active on porte
- * @fec: Bitmask of supported/configured FEC modes
- * @rsvd: Reserved for future extensions. i.e FEC bypass feature.
+ * @active_fec: FEC mode which is active on the port, single bit set, GET only.
+ * @fec: Bitmask of configured FEC modes.
+ * @reserved: Reserved for future extensions, ignore on GET, write 0 for SET.
*
- * Drivers should reject a non-zero setting of @autoneg when
- * autoneogotiation is disabled (or not supported) for the link.
+ * Note that @reserved was never validated on input and ethtool user space
+ * left it uninitialized when calling SET. Hence going forward it can only be
+ * used to return a value to userspace with GET.
+ *
+ * FEC modes supported by the device can be read via %ETHTOOL_GLINKSETTINGS.
+ * FEC settings are configured by link autonegotiation whenever it's enabled.
+ * With autoneg on %ETHTOOL_GFECPARAM can be used to read the current mode.
*
+ * When autoneg is disabled %ETHTOOL_SFECPARAM controls the FEC settings.
+ * It is recommended that drivers only accept a single bit set in @fec.
+ * When multiple bits are set in @fec drivers may pick mode in an implementation
+ * dependent way. Drivers should reject mixing %ETHTOOL_FEC_AUTO_BIT with other
+ * FEC modes, because it's unclear whether in this case other modes constrain
+ * AUTO or are independent choices.
+ * Drivers must reject SET requests if they support none of the requested modes.
+ *
+ * If the device does not support FEC, drivers may use %ETHTOOL_FEC_NONE instead
+ * of returning %EOPNOTSUPP from %ETHTOOL_GFECPARAM.
+ *
+ * See enum ethtool_fec_config_bits for definition of valid bits for both
+ * @fec and @active_fec.
*/
struct ethtool_fecparam {
uint32_t cmd;
@@ -1324,11 +1552,16 @@ struct ethtool_fecparam {
/**
* enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
- * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported
- * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver
- * @ETHTOOL_FEC_OFF: No FEC Mode
- * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode
- * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode
+ * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported. Should not
+ * be used together with other bits. GET only.
+ * @ETHTOOL_FEC_AUTO_BIT: Select default/best FEC mode automatically, usually
+ *			  based on link mode and SFP parameters read from module's
+ * EEPROM. This bit does _not_ mean autonegotiation.
+ * @ETHTOOL_FEC_OFF_BIT: No FEC Mode
+ * @ETHTOOL_FEC_RS_BIT: Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon FEC Mode
+ * @ETHTOOL_FEC_LLRS_BIT: Low Latency Reed Solomon FEC Mode (25G/50G Ethernet
+ * Consortium)
*/
enum ethtool_fec_config_bits {
ETHTOOL_FEC_NONE_BIT,
@@ -1530,6 +1763,34 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79,
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84,
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
+ ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
+ ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
+ ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92,
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT = 93,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT = 94,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT = 95,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98,
+ ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99,
+ ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101,
+
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
@@ -1641,6 +1902,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_100000 100000
#define SPEED_200000 200000
#define SPEED_400000 400000
+#define SPEED_800000 800000
#define SPEED_UNKNOWN -1
@@ -1678,6 +1940,20 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define MASTER_SLAVE_STATE_SLAVE 3
#define MASTER_SLAVE_STATE_ERR 4
+/* These are used to throttle the rate of data on the phy interface when the
+ * native speed of the interface is higher than the link speed. These should
+ * not be used for phy interfaces which natively support multiple speeds (e.g.
+ * MII or SGMII).
+ */
+/* No rate matching performed. */
+#define RATE_MATCH_NONE 0
+/* The phy sends pause frames to throttle the MAC. */
+#define RATE_MATCH_PAUSE 1
+/* The phy asserts CRS to prevent the MAC from transmitting. */
+#define RATE_MATCH_CRS 2
+/* The MAC is programmed with a sufficiently-large IPG. */
+#define RATE_MATCH_OPEN_LOOP 3
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -1719,6 +1995,15 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define WOL_MODE_COUNT 8
+/* RSS hash function data
+ * XOR the corresponding source and destination fields of each specified
+ * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
+ * calculation. Note that this XORing reduces the input set entropy and could
+ * be exploited to reduce the RSS queue spread.
+ */
+#define RXH_XFRM_SYM_XOR (1 << 0)
+#define RXH_XFRM_NO_CHANGE 0xff
+
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@@ -1738,6 +2023,53 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
+
+/* Used for GTP-U IPv4 and IPv6.
+ * The format of GTP packets only includes
+ * elements such as TEID and GTP version.
+ * It is primarily intended for data communication of the UE.
+ */
+#define GTPU_V4_FLOW 0x13 /* hash only */
+#define GTPU_V6_FLOW 0x14 /* hash only */
+
+/* Use for GTP-C IPv4 and v6.
+ * The format of these GTP packets does not include TEID.
+ * Primarily expected to be used for communication
+ * to create sessions for UE data communication,
+ * commonly referred to as CSR (Create Session Request).
+ */
+#define GTPC_V4_FLOW 0x15 /* hash only */
+#define GTPC_V6_FLOW 0x16 /* hash only */
+
+/* Use for GTP-C IPv4 and v6.
+ * Unlike GTPC_V4_FLOW, the format of these GTP packets includes TEID.
+ * After session creation, it becomes this packet.
+ * This is mainly used for requests to realize UE handover.
+ */
+#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
+#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
+
+/* Use for GTP-U and extended headers for the PSC (PDU Session Container).
+ * The format of these GTP packets includes TEID and QFI.
+ * In 5G communication using UPF (User Plane Function),
+ * data communication with this extended header is performed.
+ */
+#define GTPU_EH_V4_FLOW 0x19 /* hash only */
+#define GTPU_EH_V6_FLOW 0x1a /* hash only */
+
+/* Use for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers.
+ * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by
+ * UL/DL included in the PSC.
+ * There are differences in the data included based on Downlink/Uplink,
+ * and can be used to distinguish packets.
+ * The functions described so far are useful when you want to
+ * handle communication from the mobile network in UPF, PGW, etc.
+ */
+#define GTPU_UL_V4_FLOW 0x1b /* hash only */
+#define GTPU_UL_V6_FLOW 0x1c /* hash only */
+#define GTPU_DL_V4_FLOW 0x1d /* hash only */
+#define GTPU_DL_V6_FLOW 0x1e /* hash only */
+
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
@@ -1752,6 +2084,7 @@ static inline int ethtool_validate_duplex(uint8_t duplex)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
@@ -1855,20 +2188,13 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
- * @supported: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features for which the interface
- * supports autonegotiation or auto-detection. Read-only.
- * @advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features that are advertised through
- * autonegotiation or enabled for auto-detection.
- * @lp_advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, and other
- * link features that the link partner advertised through
- * autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @rate_matching: Rate adaptation performed by the PHY.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
@@ -1903,6 +2229,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
+ *
+ * @link_mode_masks is divided into three bitfields, each of length
+ * @link_mode_masks_nwords:
+ * - supported: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features for which the interface
+ * supports autonegotiation or auto-detection. Read-only.
+ * - advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features that are advertised through
+ * autonegotiation or enabled for auto-detection.
+ * - lp_advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, and other
+ * link features that the link partner advertised through
+ * autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
uint32_t cmd;
@@ -1918,9 +2259,9 @@ struct ethtool_link_settings {
uint8_t transceiver;
uint8_t master_slave_cfg;
uint8_t master_slave_state;
- uint8_t reserved1[1];
+ uint8_t rate_matching;
uint32_t reserved[7];
- uint32_t link_mode_masks[0];
+ uint32_t link_mode_masks[];
/* layout of link_mode_masks fields:
* uint32_t map_supported[link_mode_masks_nwords];
* uint32_t map_advertising[link_mode_masks_nwords];
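For illustration (not part of the patch), a minimal sketch of indexing the three sub-bitmaps, given the @link_mode_masks_nwords value returned by the kernel; the helper name is a placeholder:

#include <stdbool.h>
#include <stdint.h>

/* Order of the sub-bitmaps inside link_mode_masks[]. */
enum { LM_SUPPORTED = 0, LM_ADVERTISING = 1, LM_LP_ADVERTISING = 2 };

/* Test bit 'mode' (an ethtool_link_mode_bit_indices value) in one sub-bitmap. */
static bool link_mode_test_bit(const uint32_t *masks, uint8_t nwords,
                               int which, unsigned int mode)
{
    const uint32_t *map = masks + (unsigned int)which * nwords;

    return map[mode / 32] & (1U << (mode % 32));
}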
diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h
index f4df0a40f6..bac9dbc49f 100644
--- a/include/standard-headers/linux/fuse.h
+++ b/include/standard-headers/linux/fuse.h
@@ -172,6 +172,51 @@
* - add FUSE_WRITE_KILL_PRIV flag
* - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING
* - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag
+ *
+ * 7.32
+ * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS
+ *
+ * 7.33
+ * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID
+ * - add FUSE_OPEN_KILL_SUIDGID
+ * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT
+ * - add FUSE_SETXATTR_ACL_KILL_SGID
+ *
+ * 7.34
+ * - add FUSE_SYNCFS
+ *
+ * 7.35
+ * - add FOPEN_NOFLUSH
+ *
+ * 7.36
+ * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag
+ * - add flags2 to fuse_init_in and fuse_init_out
+ * - add FUSE_SECURITY_CTX init flag
+ * - add security context to create, mkdir, symlink, and mknod requests
+ * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX
+ *
+ * 7.37
+ * - add FUSE_TMPFILE
+ *
+ * 7.38
+ * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry
+ * - add FOPEN_PARALLEL_DIRECT_WRITES
+ * - add total_extlen to fuse_in_header
+ * - add FUSE_MAX_NR_SECCTX
+ * - add extension header
+ * - add FUSE_EXT_GROUPS
+ * - add FUSE_CREATE_SUPP_GROUP
+ * - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_ALLOW_MMAP
+ * - add FUSE_STATX and related structures
+ *
+ * 7.40
+ * - add max_stack_depth to fuse_init_out, add FUSE_PASSTHROUGH init flag
+ * - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
+ * - add FUSE_NO_EXPORT_SUPPORT init flag
+ * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
*/
#ifndef _LINUX_FUSE_H
@@ -203,7 +248,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 31
+#define FUSE_KERNEL_MINOR_VERSION 40
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -227,7 +272,41 @@ struct fuse_attr {
uint32_t gid;
uint32_t rdev;
uint32_t blksize;
- uint32_t padding;
+ uint32_t flags;
+};
+
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
};
struct fuse_kstatfs {
@@ -264,6 +343,7 @@ struct fuse_file_lock {
#define FATTR_MTIME_NOW (1 << 8)
#define FATTR_LOCKOWNER (1 << 9)
#define FATTR_CTIME (1 << 10)
+#define FATTR_KILL_SUIDGID (1 << 11)
/**
* Flags returned by the OPEN request
@@ -273,12 +353,18 @@ struct fuse_file_lock {
* FOPEN_NONSEEKABLE: the file is not seekable
* FOPEN_CACHE_DIR: allow caching this directory
* FOPEN_STREAM: the file is stream-like (no file position at all)
+ * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
+ * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode
+ * FOPEN_PASSTHROUGH: passthrough read/write io for this open file
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
#define FOPEN_CACHE_DIR (1 << 3)
#define FOPEN_STREAM (1 << 4)
+#define FOPEN_NOFLUSH (1 << 5)
+#define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6)
+#define FOPEN_PASSTHROUGH (1 << 7)
/**
* INIT request/reply flags
@@ -309,7 +395,28 @@ struct fuse_file_lock {
* FUSE_CACHE_SYMLINKS: cache READLINK responses
* FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
* FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request
- * FUSE_MAP_ALIGNMENT: map_alignment field is valid
+ * FUSE_MAP_ALIGNMENT: init_out.map_alignment contains log2(byte alignment) for
+ * foffset and moffset fields in struct
+ * fuse_setupmapping_in and fuse_removemapping_one.
+ * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts
+ * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc.
+ * Upon write/truncate suid/sgid is only killed if caller
+ * does not have CAP_FSETID. Additionally upon
+ * write/truncate sgid is killed only if file has group
+ * execute permission. (Same as Linux VFS behavior).
+ * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in
+ * FUSE_INIT_EXT: extended fuse_init_in request
+ * FUSE_INIT_RESERVED: reserved, do not use
+ * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and
+ * mknod
+ * FUSE_HAS_INODE_DAX: use per inode DAX
+ * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
+ * symlink and mknod (single group that matches parent)
+ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
+ * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
+ * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
+ * of the request ID indicates resend requests
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -338,6 +445,23 @@ struct fuse_file_lock {
#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
#define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
#define FUSE_MAP_ALIGNMENT (1 << 26)
+#define FUSE_SUBMOUNTS (1 << 27)
+#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28)
+#define FUSE_SETXATTR_EXT (1 << 29)
+#define FUSE_INIT_EXT (1 << 30)
+#define FUSE_INIT_RESERVED (1 << 31)
+/* bits 32..63 get shifted down 32 bits into the flags2 field */
+#define FUSE_SECURITY_CTX (1ULL << 32)
+#define FUSE_HAS_INODE_DAX (1ULL << 33)
+#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
+#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
+#define FUSE_PASSTHROUGH (1ULL << 37)
+#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
+#define FUSE_HAS_RESEND (1ULL << 39)
+
+/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
+#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
/**
* CUSE INIT request/reply flags
@@ -367,11 +491,14 @@ struct fuse_file_lock {
*
* FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
* FUSE_WRITE_LOCKOWNER: lock_owner field is valid
- * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits
+ * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits
*/
#define FUSE_WRITE_CACHE (1 << 0)
#define FUSE_WRITE_LOCKOWNER (1 << 1)
-#define FUSE_WRITE_KILL_PRIV (1 << 2)
+#define FUSE_WRITE_KILL_SUIDGID (1 << 2)
+
+/* Obsolete alias; this flag implies killing suid/sgid only. */
+#define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID
/**
* Read flags
@@ -413,6 +540,44 @@ struct fuse_file_lock {
*/
#define FUSE_FSYNC_FDATASYNC (1 << 0)
+/**
+ * fuse_attr flags
+ *
+ * FUSE_ATTR_SUBMOUNT: Object is a submount root
+ * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode
+ */
+#define FUSE_ATTR_SUBMOUNT (1 << 0)
+#define FUSE_ATTR_DAX (1 << 1)
+
+/**
+ * Open flags
+ * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable
+ */
+#define FUSE_OPEN_KILL_SUIDGID (1 << 0)
+
+/**
+ * setxattr flags
+ * FUSE_SETXATTR_ACL_KILL_SGID: Clear SGID when system.posix_acl_access is set
+ */
+#define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0)
+
+/**
+ * notify_inval_entry flags
+ * FUSE_EXPIRE_ONLY
+ */
+#define FUSE_EXPIRE_ONLY (1 << 0)
+
+/**
+ * extension type
+ * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx
+ * FUSE_EXT_GROUPS: &fuse_supp_groups extension
+ */
+enum fuse_ext_type {
+ /* Types 0..31 are reserved for fuse_secctx_header */
+ FUSE_MAX_NR_SECCTX = 31,
+ FUSE_EXT_GROUPS = 32,
+};
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -461,6 +626,9 @@ enum fuse_opcode {
FUSE_COPY_FILE_RANGE = 47,
FUSE_SETUPMAPPING = 48,
FUSE_REMOVEMAPPING = 49,
+ FUSE_SYNCFS = 50,
+ FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -477,6 +645,7 @@ enum fuse_notify_code {
FUSE_NOTIFY_STORE = 4,
FUSE_NOTIFY_RETRIEVE = 5,
FUSE_NOTIFY_DELETE = 6,
+ FUSE_NOTIFY_RESEND = 7,
FUSE_NOTIFY_CODE_MAX,
};
@@ -525,6 +694,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
@@ -574,20 +759,20 @@ struct fuse_setattr_in {
struct fuse_open_in {
uint32_t flags;
- uint32_t unused;
+ uint32_t open_flags; /* FUSE_OPEN_... */
};
struct fuse_create_in {
uint32_t flags;
uint32_t mode;
uint32_t umask;
- uint32_t padding;
+ uint32_t open_flags; /* FUSE_OPEN_... */
};
struct fuse_open_out {
uint64_t fh;
uint32_t open_flags;
- uint32_t padding;
+ int32_t backing_id;
};
struct fuse_release_in {
@@ -643,9 +828,13 @@ struct fuse_fsync_in {
uint32_t padding;
};
+#define FUSE_COMPAT_SETXATTR_IN_SIZE 8
+
struct fuse_setxattr_in {
uint32_t size;
uint32_t flags;
+ uint32_t setxattr_flags;
+ uint32_t padding;
};
struct fuse_getxattr_in {
@@ -680,6 +869,8 @@ struct fuse_init_in {
uint32_t minor;
uint32_t max_readahead;
uint32_t flags;
+ uint32_t flags2;
+ uint32_t unused[11];
};
#define FUSE_COMPAT_INIT_OUT_SIZE 8
@@ -696,7 +887,9 @@ struct fuse_init_out {
uint32_t time_gran;
uint16_t max_pages;
uint16_t map_alignment;
- uint32_t unused[8];
+ uint32_t flags2;
+ uint32_t max_stack_depth;
+ uint32_t unused[6];
};
#define CUSE_INIT_INFO_MAX 4096
@@ -779,6 +972,14 @@ struct fuse_fallocate_in {
uint32_t padding;
};
+/**
+ * FUSE request unique ID flag
+ *
+ * Indicates whether this is a resend request. The receiver should handle this
+ * request accordingly.
+ */
+#define FUSE_UNIQUE_RESEND (1ULL << 63)
+
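For illustration (not part of the patch), a minimal sketch of how a server can use FUSE_UNIQUE_RESEND: when the kernel resends a pending request it sets the top bit of the unique request ID, so the receiver can detect and deduplicate resent requests.

#include <stdbool.h>

/* 'in' points at a struct fuse_in_header read from /dev/fuse. */
static bool fuse_req_is_resend(const struct fuse_in_header *in)
{
    return in->unique & FUSE_UNIQUE_RESEND;
}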
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
@@ -787,7 +988,8 @@ struct fuse_in_header {
uint32_t uid;
uint32_t gid;
uint32_t pid;
- uint32_t padding;
+ uint16_t total_extlen; /* length of extensions in 8-byte units */
+ uint16_t padding;
};
struct fuse_out_header {
@@ -804,9 +1006,12 @@ struct fuse_dirent {
char name[];
};
-#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) \
+/* Align variable length records to 64bit boundary */
+#define FUSE_REC_ALIGN(x) \
(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
+
+#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
+#define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x)
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
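For illustration (not part of the patch), a minimal sketch of walking a READDIR reply buffer with these macros; 'len' is the number of bytes returned by the server:

#include <stddef.h>
#include <stdio.h>

static void walk_dirents(const char *buf, size_t len)
{
    size_t off = 0;

    while (off + FUSE_NAME_OFFSET <= len) {
        const struct fuse_dirent *d = (const void *)(buf + off);
        size_t rec = FUSE_DIRENT_SIZE(d); /* name offset + namelen, 8-byte aligned */

        if (off + rec > len) {
            break;
        }
        printf("ino=%llu name=%.*s\n",
               (unsigned long long)d->ino, (int)d->namelen, d->name);
        off += rec;
    }
}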
@@ -829,7 +1034,7 @@ struct fuse_notify_inval_inode_out {
struct fuse_notify_inval_entry_out {
uint64_t parent;
uint32_t namelen;
- uint32_t padding;
+ uint32_t flags;
};
struct fuse_notify_delete_out {
@@ -864,8 +1069,18 @@ struct fuse_notify_retrieve_in {
uint64_t dummy4;
};
+struct fuse_backing_map {
+ int32_t fd;
+ uint32_t flags;
+ uint64_t padding;
+};
+
/* Device ioctls: */
-#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_MAGIC 229
+#define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
+#define FUSE_DEV_IOC_BACKING_OPEN _IOW(FUSE_DEV_IOC_MAGIC, 1, \
+ struct fuse_backing_map)
+#define FUSE_DEV_IOC_BACKING_CLOSE _IOW(FUSE_DEV_IOC_MAGIC, 2, uint32_t)
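For illustration (not part of the patch), a minimal sketch of the passthrough flow these ioctls enable: the server opens the backing file, registers it on its /dev/fuse fd, and returns the resulting id in the OPEN reply. This assumes the kernel advertised FUSE_PASSTHROUGH during INIT; error paths are trimmed.

#include <fcntl.h>
#include <sys/ioctl.h>

static int open_passthrough(int fuse_dev_fd, const char *path,
                            struct fuse_open_out *out)
{
    struct fuse_backing_map map = { .fd = open(path, O_RDWR) };
    int backing_id;

    if (map.fd < 0) {
        return -1;
    }
    /* On success the ioctl returns the new backing id. */
    backing_id = ioctl(fuse_dev_fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
    if (backing_id < 0) {
        return -1;
    }
    out->backing_id = backing_id;
    out->open_flags |= FOPEN_PASSTHROUGH;
    return 0;
}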
struct fuse_lseek_in {
uint64_t fh;
@@ -888,4 +1103,83 @@ struct fuse_copy_file_range_in {
uint64_t flags;
};
+#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0)
+#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1)
+struct fuse_setupmapping_in {
+ /* An already open handle */
+ uint64_t fh;
+ /* Offset into the file to start the mapping */
+ uint64_t foffset;
+ /* Length of mapping required */
+ uint64_t len;
+ /* Flags, FUSE_SETUPMAPPING_FLAG_* */
+ uint64_t flags;
+ /* Offset in Memory Window */
+ uint64_t moffset;
+};
+
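For illustration (not part of the patch), a minimal sketch of filling a FUSE_SETUPMAPPING request to map a writable range of an open file into the DAX window; the offsets and length are placeholder values:

#include <stdint.h>

static void fill_setupmapping(struct fuse_setupmapping_in *in, uint64_t fh)
{
    in->fh = fh;               /* handle from a previous OPEN */
    in->foffset = 0;           /* start of the file range */
    in->len = 2 * 1024 * 1024; /* 2 MiB mapping */
    in->flags = FUSE_SETUPMAPPING_FLAG_READ | FUSE_SETUPMAPPING_FLAG_WRITE;
    in->moffset = 0;           /* destination offset in the DAX window */
}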
+struct fuse_removemapping_in {
+ /* number of fuse_removemapping_one entries that follow */
+ uint32_t count;
+};
+
+struct fuse_removemapping_one {
+ /* Offset into the DAX window at which to start the unmapping */
+ uint64_t moffset;
+ /* Length of mapping required */
+ uint64_t len;
+};
+
+#define FUSE_REMOVEMAPPING_MAX_ENTRY \
+ (PAGE_SIZE / sizeof(struct fuse_removemapping_one))
+
+struct fuse_syncfs_in {
+ uint64_t padding;
+};
+
+/*
+ * For each security context, send a fuse_secctx carrying the size of the
+ * security context. fuse_secctx is followed by the security context name,
+ * which in turn is followed by the actual context label.
+ * Layout: fuse_secctx, name, context
+ */
+struct fuse_secctx {
+ uint32_t size;
+ uint32_t padding;
+};
+
+/*
+ * Describes how many fuse_secctx structures are being sent and the total
+ * size of all security contexts (including the size of fuse_secctx_header
+ * itself).
+ */
+struct fuse_secctx_header {
+ uint32_t size;
+ uint32_t nr_secctx;
+};
+
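For illustration (not part of the patch), a minimal sketch of walking the security-context payload laid out as described above: a fuse_secctx_header, then nr_secctx records of (fuse_secctx, NUL-terminated name, context label of ->size bytes). Any per-record padding rules are not shown here.

#include <stdint.h>
#include <string.h>

static void walk_secctx(const char *p,
                        void (*cb)(const char *name, const void *ctx, uint32_t size))
{
    const struct fuse_secctx_header *hdr = (const void *)p;
    uint32_t i;

    p += sizeof(*hdr);
    for (i = 0; i < hdr->nr_secctx; i++) {
        const struct fuse_secctx *sc = (const void *)p;
        const char *name = p + sizeof(*sc);
        const void *ctx = name + strlen(name) + 1;

        cb(name, ctx, sc->size);
        p = (const char *)ctx + sc->size;
    }
}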
+/**
+ * struct fuse_ext_header - extension header
+ * @size: total size of this extension including this header
+ * @type: type of extension
+ *
+ * This is made compatible with fuse_secctx_header by using type values >
+ * FUSE_MAX_NR_SECCTX
+ */
+struct fuse_ext_header {
+ uint32_t size;
+ uint32_t type;
+};
+
+/**
+ * struct fuse_supp_groups - Supplementary group extension
+ * @nr_groups: number of supplementary groups
+ * @groups: flexible array of group IDs
+ */
+struct fuse_supp_groups {
+ uint32_t nr_groups;
+ uint32_t groups[];
+};
+
#endif /* _LINUX_FUSE_H */
diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h
index ebf72c1031..2221b0c383 100644
--- a/include/standard-headers/linux/input-event-codes.h
+++ b/include/standard-headers/linux/input-event-codes.h
@@ -278,7 +278,8 @@
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
-#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
@@ -515,6 +516,9 @@
#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
#define KEY_IMAGES 0x1ba /* AL Image Browser */
+#define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */
+#define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */
+#define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
@@ -542,6 +546,7 @@
#define KEY_FN_F 0x1e2
#define KEY_FN_S 0x1e3
#define KEY_FN_B 0x1e4
+#define KEY_FN_RIGHT_SHIFT 0x1e5
#define KEY_BRL_DOT1 0x1f1
#define KEY_BRL_DOT2 0x1f2
@@ -597,6 +602,7 @@
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
+#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
@@ -607,6 +613,11 @@
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
+#define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
+#define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
+#define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
@@ -655,6 +666,27 @@
/* Select an area of screen to be copied */
#define KEY_SELECTIVE_SCREENSHOT 0x27a
+/* Move the focus to the next or previous user controllable element within a UI container */
+#define KEY_NEXT_ELEMENT 0x27b
+#define KEY_PREVIOUS_ELEMENT 0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT 0x27e
+#define KEY_SOS 0x27f
+#define KEY_NAV_CHART 0x280
+#define KEY_FISHING_CHART 0x281
+#define KEY_SINGLE_RANGE_RADAR 0x282
+#define KEY_DUAL_RANGE_RADAR 0x283
+#define KEY_RADAR_OVERLAY 0x284
+#define KEY_TRADITIONAL_SONAR 0x285
+#define KEY_CLEARVU_SONAR 0x286
+#define KEY_SIDEVU_SONAR 0x287
+#define KEY_NAV_INFO 0x288
+#define KEY_BRIGHTNESS_MENU 0x289
+
/*
* Some keyboards have keys which do not have a defined meaning, these keys
* are intended to be programmed / bound to macros by the user. For most
@@ -834,6 +866,7 @@
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
+#define ABS_PROFILE 0x21
#define ABS_MISC 0x28
@@ -888,7 +921,8 @@
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
-#define SW_MAX_ 0x0f
+#define SW_MACHINE_COVER 0x10 /* set = cover closed */
+#define SW_MAX_ 0x10
#define SW_CNT (SW_MAX_+1)
/*
diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h
index f89c986190..942ea6aaa9 100644
--- a/include/standard-headers/linux/input.h
+++ b/include/standard-headers/linux/input.h
@@ -75,13 +75,16 @@ struct input_id {
* Note that input core does not clamp reported values to the
* [minimum, maximum] limits, such task is left to userspace.
*
- * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z)
- * is reported in units per millimeter (units/mm), resolution
- * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported
- * in units per radian.
+ * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z,
+ * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units
+ * per millimeter (units/mm), resolution for rotational axes
+ * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
+ * The resolution for the size axes (ABS_MT_TOUCH_MAJOR,
+ * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR)
+ * is reported in units per millimeter (units/mm).
* When INPUT_PROP_ACCELEROMETER is set the resolution changes.
* The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in
- * in units per g (units/g) and in units per degree per second
+ * units per g (units/g) and in units per degree per second
* (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ).
*/
struct input_absinfo {
@@ -268,6 +271,7 @@ struct input_mask {
#define BUS_RMI 0x1D
#define BUS_CEC 0x1E
#define BUS_INTEL_ISHTP 0x1F
+#define BUS_AMD_SFH 0x20
/*
* MT_TOOL types
diff --git a/include/standard-headers/linux/kernel.h b/include/standard-headers/linux/kernel.h
index 1eeba2ef92..7848c5ae25 100644
--- a/include/standard-headers/linux/kernel.h
+++ b/include/standard-headers/linux/kernel.h
@@ -3,13 +3,6 @@
#define _LINUX_KERNEL_H
#include "standard-headers/linux/sysinfo.h"
-
-/*
- * 'kernel.h' contains some often-used function prototypes etc
- */
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
-
-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#include "standard-headers/linux/const.h"
#endif /* _LINUX_KERNEL_H */
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
index f9701410d3..a39193213f 100644
--- a/include/standard-headers/linux/pci_regs.h
+++ b/include/standard-headers/linux/pci_regs.h
@@ -76,9 +76,11 @@
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_MASK 0x7f
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
+#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
@@ -246,7 +248,7 @@
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
-#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
+#define PCI_PM_CAP_PME_D3hot 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
@@ -300,23 +302,23 @@
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-/* Message Signalled Interrupt registers */
+/* Message Signaled Interrupt registers */
-#define PCI_MSI_FLAGS 2 /* Message Control */
+#define PCI_MSI_FLAGS 0x02 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
-#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
-#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
-#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
-#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
-#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
-#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
-#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
+#define PCI_MSI_ADDRESS_LO 0x04 /* Lower 32 bits */
+#define PCI_MSI_ADDRESS_HI 0x08 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
+#define PCI_MSI_DATA_32 0x08 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 0x0c /* Mask bits register for 32-bit devices */
+#define PCI_MSI_PENDING_32 0x10 /* Pending intrs for 32-bit devices */
+#define PCI_MSI_DATA_64 0x0c /* 16 bits of data for 64-bit devices */
+#define PCI_MSI_MASK_64 0x10 /* Mask bits register for 64-bit devices */
+#define PCI_MSI_PENDING_64 0x14 /* Pending intrs for 64-bit devices */
/* MSI-X registers (in MSI-X capability) */
#define PCI_MSIX_FLAGS 2 /* Message Control */
@@ -334,10 +336,10 @@
/* MSI-X Table entry format (in memory mapped by a BAR) */
#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */
-#define PCI_MSIX_ENTRY_DATA 8 /* Message Data */
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0x0 /* Message Address */
+#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */
+#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
/* CompactPCI Hotswap Register */
@@ -469,7 +471,7 @@
/* PCI Express capability registers */
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS 0x02 /* Capabilities register */
#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
@@ -483,7 +485,7 @@
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
@@ -496,13 +498,19 @@
#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
-#define PCI_EXP_DEVCTL 8 /* Device Control */
+#define PCI_EXP_DEVCTL 0x08 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */
+#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
@@ -515,7 +523,7 @@
#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define PCI_EXP_DEVSTA 0x0a /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
@@ -523,15 +531,18 @@
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
#define PCI_EXP_LNKCAP_SLS_32_0GB 0x00000005 /* LNKCAP2 SLS Vector bit 4 */
+#define PCI_EXP_LNKCAP_SLS_64_0GB 0x00000006 /* LNKCAP2 SLS Vector bit 5 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#define PCI_EXP_LNKCAP_ASPM_L0S 0x00000400 /* ASPM L0s Support */
+#define PCI_EXP_LNKCAP_ASPM_L1 0x00000800 /* ASPM L1 Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */
@@ -539,7 +550,7 @@
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
-#define PCI_EXP_LNKCTL 16 /* Link Control */
+#define PCI_EXP_LNKCTL 0x10 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
@@ -552,13 +563,14 @@
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_LNKSTA 0x12 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
#define PCI_EXP_LNKSTA_CLS_32_0GB 0x0005 /* Current Link Speed 32.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_64_0GB 0x0006 /* Current Link Speed 64.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
@@ -571,7 +583,7 @@
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCAP 0x14 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
@@ -584,7 +596,7 @@
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTCTL 0x18 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
@@ -605,8 +617,9 @@
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
+#define PCI_EXP_SLTCTL_ASPL_DISABLE 0x2000 /* Auto Slot Power Limit Disable */
#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_SLTSTA 0x1a /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
@@ -616,15 +629,16 @@
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
-#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCTL 0x1c /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
-#define PCI_EXP_RTSTA 32 /* Root Status */
+#define PCI_EXP_RTSTA 0x20 /* Root Status */
+#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -635,7 +649,7 @@
* Use pcie_capability_read_word() and similar interfaces to use them
* safely.
*/
-#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2 0x24 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
@@ -647,7 +661,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
-#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
@@ -659,31 +673,34 @@
#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
-#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
-#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
-#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
+#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */
+#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */
+#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCAP2_SLS_64_0GB 0x00000040 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
-#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */
#define PCI_EXP_LNKCTL2_TLS 0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
-#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
-#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
+#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
-#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
-#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
+#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+#define PCI_EXP_SLTSTA2 0x3a /* Slot Status 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -720,15 +737,18 @@
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT
+#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
+#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
-#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
+#define PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
@@ -746,11 +766,11 @@
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
-#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
+#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */
/* Same bits as above */
-#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
+#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */
/* Same bits as above */
-#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
+#define PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */
#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
@@ -759,20 +779,20 @@
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
-#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
+#define PCI_ERR_COR_MASK 0x14 /* Correctable Error Mask */
/* Same bits as above */
-#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
-#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
+#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl */
+#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
-#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
-#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
+#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
+#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_STATUS 48
+#define PCI_ERR_ROOT_STATUS 0x30
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
@@ -781,52 +801,59 @@
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
-#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
+#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
/* Virtual Channel */
-#define PCI_VC_PORT_CAP1 4
+#define PCI_VC_PORT_CAP1 0x04
#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
-#define PCI_VC_PORT_CAP2 8
+#define PCI_VC_PORT_CAP2 0x08
#define PCI_VC_CAP2_32_PHASE 0x00000002
#define PCI_VC_CAP2_64_PHASE 0x00000004
#define PCI_VC_CAP2_128_PHASE 0x00000008
#define PCI_VC_CAP2_ARB_OFF 0xff000000
-#define PCI_VC_PORT_CTRL 12
+#define PCI_VC_PORT_CTRL 0x0c
#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
-#define PCI_VC_PORT_STATUS 14
+#define PCI_VC_PORT_STATUS 0x0e
#define PCI_VC_PORT_STATUS_TABLE 0x00000001
-#define PCI_VC_RES_CAP 16
+#define PCI_VC_RES_CAP 0x10
#define PCI_VC_RES_CAP_32_PHASE 0x00000002
#define PCI_VC_RES_CAP_64_PHASE 0x00000004
#define PCI_VC_RES_CAP_128_PHASE 0x00000008
#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
#define PCI_VC_RES_CAP_256_PHASE 0x00000020
#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
-#define PCI_VC_RES_CTRL 20
+#define PCI_VC_RES_CTRL 0x14
#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
#define PCI_VC_RES_CTRL_ID 0x07000000
#define PCI_VC_RES_CTRL_ENABLE 0x80000000
-#define PCI_VC_RES_STATUS 26
+#define PCI_VC_RES_STATUS 0x1a
#define PCI_VC_RES_STATUS_TABLE 0x00000001
#define PCI_VC_RES_STATUS_NEGO 0x00000002
#define PCI_CAP_VC_BASE_SIZEOF 0x10
-#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0c
/* Power Budgeting */
-#define PCI_PWR_DSR 4 /* Data Select Register */
-#define PCI_PWR_DATA 8 /* Data Register */
+#define PCI_PWR_DSR 0x04 /* Data Select Register */
+#define PCI_PWR_DATA 0x08 /* Data Register */
#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
-#define PCI_PWR_CAP 12 /* Capability */
+#define PCI_PWR_CAP 0x0c /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
-#define PCI_EXT_CAP_PWR_SIZEOF 16
+#define PCI_EXT_CAP_PWR_SIZEOF 0x10
+
+/* Root Complex Event Collector Endpoint Association */
+#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */
+#define PCI_RCEC_BUSN 8 /* RCEC Associated Bus Numbers */
+#define PCI_RCEC_BUSN_REG_VER 0x02 /* Least version with BUSN present */
+#define PCI_RCEC_BUSN_NEXT(x) (((x) >> 8) & 0xff)
+#define PCI_RCEC_BUSN_LAST(x) (((x) >> 16) & 0xff)
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
@@ -905,12 +932,13 @@
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
-#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
-#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */
+#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_WIDTH 0x1f00
#define PCI_PASID_CTRL 0x06 /* PASID control register */
-#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
-#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
-#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
+#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */
+#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */
+#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
@@ -943,13 +971,15 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
-#define PCI_EXT_CAP_SRIOV_SIZEOF 64
+#define PCI_EXT_CAP_SRIOV_SIZEOF 0x40
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
+#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */
+#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
@@ -996,12 +1026,12 @@
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
-#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
-#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
-#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */
+#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */
/* Downstream Port Containment */
-#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */
#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
@@ -1009,19 +1039,26 @@
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
-#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL 0x06 /* DPC control */
#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
-#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */
+#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
-#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
@@ -1035,6 +1072,7 @@
/* Precision Time Measurement */
#define PCI_PTM_CAP 0x04 /* PTM Capability */
#define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */
+#define PCI_PTM_CAP_RES 0x00000002 /* Responder capable */
#define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */
#define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */
#define PCI_PTM_CTRL 0x08 /* PTM Control */
@@ -1056,11 +1094,22 @@
#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */
#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */
+#define PCI_L1SS_CTL1_L1_2_MASK 0x00000005
#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f
#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */
+#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */
+
+/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
+#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
+#define PCI_DVSEC_HEADER1_VID(x) ((x) & 0xffff)
+#define PCI_DVSEC_HEADER1_REV(x) (((x) >> 16) & 0xf)
+#define PCI_DVSEC_HEADER1_LEN(x) (((x) >> 20) & 0xfff)
+#define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */
+#define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff)
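For illustration (not part of the patch), a minimal sketch of decoding a DVSEC capability with these macros; read_cfg32() stands in for whatever config-space accessor the caller has and is not defined by this header:

#include <stdint.h>
#include <stdio.h>

extern uint32_t read_cfg32(void *dev, int offset); /* placeholder accessor */

/* 'cap' is the config-space offset of a PCI_EXT_CAP_ID_DVSEC capability. */
static void dump_dvsec(void *dev, int cap)
{
    uint32_t hdr1 = read_cfg32(dev, cap + PCI_DVSEC_HEADER1);
    uint32_t hdr2 = read_cfg32(dev, cap + PCI_DVSEC_HEADER2);

    printf("DVSEC vendor 0x%04x rev %u len %u id 0x%04x\n",
           PCI_DVSEC_HEADER1_VID(hdr1), PCI_DVSEC_HEADER1_REV(hdr1),
           PCI_DVSEC_HEADER1_LEN(hdr1), PCI_DVSEC_HEADER2_ID(hdr2));
}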
/* Data Link Feature */
#define PCI_DLF_CAP 0x04 /* Capabilities Register */
@@ -1072,4 +1121,31 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Data Object Exchange */
+#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
+#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
+#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */
+#define PCI_DOE_CTRL 0x08 /* DOE Control Register */
+#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */
+#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */
+#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */
+#define PCI_DOE_STATUS 0x0c /* DOE Status Register */
+#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */
+#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */
+#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */
+#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
+#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
+#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
+#define PCI_DOE_CAP_SIZEOF 0x18 /* Size of DOE register block */
+
+/* DOE Data Object - note not actually registers */
+#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff
+
+#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000
+
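For illustration (not part of the patch), a minimal sketch of the mailbox handshake these registers describe: push the request into the write mailbox, set GO, wait for the busy bit to clear, then pop the response from the read mailbox (read a dword, then write to the read mailbox to acknowledge it). read_cfg32()/write_cfg32() are placeholder config-space accessors, and a real caller would bound the polling and check PCI_DOE_STATUS_ERROR.

#include <stdint.h>

extern uint32_t read_cfg32(void *dev, int offset);          /* placeholder */
extern void write_cfg32(void *dev, int offset, uint32_t v); /* placeholder */

static int doe_exchange(void *dev, int cap, const uint32_t *req, int nreq,
                        uint32_t *rsp, int nrsp)
{
    int i, n = 0;

    for (i = 0; i < nreq; i++) {
        write_cfg32(dev, cap + PCI_DOE_WRITE, req[i]);
    }
    write_cfg32(dev, cap + PCI_DOE_CTRL, PCI_DOE_CTRL_GO);

    while (read_cfg32(dev, cap + PCI_DOE_STATUS) & PCI_DOE_STATUS_BUSY) {
        /* poll */
    }
    while (read_cfg32(dev, cap + PCI_DOE_STATUS) & PCI_DOE_STATUS_DATA_OBJECT_READY) {
        if (n < nrsp) {
            rsp[n++] = read_cfg32(dev, cap + PCI_DOE_READ);
        }
        write_cfg32(dev, cap + PCI_DOE_READ, 0); /* acknowledge the dword */
    }
    return n;
}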
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/standard-headers/linux/pvpanic.h b/include/standard-headers/linux/pvpanic.h
new file mode 100644
index 0000000000..54b7485390
--- /dev/null
+++ b/include/standard-headers/linux/pvpanic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __PVPANIC_H__
+#define __PVPANIC_H__
+
+#define PVPANIC_PANICKED (1 << 0)
+#define PVPANIC_CRASH_LOADED (1 << 1)
+
+#endif /* __PVPANIC_H__ */
diff --git a/include/standard-headers/linux/udmabuf.h b/include/standard-headers/linux/udmabuf.h
new file mode 100644
index 0000000000..e19eb5b5ce
--- /dev/null
+++ b/include/standard-headers/linux/udmabuf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_UDMABUF_H
+#define _LINUX_UDMABUF_H
+
+#include "standard-headers/linux/types.h"
+
+#define UDMABUF_FLAGS_CLOEXEC 0x01
+
+struct udmabuf_create {
+ uint32_t memfd;
+ uint32_t flags;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct udmabuf_create_item {
+ uint32_t memfd;
+ uint32_t __pad;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct udmabuf_create_list {
+ uint32_t flags;
+ uint32_t count;
+ struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _LINUX_UDMABUF_H */
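For illustration (not part of the patch), a minimal sketch of creating a dma-buf from a memfd with this interface. It assumes /dev/udmabuf exists and that the memfd is sealed against shrinking, which the driver requires; error handling is trimmed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Wrap 'size' bytes of anonymous memory in a dma-buf fd. */
static int make_udmabuf(size_t size)
{
    int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
    int dev, buf;

    ftruncate(memfd, size);
    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK); /* required by the driver */

    struct udmabuf_create create = {
        .memfd = memfd,
        .flags = UDMABUF_FLAGS_CLOEXEC,
        .offset = 0,
        .size = size,
    };

    dev = open("/dev/udmabuf", O_RDWR);
    buf = ioctl(dev, UDMABUF_CREATE, &create); /* returns the dma-buf fd */
    close(dev);
    close(memfd);
    return buf;
}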
diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h
index a678d8fbaa..fd54044936 100644
--- a/include/standard-headers/linux/vhost_types.h
+++ b/include/standard-headers/linux/vhost_types.h
@@ -47,6 +47,22 @@ struct vhost_vring_addr {
uint64_t log_guest_addr;
};
+struct vhost_worker_state {
+ /*
+ * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id.
+ * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker
+ * to free.
+ */
+ unsigned int worker_id;
+};
+
+struct vhost_vring_worker {
+ /* vring index */
+ unsigned int index;
+ /* The id of the vhost_worker returned from VHOST_NEW_WORKER */
+ unsigned int worker_id;
+};
+
/* no alignment requirement */
struct vhost_iotlb_msg {
uint64_t iova;
@@ -60,6 +76,17 @@ struct vhost_iotlb_msg {
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
+/*
+ * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
+ * multiple mappings in one go: beginning with
+ * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
+ * VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
+ * When one of these two values is used as the message type, the rest
+ * of the fields in the message are ignored. There's no guarantee that
+ * these changes take place atomically in the device.
+ */
+#define VHOST_IOTLB_BATCH_BEGIN 5
+#define VHOST_IOTLB_BATCH_END 6
uint8_t type;
};
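For illustration (not part of the patch), a minimal sketch of the batching sequence described above, sent as VHOST_IOTLB_MSG_V2 messages written to a vhost device fd. It assumes the backend negotiated VHOST_BACKEND_F_IOTLB_BATCH; the addresses are placeholders.

#include <stdint.h>
#include <string.h>
#include <unistd.h>

static void iotlb_send(int vhost_fd, uint8_t type, uint64_t iova,
                       uint64_t size, uint64_t uaddr, uint8_t perm)
{
    struct vhost_msg_v2 msg;

    memset(&msg, 0, sizeof(msg));
    msg.type = VHOST_IOTLB_MSG_V2;
    msg.iotlb.type = type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = uaddr;
    msg.iotlb.perm = perm;
    write(vhost_fd, &msg, sizeof(msg));
}

/* Update two mappings in one batch. */
static void update_mappings(int vhost_fd)
{
    iotlb_send(vhost_fd, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0, 0);
    iotlb_send(vhost_fd, VHOST_IOTLB_UPDATE, 0x1000, 0x1000, 0x7f0000001000ULL, VHOST_ACCESS_RW);
    iotlb_send(vhost_fd, VHOST_IOTLB_UPDATE, 0x2000, 0x1000, 0x7f0000002000ULL, VHOST_ACCESS_RW);
    iotlb_send(vhost_fd, VHOST_IOTLB_BATCH_END, 0, 0, 0, 0);
}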
@@ -76,7 +103,7 @@ struct vhost_msg {
struct vhost_msg_v2 {
uint32_t type;
- uint32_t reserved;
+ uint32_t asid;
union {
struct vhost_iotlb_msg iotlb;
uint8_t padding[64];
@@ -96,7 +123,7 @@ struct vhost_memory_region {
struct vhost_memory {
uint32_t nregions;
uint32_t padding;
- struct vhost_memory_region regions[0];
+ struct vhost_memory_region regions[];
};
/* VHOST_SCSI specific definitions */
@@ -124,7 +151,16 @@ struct vhost_scsi_target {
struct vhost_vdpa_config {
uint32_t off;
uint32_t len;
- uint8_t buf[0];
+ uint8_t buf[];
+};
+
+/* vhost vdpa IOVA range
+ * @first: First address that can be mapped by vhost-vDPA
+ * @last: Last address that can be mapped by vhost-vDPA
+ */
+struct vhost_vdpa_iova_range {
+ uint64_t first;
+ uint64_t last;
};
/* Feature bits */
@@ -133,4 +169,28 @@ struct vhost_vdpa_config {
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
+/* IOTLB can accept address space identifier through V2 type of IOTLB
+ * message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+/* Device can be suspended */
+#define VHOST_BACKEND_F_SUSPEND 0x4
+/* Device can be resumed */
+#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID 0x7
+/* IOTLB mappings are not flushed across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
+
#endif
diff --git a/include/standard-headers/linux/virtio_9p.h b/include/standard-headers/linux/virtio_9p.h
index e68f71dbe6..da61dee98c 100644
--- a/include/standard-headers/linux/virtio_9p.h
+++ b/include/standard-headers/linux/virtio_9p.h
@@ -25,7 +25,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_types.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
@@ -36,9 +36,9 @@
struct virtio_9p_config {
/* length of the tag name */
- uint16_t tag_len;
+ __virtio16 tag_len;
/* non-NULL terminated tag name */
- uint8_t tag[0];
+ uint8_t tag[];
} QEMU_PACKED;
#endif /* _LINUX_VIRTIO_9P_H */
diff --git a/include/standard-headers/linux/virtio_blk.h b/include/standard-headers/linux/virtio_blk.h
index 0229b0fbe4..d7be3cf5e4 100644
--- a/include/standard-headers/linux/virtio_blk.h
+++ b/include/standard-headers/linux/virtio_blk.h
@@ -40,6 +40,8 @@
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
+#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */
+#define VIRTIO_BLK_F_ZONED 17 /* Zoned block device */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -55,20 +57,20 @@
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
- uint64_t capacity;
+ __virtio64 capacity;
/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
- uint32_t size_max;
+ __virtio32 size_max;
/* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
- uint32_t seg_max;
+ __virtio32 seg_max;
/* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
struct virtio_blk_geometry {
- uint16_t cylinders;
+ __virtio16 cylinders;
uint8_t heads;
uint8_t sectors;
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
- uint32_t blk_size;
+ __virtio32 blk_size;
/* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
/* exponent for physical block per logical block. */
@@ -76,42 +78,42 @@ struct virtio_blk_config {
/* alignment offset in logical blocks. */
uint8_t alignment_offset;
/* minimum I/O size without performance penalty in logical blocks. */
- uint16_t min_io_size;
+ __virtio16 min_io_size;
/* optimal sustained I/O size in logical blocks. */
- uint32_t opt_io_size;
+ __virtio32 opt_io_size;
/* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
uint8_t wce;
uint8_t unused;
/* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
- uint16_t num_queues;
+ __virtio16 num_queues;
/* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */
/*
* The maximum discard sectors (in 512-byte sectors) for
* one segment.
*/
- uint32_t max_discard_sectors;
+ __virtio32 max_discard_sectors;
/*
* The maximum number of discard segments in a
* discard command.
*/
- uint32_t max_discard_seg;
+ __virtio32 max_discard_seg;
/* Discard commands must be aligned to this number of sectors. */
- uint32_t discard_sector_alignment;
+ __virtio32 discard_sector_alignment;
/* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */
/*
* The maximum number of write zeroes sectors (in 512-byte sectors) in
* one segment.
*/
- uint32_t max_write_zeroes_sectors;
+ __virtio32 max_write_zeroes_sectors;
/*
* The maximum number of segments in a write zeroes
* command.
*/
- uint32_t max_write_zeroes_seg;
+ __virtio32 max_write_zeroes_seg;
/*
* Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the
* deallocation of one or more of the sectors.
@@ -119,6 +121,31 @@ struct virtio_blk_config {
uint8_t write_zeroes_may_unmap;
uint8_t unused1[3];
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_SECURE_ERASE */
+ /*
+ * The maximum secure erase sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __virtio32 max_secure_erase_sectors;
+ /*
+ * The maximum number of secure erase segments in a
+ * secure erase command.
+ */
+ __virtio32 max_secure_erase_seg;
+ /* Secure erase commands must be aligned to this number of sectors. */
+ __virtio32 secure_erase_sector_alignment;
+
+ /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
+ struct virtio_blk_zoned_characteristics {
+ __virtio32 zone_sectors;
+ __virtio32 max_open_zones;
+ __virtio32 max_active_zones;
+ __virtio32 max_append_sectors;
+ __virtio32 write_granularity;
+ uint8_t model;
+ uint8_t unused2[3];
+ } zoned;
} QEMU_PACKED;
/*
@@ -153,6 +180,30 @@ struct virtio_blk_config {
/* Write zeroes command */
#define VIRTIO_BLK_T_WRITE_ZEROES 13
+/* Secure erase command */
+#define VIRTIO_BLK_T_SECURE_ERASE 14
+
+/* Zone append command */
+#define VIRTIO_BLK_T_ZONE_APPEND 15
+
+/* Report zones command */
+#define VIRTIO_BLK_T_ZONE_REPORT 16
+
+/* Open zone command */
+#define VIRTIO_BLK_T_ZONE_OPEN 18
+
+/* Close zone command */
+#define VIRTIO_BLK_T_ZONE_CLOSE 20
+
+/* Finish zone command */
+#define VIRTIO_BLK_T_ZONE_FINISH 22
+
+/* Reset zone command */
+#define VIRTIO_BLK_T_ZONE_RESET 24
+
+/* Reset All zones command */
+#define VIRTIO_BLK_T_ZONE_RESET_ALL 26
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
@@ -172,6 +223,72 @@ struct virtio_blk_outhdr {
__virtio64 sector;
};
+/*
+ * Supported zoned device models.
+ */
+
+/* Regular block device */
+#define VIRTIO_BLK_Z_NONE 0
+/* Host-managed zoned device */
+#define VIRTIO_BLK_Z_HM 1
+/* Host-aware zoned device */
+#define VIRTIO_BLK_Z_HA 2
+
+/*
+ * Zone descriptor. A part of VIRTIO_BLK_T_ZONE_REPORT command reply.
+ */
+struct virtio_blk_zone_descriptor {
+ /* Zone capacity */
+ __virtio64 z_cap;
+ /* The starting sector of the zone */
+ __virtio64 z_start;
+ /* Zone write pointer position in sectors */
+ __virtio64 z_wp;
+ /* Zone type */
+ uint8_t z_type;
+ /* Zone state */
+ uint8_t z_state;
+ uint8_t reserved[38];
+};
+
+struct virtio_blk_zone_report {
+ __virtio64 nr_zones;
+ uint8_t reserved[56];
+ struct virtio_blk_zone_descriptor zones[];
+};
+
+/*
+ * Supported zone types.
+ */
+
+/* Conventional zone */
+#define VIRTIO_BLK_ZT_CONV 1
+/* Sequential Write Required zone */
+#define VIRTIO_BLK_ZT_SWR 2
+/* Sequential Write Preferred zone */
+#define VIRTIO_BLK_ZT_SWP 3
+
+/*
+ * Zone states that are available for zones of all types.
+ */
+
+/* Not a write pointer (conventional zones only) */
+#define VIRTIO_BLK_ZS_NOT_WP 0
+/* Empty */
+#define VIRTIO_BLK_ZS_EMPTY 1
+/* Implicitly Open */
+#define VIRTIO_BLK_ZS_IOPEN 2
+/* Explicitly Open */
+#define VIRTIO_BLK_ZS_EOPEN 3
+/* Closed */
+#define VIRTIO_BLK_ZS_CLOSED 4
+/* Read-Only */
+#define VIRTIO_BLK_ZS_RDONLY 13
+/* Full */
+#define VIRTIO_BLK_ZS_FULL 14
+/* Offline */
+#define VIRTIO_BLK_ZS_OFFLINE 15
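
As a rough sketch of how a driver might walk a VIRTIO_BLK_T_ZONE_REPORT reply laid out as above (a virtio_blk_zone_report header followed by 64-byte descriptors), the following compiles standalone. The struct names are local mirrors, le64toh() assumes a glibc-style <endian.h>, and count_writable_zones() is a hypothetical helper, not part of the header.

#include <stdint.h>
#include <stddef.h>
#include <endian.h>                 /* le64toh(); glibc-style header assumed */

struct zone_desc {                  /* mirror of virtio_blk_zone_descriptor */
    uint64_t z_cap;
    uint64_t z_start;
    uint64_t z_wp;
    uint8_t  z_type;
    uint8_t  z_state;
    uint8_t  reserved[38];
};

struct zone_report {                /* mirror of virtio_blk_zone_report */
    uint64_t nr_zones;
    uint8_t  reserved[56];
    struct zone_desc zones[];
};

/* Count zones a driver could still write to (empty, open or closed). */
static uint64_t count_writable_zones(const void *buf, size_t len)
{
    const struct zone_report *rep = buf;
    uint64_t nr, i, writable = 0;

    if (len < sizeof(*rep))
        return 0;
    nr = le64toh(rep->nr_zones);
    /* Trust the buffer size over the device-reported count. */
    if (nr > (len - sizeof(*rep)) / sizeof(struct zone_desc))
        nr = (len - sizeof(*rep)) / sizeof(struct zone_desc);

    for (i = 0; i < nr; i++) {
        switch (rep->zones[i].z_state) {
        case 1: case 2: case 3: case 4:   /* EMPTY, IOPEN, EOPEN, CLOSED */
            writable++;
            break;
        default:
            break;
        }
    }
    return writable;
}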
+
/* Unmap this range (only valid for write zeroes command) */
#define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001
@@ -198,4 +315,11 @@ struct virtio_scsi_inhdr {
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
+
+/* Error codes that are specific to zoned block devices */
+#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3
+#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4
+#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5
+#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6
+
#endif /* _LINUX_VIRTIO_BLK_H */
diff --git a/include/standard-headers/linux/virtio_bt.h b/include/standard-headers/linux/virtio_bt.h
new file mode 100644
index 0000000000..a11ecc3f92
--- /dev/null
+++ b/include/standard-headers/linux/virtio_bt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _LINUX_VIRTIO_BT_H
+#define _LINUX_VIRTIO_BT_H
+
+#include "standard-headers/linux/virtio_types.h"
+
+/* Feature bits */
+#define VIRTIO_BT_F_VND_HCI 0 /* Indicates vendor command support */
+#define VIRTIO_BT_F_MSFT_EXT 1 /* Indicates MSFT vendor support */
+#define VIRTIO_BT_F_AOSP_EXT 2 /* Indicates AOSP vendor support */
+#define VIRTIO_BT_F_CONFIG_V2 3 /* Use second version configuration */
+
+enum virtio_bt_config_type {
+ VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
+ VIRTIO_BT_CONFIG_TYPE_AMP = 1,
+};
+
+enum virtio_bt_config_vendor {
+ VIRTIO_BT_CONFIG_VENDOR_NONE = 0,
+ VIRTIO_BT_CONFIG_VENDOR_ZEPHYR = 1,
+ VIRTIO_BT_CONFIG_VENDOR_INTEL = 2,
+ VIRTIO_BT_CONFIG_VENDOR_REALTEK = 3,
+};
+
+struct virtio_bt_config {
+ uint8_t type;
+ uint16_t vendor;
+ uint16_t msft_opcode;
+} QEMU_PACKED;
+
+struct virtio_bt_config_v2 {
+ uint8_t type;
+ uint8_t alignment;
+ uint16_t vendor;
+ uint16_t msft_opcode;
+};
+
+#endif /* _LINUX_VIRTIO_BT_H */
diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
index 9a69d9e242..45be0fa1bc 100644
--- a/include/standard-headers/linux/virtio_config.h
+++ b/include/standard-headers/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 38
+#define VIRTIO_TRANSPORT_F_END 42
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -67,18 +67,26 @@
#define VIRTIO_F_VERSION_1 32
/*
- * If clear - device has the IOMMU bypass quirk feature.
- * If set - use platform tools to detect the IOMMU.
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
*
* Note the reverse polarity (compared to most other features),
* this is for compatibility with legacy systems.
*/
-#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_ACCESS_PLATFORM 33
+/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM
/* This feature indicates support for the packed virtqueue layout. */
#define VIRTIO_F_RING_PACKED 34
/*
+ * The in-order feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/*
* This feature indicates that memory accesses by the driver and the
* device are ordered in a way described by the platform.
*/
@@ -88,4 +96,26 @@
* Does the device support Single Root I/O Virtualization?
*/
#define VIRTIO_F_SR_IOV 37
+
+/*
+ * This feature indicates that the driver passes extra data (besides
+ * identifying the virtqueue) in its device notifications.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA 39
+
+/*
+ * This feature indicates that the driver can reset a queue individually.
+ */
+#define VIRTIO_F_RING_RESET 40
+
+/*
+ * This feature indicates that the device supports administration virtqueues.
+ */
+#define VIRTIO_F_ADMIN_VQ 41
+
#endif /* _LINUX_VIRTIO_CONFIG_H */
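
The feature defines above are bit positions within a 64-bit feature word, with bits 28..41 reserved for the transport. A small sketch, assuming the usual convention that the transport range is treated with an exclusive upper bound (as the Linux virtio core does); the helper names are illustrative only.

#include <stdint.h>
#include <stdbool.h>

#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END   42
#define VIRTIO_F_RING_RESET      40

/* Feature bits are bit numbers inside the 64-bit negotiated feature word. */
static bool virtio_has_feature(uint64_t features, unsigned int fbit)
{
    return (features >> fbit) & 1;
}

/* Transport (as opposed to per-device) features occupy [START, END). */
static bool virtio_is_transport_feature(unsigned int fbit)
{
    return fbit >= VIRTIO_TRANSPORT_F_START && fbit < VIRTIO_TRANSPORT_F_END;
}

For example, virtio_has_feature(dev_features, VIRTIO_F_RING_RESET) tells the driver whether per-queue reset may be used.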
diff --git a/include/standard-headers/linux/virtio_console.h b/include/standard-headers/linux/virtio_console.h
index 0dedc9e6f5..71f5f648e3 100644
--- a/include/standard-headers/linux/virtio_console.h
+++ b/include/standard-headers/linux/virtio_console.h
@@ -45,13 +45,13 @@
struct virtio_console_config {
 /* columns of the screens */
- uint16_t cols;
+ __virtio16 cols;
/* rows of the screens */
- uint16_t rows;
+ __virtio16 rows;
/* max. number of ports this device can hold */
- uint32_t max_nr_ports;
+ __virtio32 max_nr_ports;
/* emergency write register */
- uint32_t emerg_wr;
+ __virtio32 emerg_wr;
} QEMU_PACKED;
/*
diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h
index 5ff0b4ee59..68066dafb6 100644
--- a/include/standard-headers/linux/virtio_crypto.h
+++ b/include/standard-headers/linux/virtio_crypto.h
@@ -37,6 +37,7 @@
#define VIRTIO_CRYPTO_SERVICE_HASH 1
#define VIRTIO_CRYPTO_SERVICE_MAC 2
#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
uint32_t opcode;
uint32_t algo;
uint32_t flag;
@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
uint8_t padding[32];
};
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+ uint32_t padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH 0
+#define VIRTIO_CRYPTO_RSA_MD2 1
+#define VIRTIO_CRYPTO_RSA_MD3 2
+#define VIRTIO_CRYPTO_RSA_MD4 3
+#define VIRTIO_CRYPTO_RSA_MD5 4
+#define VIRTIO_CRYPTO_RSA_SHA1 5
+#define VIRTIO_CRYPTO_RSA_SHA256 6
+#define VIRTIO_CRYPTO_RSA_SHA384 7
+#define VIRTIO_CRYPTO_RSA_SHA512 8
+#define VIRTIO_CRYPTO_RSA_SHA224 9
+ uint32_t hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+ uint32_t curve_id;
+ uint32_t padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER 0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA 1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA 2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+ uint32_t algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+ uint32_t keytype;
+ uint32_t keylen;
+
+ union {
+ struct virtio_crypto_rsa_session_para rsa;
+ struct virtio_crypto_ecdsa_session_para ecdsa;
+ } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+ struct virtio_crypto_akcipher_session_para para;
+ uint8_t padding[36];
+};
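
As an illustration of the new akcipher session parameters, here is a sketch that fills them for an RSA private-key signing session with PKCS#1 padding and SHA-256. The struct mirrors and helper name are local to the example, and the little-endian conversions reflect the usual virtio-crypto wire format (treat the byte-order handling as an assumption).

#include <stdint.h>
#include <string.h>
#include <endian.h>                         /* htole32(); glibc-style header */

/* Condensed mirrors of the session parameter structures above. */
struct rsa_para   { uint32_t padding_algo; uint32_t hash_algo; };
struct ecdsa_para { uint32_t curve_id; uint32_t padding; };

struct akcipher_para {
    uint32_t algo;
    uint32_t keytype;
    uint32_t keylen;
    union {
        struct rsa_para   rsa;
        struct ecdsa_para ecdsa;
    } u;
};

#define AKCIPHER_RSA      1     /* VIRTIO_CRYPTO_AKCIPHER_RSA */
#define KEY_TYPE_PRIVATE  2     /* VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE */
#define RSA_PKCS1_PADDING 1     /* VIRTIO_CRYPTO_RSA_PKCS1_PADDING */
#define RSA_SHA256        6     /* VIRTIO_CRYPTO_RSA_SHA256 */

/* Prepare parameters for an RSA private-key session (e.g. for SIGN). */
static void akcipher_rsa_sign_params(struct akcipher_para *p, uint32_t keylen)
{
    memset(p, 0, sizeof(*p));
    p->algo               = htole32(AKCIPHER_RSA);
    p->keytype            = htole32(KEY_TYPE_PRIVATE);
    p->keylen             = htole32(keylen);        /* length of the key blob */
    p->u.rsa.padding_algo = htole32(RSA_PKCS1_PADDING);
    p->u.rsa.hash_algo    = htole32(RSA_SHA256);
}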
+
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
mac_create_session;
struct virtio_crypto_aead_create_session_req
aead_create_session;
+ struct virtio_crypto_akcipher_create_session_req
+ akcipher_create_session;
struct virtio_crypto_destroy_session_req
destroy_session;
uint8_t padding[56];
@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
uint32_t opcode;
/* algo should be service-specific algorithms */
uint32_t algo;
@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
uint8_t padding[32];
};
+struct virtio_crypto_akcipher_para {
+ uint32_t src_data_len;
+ uint32_t dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+ struct virtio_crypto_akcipher_para para;
+ uint8_t padding[40];
+};
+
/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
struct virtio_crypto_op_header header;
@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
struct virtio_crypto_hash_data_req hash_req;
struct virtio_crypto_mac_data_req mac_req;
struct virtio_crypto_aead_data_req aead_req;
+ struct virtio_crypto_akcipher_data_req akcipher_req;
uint8_t padding[48];
} u;
};
@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
@@ -438,7 +518,7 @@ struct virtio_crypto_config {
uint32_t max_cipher_key_len;
/* Maximum length of authenticated key */
uint32_t max_auth_key_len;
- uint32_t reserve;
+ uint32_t akcipher_algo;
/* Maximum size of each crypto request's content */
uint64_t max_size;
};
diff --git a/include/standard-headers/linux/virtio_fs.h b/include/standard-headers/linux/virtio_fs.h
index 9d88817a6b..a32fe8a64c 100644
--- a/include/standard-headers/linux/virtio_fs.h
+++ b/include/standard-headers/linux/virtio_fs.h
@@ -16,4 +16,7 @@ struct virtio_fs_config {
uint32_t num_request_queues;
} QEMU_PACKED;
+/* For the id field in virtio_pci_shm_cap */
+#define VIRTIO_FS_SHMCAP_ID_CACHE 0
+
#endif /* _LINUX_VIRTIO_FS_H */
diff --git a/include/standard-headers/linux/virtio_gpio.h b/include/standard-headers/linux/virtio_gpio.h
new file mode 100644
index 0000000000..2b5cf06349
--- /dev/null
+++ b/include/standard-headers/linux/virtio_gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _LINUX_VIRTIO_GPIO_H
+#define _LINUX_VIRTIO_GPIO_H
+
+#include "standard-headers/linux/types.h"
+
+/* Virtio GPIO Feature bits */
+#define VIRTIO_GPIO_F_IRQ 0
+
+/* Virtio GPIO request types */
+#define VIRTIO_GPIO_MSG_GET_NAMES 0x0001
+#define VIRTIO_GPIO_MSG_GET_DIRECTION 0x0002
+#define VIRTIO_GPIO_MSG_SET_DIRECTION 0x0003
+#define VIRTIO_GPIO_MSG_GET_VALUE 0x0004
+#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005
+#define VIRTIO_GPIO_MSG_IRQ_TYPE 0x0006
+
+/* Possible values of the status field */
+#define VIRTIO_GPIO_STATUS_OK 0x0
+#define VIRTIO_GPIO_STATUS_ERR 0x1
+
+/* Direction types */
+#define VIRTIO_GPIO_DIRECTION_NONE 0x00
+#define VIRTIO_GPIO_DIRECTION_OUT 0x01
+#define VIRTIO_GPIO_DIRECTION_IN 0x02
+
+/* Virtio GPIO IRQ types */
+#define VIRTIO_GPIO_IRQ_TYPE_NONE 0x00
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+struct virtio_gpio_config {
+ uint16_t ngpio;
+ uint8_t padding[2];
+ uint32_t gpio_names_size;
+};
+
+/* Virtio GPIO Request / Response */
+struct virtio_gpio_request {
+ uint16_t type;
+ uint16_t gpio;
+ uint32_t value;
+};
+
+struct virtio_gpio_response {
+ uint8_t status;
+ uint8_t value;
+};
+
+struct virtio_gpio_response_get_names {
+ uint8_t status;
+ uint8_t value[];
+};
+
+/* Virtio GPIO IRQ Request / Response */
+struct virtio_gpio_irq_request {
+ uint16_t gpio;
+};
+
+struct virtio_gpio_irq_response {
+ uint8_t status;
+};
+
+/* Possible values of the interrupt status field */
+#define VIRTIO_GPIO_IRQ_STATUS_INVALID 0x0
+#define VIRTIO_GPIO_IRQ_STATUS_VALID 0x1
+
+#endif /* _LINUX_VIRTIO_GPIO_H */
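
A minimal sketch of composing one requestq message with the structures above: the driver fills a virtio_gpio_request in the device-readable buffer and receives the two-byte virtio_gpio_response back. htole16()/htole32() assume a glibc-style <endian.h>, since the fields are little-endian on the wire, and gpio_set_value_req() is a hypothetical helper.

#include <stdint.h>
#include <endian.h>   /* htole16()/htole32(); wire format is little-endian */

#define VIRTIO_GPIO_MSG_SET_VALUE 0x0005

struct gpio_req {                /* mirror of struct virtio_gpio_request */
    uint16_t type;
    uint16_t gpio;
    uint32_t value;
};

/* Fill a request that drives GPIO line `gpio` to `value` (0 or 1). */
static void gpio_set_value_req(struct gpio_req *req,
                               uint16_t gpio, uint32_t value)
{
    req->type  = htole16(VIRTIO_GPIO_MSG_SET_VALUE);
    req->gpio  = htole16(gpio);
    req->value = htole32(value);
}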
diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h
index b8fa15f0ac..2db643ed8f 100644
--- a/include/standard-headers/linux/virtio_gpu.h
+++ b/include/standard-headers/linux/virtio_gpu.h
@@ -50,6 +50,20 @@
* VIRTIO_GPU_CMD_GET_EDID
*/
#define VIRTIO_GPU_F_EDID 1
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID
+ */
+#define VIRTIO_GPU_F_RESOURCE_UUID 2
+
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -66,6 +80,9 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
+ VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +93,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
+ VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -87,6 +106,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
+ VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -97,14 +118,29 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+enum virtio_gpu_shm_id {
+ VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+ /*
+ * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB
+ * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB
+ */
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to be used when creating the fence
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
uint32_t type;
uint32_t flags;
uint64_t fence_id;
uint32_t ctx_id;
- uint32_t padding;
+ uint8_t ring_idx;
+ uint8_t padding[3];
};
/* data passed in the cursor vq */
@@ -244,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
uint32_t nlen;
- uint32_t padding;
+ uint32_t context_init;
char debug_name[64];
};
@@ -272,6 +309,8 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
+/* 3 is reserved for gfxstream */
+#define VIRTIO_GPU_CAPSET_VENUS 4
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
@@ -340,4 +379,80 @@ enum virtio_gpu_formats {
VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};
+/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */
+struct virtio_gpu_resource_assign_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */
+struct virtio_gpu_resp_resource_uuid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint8_t uuid[16];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob mem */
+ uint32_t blob_mem;
+ uint32_t blob_flags;
+ uint32_t nr_entries;
+ uint64_t blob_id;
+ uint64_t size;
+ /*
+ * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
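
To make the trailing-entries comment concrete, here is a hedged sizing sketch: the entries that follow the fixed part are struct virtio_gpu_mem_entry (defined earlier in this header and mirrored locally so the snippet compiles on its own), so the device-readable length is the fixed command size plus nr_entries of them. The helper name and the hard-coded fixed size are illustrative only.

#include <stdint.h>
#include <stddef.h>

/* Local mirror of struct virtio_gpu_mem_entry from earlier in this header. */
struct gpu_mem_entry {
    uint64_t addr;
    uint32_t length;
    uint32_t padding;
};

/* Fixed part of VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB as laid out above:
 * ctrl_hdr (24 bytes) + resource_id, blob_mem, blob_flags, nr_entries
 * (4 bytes each) + blob_id, size (8 bytes each) = 56 bytes. */
#define CREATE_BLOB_FIXED_SIZE 56

/* Total bytes the driver places in the device-readable buffer. */
static size_t create_blob_cmd_size(uint32_t nr_entries)
{
    return CREATE_BLOB_FIXED_SIZE +
           (size_t)nr_entries * sizeof(struct gpu_mem_entry);
}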
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ uint32_t scanout_id;
+ uint32_t resource_id;
+ uint32_t width;
+ uint32_t height;
+ uint32_t format;
+ uint32_t padding;
+ uint32_t strides[4];
+ uint32_t offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+ uint64_t offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t map_info;
+ uint32_t padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ uint32_t resource_id;
+ uint32_t padding;
+};
+
#endif
diff --git a/include/standard-headers/linux/virtio_i2c.h b/include/standard-headers/linux/virtio_i2c.h
new file mode 100644
index 0000000000..09fa907793
--- /dev/null
+++ b/include/standard-headers/linux/virtio_i2c.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Definitions for virtio I2C Adapter
+ *
+ * Copyright (c) 2021 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _LINUX_VIRTIO_I2C_H
+#define _LINUX_VIRTIO_I2C_H
+
+#include "standard-headers/linux/const.h"
+#include "standard-headers/linux/types.h"
+
+/* Virtio I2C Feature bits */
+#define VIRTIO_I2C_F_ZERO_LENGTH_REQUEST 0
+
+/* Bit 0 of the @flags field in struct virtio_i2c_out_hdr, used to group requests */
+#define VIRTIO_I2C_FLAGS_FAIL_NEXT _BITUL(0)
+
+/* Bit 1 of the @flags field in struct virtio_i2c_out_hdr, used to mark a buffer as a read */
+#define VIRTIO_I2C_FLAGS_M_RD _BITUL(1)
+
+/**
+ * struct virtio_i2c_out_hdr - the virtio I2C message OUT header
+ * @addr: the controlled device address
+ * @padding: used to pad to full dword
+ * @flags: used for feature extensibility
+ */
+struct virtio_i2c_out_hdr {
+ uint16_t addr;
+ uint16_t padding;
+ uint32_t flags;
+};
+
+/**
+ * struct virtio_i2c_in_hdr - the virtio I2C message IN header
+ * @status: the processing result from the backend
+ */
+struct virtio_i2c_in_hdr {
+ uint8_t status;
+};
+
+/* The final status written by the device */
+#define VIRTIO_I2C_MSG_OK 0
+#define VIRTIO_I2C_MSG_ERR 1
+
+#endif /* _LINUX_VIRTIO_I2C_H */
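
A small sketch of filling the OUT header defined above for one transfer. The left-shift of the 7-bit address follows what the Linux virtio-i2c driver does per the spec, but treat it, the local mirrors, and the helper name as assumptions rather than part of this header.

#include <stdint.h>
#include <endian.h>               /* htole16()/htole32(); glibc-style header */

#define I2C_FLAGS_FAIL_NEXT (1u << 0)   /* VIRTIO_I2C_FLAGS_FAIL_NEXT */
#define I2C_FLAGS_M_RD      (1u << 1)   /* VIRTIO_I2C_FLAGS_M_RD */

struct i2c_out_hdr {              /* mirror of struct virtio_i2c_out_hdr */
    uint16_t addr;
    uint16_t padding;
    uint32_t flags;
};

/* Fill the OUT header for one transfer. `addr7` is the 7-bit target
 * address, carried shifted left by one bit (assumption from the spec).
 * Set `grouped` when the next request must fail if this one fails. */
static void i2c_fill_out_hdr(struct i2c_out_hdr *hdr, uint16_t addr7,
                             int is_read, int grouped)
{
    uint32_t flags = 0;

    if (is_read)
        flags |= I2C_FLAGS_M_RD;
    if (grouped)
        flags |= I2C_FLAGS_FAIL_NEXT;

    hdr->addr    = htole16((uint16_t)(addr7 << 1));
    hdr->padding = 0;
    hdr->flags   = htole32(flags);
}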
diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h
index b052355ac7..7aa2eb7662 100644
--- a/include/standard-headers/linux/virtio_ids.h
+++ b/include/standard-headers/linux/virtio_ids.h
@@ -29,24 +29,56 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#define VIRTIO_ID_NET 1 /* virtio net */
-#define VIRTIO_ID_BLOCK 2 /* virtio block */
-#define VIRTIO_ID_CONSOLE 3 /* virtio console */
-#define VIRTIO_ID_RNG 4 /* virtio rng */
-#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
-#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
-#define VIRTIO_ID_SCSI 8 /* virtio scsi */
-#define VIRTIO_ID_9P 9 /* 9p virtio console */
-#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
-#define VIRTIO_ID_CAIF 12 /* Virtio caif */
-#define VIRTIO_ID_GPU 16 /* virtio GPU */
-#define VIRTIO_ID_INPUT 18 /* virtio input */
-#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
-#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
-#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
-#define VIRTIO_ID_MEM 24 /* virtio mem */
-#define VIRTIO_ID_FS 26 /* virtio filesystem */
-#define VIRTIO_ID_PMEM 27 /* virtio pmem */
-#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_NET 1 /* virtio net */
+#define VIRTIO_ID_BLOCK 2 /* virtio block */
+#define VIRTIO_ID_CONSOLE 3 /* virtio console */
+#define VIRTIO_ID_RNG 4 /* virtio rng */
+#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
+#define VIRTIO_ID_IOMEM 6 /* virtio ioMemory */
+#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
+#define VIRTIO_ID_SCSI 8 /* virtio scsi */
+#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_MAC80211_WLAN 10 /* virtio WLAN MAC */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
+#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_MEMORY_BALLOON 13 /* virtio memory balloon */
+#define VIRTIO_ID_GPU 16 /* virtio GPU */
+#define VIRTIO_ID_CLOCK 17 /* virtio clock/timer */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
+#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
+#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_SIGNAL_DIST 21 /* virtio signal distribution device */
+#define VIRTIO_ID_PSTORE 22 /* virtio pstore device */
+#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM 24 /* virtio mem */
+#define VIRTIO_ID_SOUND 25 /* virtio sound */
+#define VIRTIO_ID_FS 26 /* virtio filesystem */
+#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_RPMB 28 /* virtio rpmb */
+#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */
+#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */
+#define VIRTIO_ID_SCMI 32 /* virtio SCMI */
+#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module */
+#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */
+#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */
+#define VIRTIO_ID_CAN 36 /* virtio can */
+#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */
+#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */
+#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
+#define VIRTIO_ID_BT 40 /* virtio bluetooth */
+#define VIRTIO_ID_GPIO 41 /* virtio gpio */
+
+/*
+ * Virtio Transitional IDs
+ */
+
+#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/standard-headers/linux/virtio_iommu.h b/include/standard-headers/linux/virtio_iommu.h
index b9443b83a1..366379c2f0 100644
--- a/include/standard-headers/linux/virtio_iommu.h
+++ b/include/standard-headers/linux/virtio_iommu.h
@@ -16,6 +16,7 @@
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
#define VIRTIO_IOMMU_F_MMIO 5
+#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6
struct virtio_iommu_range_64 {
uint64_t start;
@@ -36,6 +37,8 @@ struct virtio_iommu_config {
struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
uint32_t probe_size;
+ uint8_t bypass;
+ uint8_t reserved[3];
};
/* Request types */
@@ -66,11 +69,14 @@ struct virtio_iommu_req_tail {
uint8_t reserved[3];
};
+#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0)
+
struct virtio_iommu_req_attach {
struct virtio_iommu_req_head head;
uint32_t domain;
uint32_t endpoint;
- uint8_t reserved[8];
+ uint32_t flags;
+ uint8_t reserved[4];
struct virtio_iommu_req_tail tail;
};
diff --git a/include/standard-headers/linux/virtio_mem.h b/include/standard-headers/linux/virtio_mem.h
index 05e5ade75d..18c74c527c 100644
--- a/include/standard-headers/linux/virtio_mem.h
+++ b/include/standard-headers/linux/virtio_mem.h
@@ -68,9 +68,10 @@
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
- * read/written. Such memory should, in general, not be touched. E.g.,
- * even writing might succeed, but the values will simply be discarded at
- * random points in time.
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
*
* It can happen that the device cannot process a request, because it is
* busy. The device driver has to retry later.
@@ -87,6 +88,8 @@
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* --- virtio-mem: guest -> host requests --- */
diff --git a/include/standard-headers/linux/virtio_mmio.h b/include/standard-headers/linux/virtio_mmio.h
index c4b09689ab..0650f91bea 100644
--- a/include/standard-headers/linux/virtio_mmio.h
+++ b/include/standard-headers/linux/virtio_mmio.h
@@ -122,6 +122,17 @@
#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
/* Configuration atomicity value */
#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
index a90f79e1b1..0f88417742 100644
--- a/include/standard-headers/linux/virtio_net.h
+++ b/include/standard-headers/linux/virtio_net.h
@@ -56,8 +56,13 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-
+#define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */
+#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
+#define VIRTIO_NET_F_GUEST_USO4 54 /* Guest can handle USOv4 in. */
+#define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. */
+#define VIRTIO_NET_F_HOST_USO 56 /* Host can handle USO in. */
#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
+#define VIRTIO_NET_F_GUEST_HDRLEN 59 /* Guest provides the exact hdr_len value. */
#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device
@@ -87,14 +92,14 @@ struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
uint8_t mac[ETH_ALEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
- uint16_t status;
+ __virtio16 status;
/* Maximum number of each of transmit and receive queues;
* see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
* Legal values are between 1 and 0x8000
*/
- uint16_t max_virtqueue_pairs;
+ __virtio16 max_virtqueue_pairs;
/* Default maximum transmit unit advice */
- uint16_t mtu;
+ __virtio16 mtu;
/*
* speed, in units of 1Mb. All values 0 to INT_MAX are legal.
* Any other value stands for unknown.
@@ -130,6 +135,7 @@ struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4 & IPv6 UDP (USO) */
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
uint8_t gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
@@ -355,4 +361,49 @@ struct virtio_net_hash_config {
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+/*
+ * Control notifications coalescing.
+ *
+ * Request the device to change the notifications coalescing parameters.
+ *
+ * Available with the VIRTIO_NET_F_NOTF_COAL feature bit.
+ */
+#define VIRTIO_NET_CTRL_NOTF_COAL 6
+/*
+ * Set the tx-usecs/tx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_tx {
+ /* Maximum number of packets to send before a TX notification */
+ uint32_t tx_max_packets;
+ /* Maximum number of usecs to delay a TX notification */
+ uint32_t tx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0
+
+/*
+ * Set the rx-usecs/rx-max-packets parameters.
+ */
+struct virtio_net_ctrl_coal_rx {
+ /* Maximum number of packets to receive before a RX notification */
+ uint32_t rx_max_packets;
+ /* Maximum number of usecs to delay a RX notification */
+ uint32_t rx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET 2
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET 3
+
+struct virtio_net_ctrl_coal {
+ uint32_t max_packets;
+ uint32_t max_usecs;
+};
+
+struct virtio_net_ctrl_coal_vq {
+ uint16_t vqn;
+ uint16_t reserved;
+ struct virtio_net_ctrl_coal coal;
+};
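
To show how the per-virtqueue coalescing command is assembled, here is a sketch that fills the payload of a VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET control command. The struct mirrors are local, the fields are written little-endian as on the control virtqueue, and coal_vq_set() is a hypothetical helper.

#include <stdint.h>
#include <endian.h>               /* htole16()/htole32(); glibc-style header */

struct ctrl_coal {                /* mirror of struct virtio_net_ctrl_coal */
    uint32_t max_packets;
    uint32_t max_usecs;
};

struct ctrl_coal_vq {             /* mirror of struct virtio_net_ctrl_coal_vq */
    uint16_t vqn;
    uint16_t reserved;
    struct ctrl_coal coal;
};

/* Build the payload for VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET: notify at most
 * every `usecs` microseconds or every `packets` packets on queue `vqn`. */
static void coal_vq_set(struct ctrl_coal_vq *c, uint16_t vqn,
                        uint32_t packets, uint32_t usecs)
{
    c->vqn              = htole16(vqn);
    c->reserved         = 0;
    c->coal.max_packets = htole32(packets);
    c->coal.max_usecs   = htole32(usecs);
}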
+
#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h
index 9262acd130..4010216103 100644
--- a/include/standard-headers/linux/virtio_pci.h
+++ b/include/standard-headers/linux/virtio_pci.h
@@ -113,6 +113,8 @@
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG 5
+/* Additional shared memory capability */
+#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
/* This is the PCI capability header: */
struct virtio_pci_cap {
@@ -121,11 +123,18 @@ struct virtio_pci_cap {
uint8_t cap_len; /* Generic PCI field: capability length */
uint8_t cfg_type; /* Identifies the structure. */
uint8_t bar; /* Where to find it. */
- uint8_t padding[3]; /* Pad to full dword. */
+ uint8_t id; /* Multiple capabilities of the same type */
+ uint8_t padding[2]; /* Pad to full dword. */
uint32_t offset; /* Offset within bar. */
uint32_t length; /* Length of the structure, in bytes. */
};
+struct virtio_pci_cap64 {
+ struct virtio_pci_cap cap;
+ uint32_t offset_hi; /* Most sig 32 bits of offset */
+ uint32_t length_hi; /* Most sig 32 bits of length */
+};
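
The new virtio_pci_cap64 is what a VIRTIO_PCI_CAP_SHARED_MEMORY_CFG capability uses, with the id field selecting the region. A short sketch of recombining the split 64-bit offset and length; the struct mirrors are local and the capability fields are read as little-endian config-space values.

#include <stdint.h>
#include <endian.h>                     /* le32toh(); glibc-style header */

struct pci_cap {                        /* mirror of struct virtio_pci_cap */
    uint8_t  cap_vndr;
    uint8_t  cap_next;
    uint8_t  cap_len;
    uint8_t  cfg_type;
    uint8_t  bar;
    uint8_t  id;
    uint8_t  padding[2];
    uint32_t offset;
    uint32_t length;
};

struct pci_cap64 {                      /* mirror of struct virtio_pci_cap64 */
    struct pci_cap cap;
    uint32_t offset_hi;
    uint32_t length_hi;
};

/* Recombine the split 64-bit offset/length of a shared memory region. */
static void shm_region_from_cap(const struct pci_cap64 *c,
                                uint64_t *offset, uint64_t *length)
{
    *offset = le32toh(c->cap.offset) | ((uint64_t)le32toh(c->offset_hi) << 32);
    *length = le32toh(c->cap.length) | ((uint64_t)le32toh(c->length_hi) << 32);
}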
+
struct virtio_pci_notify_cap {
struct virtio_pci_cap cap;
uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
@@ -157,6 +166,20 @@ struct virtio_pci_common_cfg {
uint32_t queue_used_hi; /* read-write */
};
+/*
+ * Warning: do not use sizeof on this: use offsetofend for
+ * specific fields you need.
+ */
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ uint16_t queue_notify_data; /* read-write */
+ uint16_t queue_reset; /* read-write */
+
+ uint16_t admin_queue_index; /* read-only */
+ uint16_t admin_queue_num; /* read-only */
+};
+
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
struct virtio_pci_cap cap;
@@ -193,7 +216,74 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+#define VIRTIO_PCI_COMMON_Q_NDATA 56
+#define VIRTIO_PCI_COMMON_Q_RESET 58
+#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
+#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#endif /* VIRTIO_PCI_NO_MODERN */
+/* Admin command status. */
+#define VIRTIO_ADMIN_STATUS_OK 0
+
+/* Admin command opcode. */
+#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0
+#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
+
+/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
+
+/* Transitional device admin command. */
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
+#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
+
+struct virtio_admin_cmd_hdr {
+ uint16_t opcode;
+ /*
+ * 1 - SR-IOV
+ * 2-65535 - reserved
+ */
+ uint16_t group_type;
+ /* Unused, reserved for future extensions. */
+ uint8_t reserved1[12];
+ uint64_t group_member_id;
+};
+
+struct virtio_admin_cmd_status {
+ uint16_t status;
+ uint16_t status_qualifier;
+ /* Unused, reserved for future extensions. */
+ uint8_t reserved2[4];
+};
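
A sketch of addressing an admin command at an SR-IOV group member using the header and status structures above. The struct mirrors and helpers are local to the example, fields are written little-endian, and the 1-based VF numbering for group_member_id is an assumption taken from the admin virtqueue proposal.

#include <stdint.h>
#include <string.h>
#include <endian.h>             /* htole16()/htole64()/le16toh() assumed */

#define ADMIN_CMD_LIST_USE     0x1  /* VIRTIO_ADMIN_CMD_LIST_USE */
#define ADMIN_GROUP_TYPE_SRIOV 0x1  /* VIRTIO_ADMIN_GROUP_TYPE_SRIOV */
#define ADMIN_STATUS_OK        0    /* VIRTIO_ADMIN_STATUS_OK */

struct admin_cmd_hdr {              /* mirror of struct virtio_admin_cmd_hdr */
    uint16_t opcode;
    uint16_t group_type;
    uint8_t  reserved1[12];
    uint64_t group_member_id;
};

struct admin_cmd_status {        /* mirror of struct virtio_admin_cmd_status */
    uint16_t status;
    uint16_t status_qualifier;
    uint8_t  reserved2[4];
};

/* Address an admin command at SR-IOV VF number `vf` (assumed 1-based). */
static void admin_hdr_for_vf(struct admin_cmd_hdr *hdr, uint16_t opcode,
                             uint64_t vf)
{
    memset(hdr, 0, sizeof(*hdr));
    hdr->opcode          = htole16(opcode);
    hdr->group_type      = htole16(ADMIN_GROUP_TYPE_SRIOV);
    hdr->group_member_id = htole64(vf);
}

/* The device writes the status structure back; check it after completion. */
static int admin_cmd_ok(const struct admin_cmd_status *st)
{
    return le16toh(st->status) == ADMIN_STATUS_OK;
}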
+
+struct virtio_admin_cmd_legacy_wr_data {
+ uint8_t offset; /* Starting offset of the register(s) to write. */
+ uint8_t reserved[7];
+ uint8_t registers[];
+};
+
+struct virtio_admin_cmd_legacy_rd_data {
+ uint8_t offset; /* Starting offset of the register(s) to read. */
+};
+
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
+
+#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
+
+struct virtio_admin_cmd_notify_info_data {
+ uint8_t flags; /* 0 = end of list, 1 = owner device, 2 = member device */
+ uint8_t bar; /* BAR of the member or the owner device */
+ uint8_t padding[6];
+ uint64_t offset; /* Offset within bar. */
+};
+
+struct virtio_admin_cmd_notify_info_result {
+ struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
+};
+
#endif
diff --git a/include/standard-headers/linux/virtio_pcidev.h b/include/standard-headers/linux/virtio_pcidev.h
new file mode 100644
index 0000000000..bdf1d062da
--- /dev/null
+++ b/include/standard-headers/linux/virtio_pcidev.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef _LINUX_VIRTIO_PCIDEV_H
+#define _LINUX_VIRTIO_PCIDEV_H
+#include "standard-headers/linux/types.h"
+
+/**
+ * enum virtio_pcidev_ops - virtual PCI device operations
+ * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors
+ * @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable;
+ * the @data field should be filled in by the device (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable;
+ * the @data field contains the data to write (in little endian).
+ * @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
+ * the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)
+ * @VIRTIO_PCIDEV_OP_INT: legacy INTx# pin interrupt, the addr field is 1-4 for
+ * the number
+ * @VIRTIO_PCIDEV_OP_MSI: MSI(-X) interrupt, this message basically transports
+ * the 16- or 32-bit write that would otherwise be done into memory,
+ * analogous to the write messages (@VIRTIO_PCIDEV_OP_MMIO_WRITE) above
+ * @VIRTIO_PCIDEV_OP_PME: Dummy message whose content is ignored (and should be
+ * all zeroes) to signal the PME# pin.
+ */
+enum virtio_pcidev_ops {
+ VIRTIO_PCIDEV_OP_RESERVED = 0,
+ VIRTIO_PCIDEV_OP_CFG_READ,
+ VIRTIO_PCIDEV_OP_CFG_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_READ,
+ VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ VIRTIO_PCIDEV_OP_MMIO_MEMSET,
+ VIRTIO_PCIDEV_OP_INT,
+ VIRTIO_PCIDEV_OP_MSI,
+ VIRTIO_PCIDEV_OP_PME,
+};
+
+/**
+ * struct virtio_pcidev_msg - virtio PCI device operation
+ * @op: the operation to do
+ * @bar: the bar (only with BAR read/write messages)
+ * @reserved: reserved
+ * @size: the size of the read/write (in bytes)
+ * @addr: the address to read/write
+ * @data: the data, normally @size long, but just one byte for
+ * %VIRTIO_PCIDEV_OP_MMIO_MEMSET
+ *
+ * Note: the fields are all in native (CPU) endian; however, the
+ * @data values will often be in little endian (see the ops above).
+ */
+struct virtio_pcidev_msg {
+ uint8_t op;
+ uint8_t bar;
+ uint16_t reserved;
+ uint32_t size;
+ uint64_t addr;
+ uint8_t data[];
+};
+
+#endif /* _LINUX_VIRTIO_PCIDEV_H */
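
Following the kernel-doc above, here is a sketch of building a config-space read message: the header fields are native-endian and the device fills the trailing data bytes in little endian. Only the fixed part of the message is mirrored locally, and pcidev_cfg_read4() is a hypothetical helper.

#include <stdint.h>
#include <string.h>

struct pcidev_msg_hdr {           /* fixed part of struct virtio_pcidev_msg */
    uint8_t  op;
    uint8_t  bar;
    uint16_t reserved;
    uint32_t size;
    uint64_t addr;
    /* uint8_t data[] follows: filled by the device for reads */
};

#define PCIDEV_OP_CFG_READ 1      /* VIRTIO_PCIDEV_OP_CFG_READ */

/* Ask the device for a 4-byte config-space read at `offset`. */
static void pcidev_cfg_read4(struct pcidev_msg_hdr *msg, uint64_t offset)
{
    memset(msg, 0, sizeof(*msg));
    msg->op   = PCIDEV_OP_CFG_READ;
    msg->size = 4;
    msg->addr = offset;
}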
diff --git a/include/standard-headers/linux/virtio_pmem.h b/include/standard-headers/linux/virtio_pmem.h
index fc029de798..1a2576d017 100644
--- a/include/standard-headers/linux/virtio_pmem.h
+++ b/include/standard-headers/linux/virtio_pmem.h
@@ -14,6 +14,13 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
+/* Feature bits */
+/* guest physical address range will be indicated as shared memory region 0 */
+#define VIRTIO_PMEM_F_SHMEM_REGION 0
+
+/* shmid of the shared memory region corresponding to the pmem */
+#define VIRTIO_PMEM_SHMEM_REGION_ID 0
+
struct virtio_pmem_config {
uint64_t start;
uint64_t size;
diff --git a/include/standard-headers/linux/virtio_ring.h b/include/standard-headers/linux/virtio_ring.h
index 0fa0e1067f..22f6eb8ca7 100644
--- a/include/standard-headers/linux/virtio_ring.h
+++ b/include/standard-headers/linux/virtio_ring.h
@@ -91,15 +91,21 @@
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
-/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+/**
+ * struct vring_desc - Virtio ring descriptors,
+ * 16 bytes long. These can chain together via @next.
+ *
+ * @addr: buffer address (guest-physical)
+ * @len: buffer length
+ * @flags: descriptor flags
+ * @next: index of the next descriptor in the chain,
+ * if the VRING_DESC_F_NEXT flag is set. We chain unused
+ * descriptors via this, too.
+ */
struct vring_desc {
- /* Address (guest-physical). */
__virtio64 addr;
- /* Length. */
__virtio32 len;
- /* The flags as indicated above. */
__virtio16 flags;
- /* We chain unused descriptors via this, too */
__virtio16 next;
};
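
A quick sketch of the chaining described in the new kernel-doc: descriptor i points at descriptor i + 1 through @next and sets the NEXT flag. The VRING_DESC_F_* values are mirrored from the unchanged part of this header, and the little-endian __virtio* conversions a real driver would apply are elided for brevity.

#include <stdint.h>

#define VRING_DESC_F_NEXT  1        /* buffer continues via the next field */
#define VRING_DESC_F_WRITE 2        /* buffer is device-writable */

struct desc {                       /* mirror of struct vring_desc */
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
};

/* Chain a device-readable request buffer to a device-writable response
 * buffer occupying two consecutive descriptors. */
static void chain_req_resp(struct desc *ring, uint16_t i,
                           uint64_t req_pa, uint32_t req_len,
                           uint64_t resp_pa, uint32_t resp_len)
{
    ring[i].addr  = req_pa;
    ring[i].len   = req_len;
    ring[i].flags = VRING_DESC_F_NEXT;
    ring[i].next  = i + 1;

    ring[i + 1].addr  = resp_pa;
    ring[i + 1].len   = resp_len;
    ring[i + 1].flags = VRING_DESC_F_WRITE;   /* last in chain: no F_NEXT */
    ring[i + 1].next  = 0;
}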
diff --git a/include/standard-headers/linux/virtio_scmi.h b/include/standard-headers/linux/virtio_scmi.h
new file mode 100644
index 0000000000..8f2c305aea
--- /dev/null
+++ b/include/standard-headers/linux/virtio_scmi.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_VIRTIO_SCMI_H
+#define _LINUX_VIRTIO_SCMI_H
+
+#include "standard-headers/linux/virtio_types.h"
+
+/* Device implements some SCMI notifications, or delayed responses. */
+#define VIRTIO_SCMI_F_P2A_CHANNELS 0
+
+/* Device implements any SCMI statistics shared memory region */
+#define VIRTIO_SCMI_F_SHARED_MEMORY 1
+
+/* Virtqueues */
+
+#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */
+#define VIRTIO_SCMI_VQ_RX 1 /* eventq */
+#define VIRTIO_SCMI_VQ_MAX_CNT 2
+
+#endif /* _LINUX_VIRTIO_SCMI_H */
diff --git a/include/standard-headers/linux/virtio_scsi.h b/include/standard-headers/linux/virtio_scsi.h
index ab66166b6a..663f36cbb7 100644
--- a/include/standard-headers/linux/virtio_scsi.h
+++ b/include/standard-headers/linux/virtio_scsi.h
@@ -103,16 +103,16 @@ struct virtio_scsi_event {
} QEMU_PACKED;
struct virtio_scsi_config {
- uint32_t num_queues;
- uint32_t seg_max;
- uint32_t max_sectors;
- uint32_t cmd_per_lun;
- uint32_t event_info_size;
- uint32_t sense_size;
- uint32_t cdb_size;
- uint16_t max_channel;
- uint16_t max_target;
- uint32_t max_lun;
+ __virtio32 num_queues;
+ __virtio32 seg_max;
+ __virtio32 max_sectors;
+ __virtio32 cmd_per_lun;
+ __virtio32 event_info_size;
+ __virtio32 sense_size;
+ __virtio32 cdb_size;
+ __virtio16 max_channel;
+ __virtio16 max_target;
+ __virtio32 max_lun;
} QEMU_PACKED;
/* Feature Bits */
diff --git a/include/standard-headers/linux/virtio_snd.h b/include/standard-headers/linux/virtio_snd.h
new file mode 100644
index 0000000000..860f12e0a4
--- /dev/null
+++ b/include/standard-headers/linux/virtio_snd.h
@@ -0,0 +1,488 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2021 OpenSynergy GmbH
+ */
+#ifndef VIRTIO_SND_IF_H
+#define VIRTIO_SND_IF_H
+
+#include "standard-headers/linux/virtio_types.h"
+
+/*******************************************************************************
+ * FEATURE BITS
+ */
+enum {
+ /* device supports control elements */
+ VIRTIO_SND_F_CTLS = 0
+};
+
+/*******************************************************************************
+ * CONFIGURATION SPACE
+ */
+struct virtio_snd_config {
+ /* # of available physical jacks */
+ uint32_t jacks;
+ /* # of available PCM streams */
+ uint32_t streams;
+ /* # of available channel maps */
+ uint32_t chmaps;
+ /* # of available control elements */
+ uint32_t controls;
+};
+
+enum {
+ /* device virtqueue indexes */
+ VIRTIO_SND_VQ_CONTROL = 0,
+ VIRTIO_SND_VQ_EVENT,
+ VIRTIO_SND_VQ_TX,
+ VIRTIO_SND_VQ_RX,
+ /* # of device virtqueues */
+ VIRTIO_SND_VQ_MAX
+};
+
+/*******************************************************************************
+ * COMMON DEFINITIONS
+ */
+
+/* supported dataflow directions */
+enum {
+ VIRTIO_SND_D_OUTPUT = 0,
+ VIRTIO_SND_D_INPUT
+};
+
+enum {
+ /* jack control request types */
+ VIRTIO_SND_R_JACK_INFO = 1,
+ VIRTIO_SND_R_JACK_REMAP,
+
+ /* PCM control request types */
+ VIRTIO_SND_R_PCM_INFO = 0x0100,
+ VIRTIO_SND_R_PCM_SET_PARAMS,
+ VIRTIO_SND_R_PCM_PREPARE,
+ VIRTIO_SND_R_PCM_RELEASE,
+ VIRTIO_SND_R_PCM_START,
+ VIRTIO_SND_R_PCM_STOP,
+
+ /* channel map control request types */
+ VIRTIO_SND_R_CHMAP_INFO = 0x0200,
+
+ /* control element request types */
+ VIRTIO_SND_R_CTL_INFO = 0x0300,
+ VIRTIO_SND_R_CTL_ENUM_ITEMS,
+ VIRTIO_SND_R_CTL_READ,
+ VIRTIO_SND_R_CTL_WRITE,
+ VIRTIO_SND_R_CTL_TLV_READ,
+ VIRTIO_SND_R_CTL_TLV_WRITE,
+ VIRTIO_SND_R_CTL_TLV_COMMAND,
+
+ /* jack event types */
+ VIRTIO_SND_EVT_JACK_CONNECTED = 0x1000,
+ VIRTIO_SND_EVT_JACK_DISCONNECTED,
+
+ /* PCM event types */
+ VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED = 0x1100,
+ VIRTIO_SND_EVT_PCM_XRUN,
+
+ /* control element event types */
+ VIRTIO_SND_EVT_CTL_NOTIFY = 0x1200,
+
+ /* common status codes */
+ VIRTIO_SND_S_OK = 0x8000,
+ VIRTIO_SND_S_BAD_MSG,
+ VIRTIO_SND_S_NOT_SUPP,
+ VIRTIO_SND_S_IO_ERR
+};
+
+/* common header */
+struct virtio_snd_hdr {
+ uint32_t code;
+};
+
+/* event notification */
+struct virtio_snd_event {
+ /* VIRTIO_SND_EVT_XXX */
+ struct virtio_snd_hdr hdr;
+ /* optional event data */
+ uint32_t data;
+};
+
+/* common control request to query an item information */
+struct virtio_snd_query_info {
+ /* VIRTIO_SND_R_XXX_INFO */
+ struct virtio_snd_hdr hdr;
+ /* item start identifier */
+ uint32_t start_id;
+ /* item count to query */
+ uint32_t count;
+ /* item information size in bytes */
+ uint32_t size;
+};
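
A sketch of the query pattern defined above: the driver sends a virtio_snd_query_info asking for `count` items of `size` bytes starting at `start_id`, and the device replies with a status header followed by that many records. The struct mirror is local, fields are written little-endian as virtio 1.x devices expect, and snd_query_pcm_info() is a hypothetical helper.

#include <stdint.h>
#include <endian.h>                 /* htole32(); glibc-style header */

#define SND_R_PCM_INFO 0x0100       /* VIRTIO_SND_R_PCM_INFO */

struct snd_query_info {             /* mirror of struct virtio_snd_query_info */
    uint32_t code;                  /* embedded virtio_snd_hdr */
    uint32_t start_id;
    uint32_t count;
    uint32_t size;
};

/* Ask for information on streams [0, nstreams); `info_size` is the size of
 * one reply record (e.g. sizeof(struct virtio_snd_pcm_info)). */
static void snd_query_pcm_info(struct snd_query_info *q, uint32_t nstreams,
                               uint32_t info_size)
{
    q->code     = htole32(SND_R_PCM_INFO);
    q->start_id = htole32(0);
    q->count    = htole32(nstreams);
    q->size     = htole32(info_size);
}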
+
+/* common item information header */
+struct virtio_snd_info {
+ /* function group node id (High Definition Audio Specification 7.1.2) */
+ uint32_t hda_fn_nid;
+};
+
+/*******************************************************************************
+ * JACK CONTROL MESSAGES
+ */
+struct virtio_snd_jack_hdr {
+ /* VIRTIO_SND_R_JACK_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::jacks - 1 */
+ uint32_t jack_id;
+};
+
+/* supported jack features */
+enum {
+ VIRTIO_SND_JACK_F_REMAP = 0
+};
+
+struct virtio_snd_jack_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_JACK_F_XXX) */
+ uint32_t features;
+ /* pin configuration (High Definition Audio Specification 7.3.3.31) */
+ uint32_t hda_reg_defconf;
+ /* pin capabilities (High Definition Audio Specification 7.3.4.9) */
+ uint32_t hda_reg_caps;
+ /* current jack connection status (0: disconnected, 1: connected) */
+ uint8_t connected;
+
+ uint8_t padding[7];
+};
+
+/* jack remapping control request */
+struct virtio_snd_jack_remap {
+ /* .code = VIRTIO_SND_R_JACK_REMAP */
+ struct virtio_snd_jack_hdr hdr;
+ /* selected association number */
+ uint32_t association;
+ /* selected sequence number */
+ uint32_t sequence;
+};
+
+/*******************************************************************************
+ * PCM CONTROL MESSAGES
+ */
+struct virtio_snd_pcm_hdr {
+ /* VIRTIO_SND_R_PCM_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::streams - 1 */
+ uint32_t stream_id;
+};
+
+/* supported PCM stream features */
+enum {
+ VIRTIO_SND_PCM_F_SHMEM_HOST = 0,
+ VIRTIO_SND_PCM_F_SHMEM_GUEST,
+ VIRTIO_SND_PCM_F_MSG_POLLING,
+ VIRTIO_SND_PCM_F_EVT_SHMEM_PERIODS,
+ VIRTIO_SND_PCM_F_EVT_XRUNS
+};
+
+/* supported PCM sample formats */
+enum {
+ /* analog formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_IMA_ADPCM = 0, /* 4 / 4 bits */
+ VIRTIO_SND_PCM_FMT_MU_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_A_LAW, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_S16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_S18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U18_3, /* 18 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U20_3, /* 20 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_U24_3, /* 24 / 24 bits */
+ VIRTIO_SND_PCM_FMT_S20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U20, /* 20 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U24, /* 24 / 32 bits */
+ VIRTIO_SND_PCM_FMT_S32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_FLOAT64, /* 64 / 64 bits */
+ /* digital formats (width / physical width) */
+ VIRTIO_SND_PCM_FMT_DSD_U8, /* 8 / 8 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U16, /* 16 / 16 bits */
+ VIRTIO_SND_PCM_FMT_DSD_U32, /* 32 / 32 bits */
+ VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME /* 32 / 32 bits */
+};
+
+/* supported PCM frame rates */
+enum {
+ VIRTIO_SND_PCM_RATE_5512 = 0,
+ VIRTIO_SND_PCM_RATE_8000,
+ VIRTIO_SND_PCM_RATE_11025,
+ VIRTIO_SND_PCM_RATE_16000,
+ VIRTIO_SND_PCM_RATE_22050,
+ VIRTIO_SND_PCM_RATE_32000,
+ VIRTIO_SND_PCM_RATE_44100,
+ VIRTIO_SND_PCM_RATE_48000,
+ VIRTIO_SND_PCM_RATE_64000,
+ VIRTIO_SND_PCM_RATE_88200,
+ VIRTIO_SND_PCM_RATE_96000,
+ VIRTIO_SND_PCM_RATE_176400,
+ VIRTIO_SND_PCM_RATE_192000,
+ VIRTIO_SND_PCM_RATE_384000
+};
+
+struct virtio_snd_pcm_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* supported feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ uint32_t features;
+ /* supported sample format bit map (1 << VIRTIO_SND_PCM_FMT_XXX) */
+ uint64_t formats;
+ /* supported frame rate bit map (1 << VIRTIO_SND_PCM_RATE_XXX) */
+ uint64_t rates;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ uint8_t direction;
+ /* minimum # of supported channels */
+ uint8_t channels_min;
+ /* maximum # of supported channels */
+ uint8_t channels_max;
+
+ uint8_t padding[5];
+};
+
+/* set PCM stream format */
+struct virtio_snd_pcm_set_params {
+ /* .code = VIRTIO_SND_R_PCM_SET_PARAMS */
+ struct virtio_snd_pcm_hdr hdr;
+ /* size of the hardware buffer */
+ uint32_t buffer_bytes;
+ /* size of the hardware period */
+ uint32_t period_bytes;
+ /* selected feature bit map (1 << VIRTIO_SND_PCM_F_XXX) */
+ uint32_t features;
+ /* selected # of channels */
+ uint8_t channels;
+ /* selected sample format (VIRTIO_SND_PCM_FMT_XXX) */
+ uint8_t format;
+ /* selected frame rate (VIRTIO_SND_PCM_RATE_XXX) */
+ uint8_t rate;
+
+ uint8_t padding;
+};
+
+/*******************************************************************************
+ * PCM I/O MESSAGES
+ */
+
+/* I/O request header */
+struct virtio_snd_pcm_xfer {
+ /* 0 ... virtio_snd_config::streams - 1 */
+ uint32_t stream_id;
+};
+
+/* I/O request status */
+struct virtio_snd_pcm_status {
+ /* VIRTIO_SND_S_XXX */
+ uint32_t status;
+ /* current device latency */
+ uint32_t latency_bytes;
+};
+
+/*******************************************************************************
+ * CHANNEL MAP CONTROL MESSAGES
+ */
+struct virtio_snd_chmap_hdr {
+ /* VIRTIO_SND_R_CHMAP_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::chmaps - 1 */
+ uint32_t chmap_id;
+};
+
+/* standard channel position definition */
+enum {
+ VIRTIO_SND_CHMAP_NONE = 0, /* undefined */
+ VIRTIO_SND_CHMAP_NA, /* silent */
+ VIRTIO_SND_CHMAP_MONO, /* mono stream */
+ VIRTIO_SND_CHMAP_FL, /* front left */
+ VIRTIO_SND_CHMAP_FR, /* front right */
+ VIRTIO_SND_CHMAP_RL, /* rear left */
+ VIRTIO_SND_CHMAP_RR, /* rear right */
+ VIRTIO_SND_CHMAP_FC, /* front center */
+ VIRTIO_SND_CHMAP_LFE, /* low frequency (LFE) */
+ VIRTIO_SND_CHMAP_SL, /* side left */
+ VIRTIO_SND_CHMAP_SR, /* side right */
+ VIRTIO_SND_CHMAP_RC, /* rear center */
+ VIRTIO_SND_CHMAP_FLC, /* front left center */
+ VIRTIO_SND_CHMAP_FRC, /* front right center */
+ VIRTIO_SND_CHMAP_RLC, /* rear left center */
+ VIRTIO_SND_CHMAP_RRC, /* rear right center */
+ VIRTIO_SND_CHMAP_FLW, /* front left wide */
+ VIRTIO_SND_CHMAP_FRW, /* front right wide */
+ VIRTIO_SND_CHMAP_FLH, /* front left high */
+ VIRTIO_SND_CHMAP_FCH, /* front center high */
+ VIRTIO_SND_CHMAP_FRH, /* front right high */
+ VIRTIO_SND_CHMAP_TC, /* top center */
+ VIRTIO_SND_CHMAP_TFL, /* top front left */
+ VIRTIO_SND_CHMAP_TFR, /* top front right */
+ VIRTIO_SND_CHMAP_TFC, /* top front center */
+ VIRTIO_SND_CHMAP_TRL, /* top rear left */
+ VIRTIO_SND_CHMAP_TRR, /* top rear right */
+ VIRTIO_SND_CHMAP_TRC, /* top rear center */
+ VIRTIO_SND_CHMAP_TFLC, /* top front left center */
+ VIRTIO_SND_CHMAP_TFRC, /* top front right center */
+ VIRTIO_SND_CHMAP_TSL, /* top side left */
+ VIRTIO_SND_CHMAP_TSR, /* top side right */
+ VIRTIO_SND_CHMAP_LLFE, /* left LFE */
+ VIRTIO_SND_CHMAP_RLFE, /* right LFE */
+ VIRTIO_SND_CHMAP_BC, /* bottom center */
+ VIRTIO_SND_CHMAP_BLC, /* bottom left center */
+ VIRTIO_SND_CHMAP_BRC /* bottom right center */
+};
+
+/* maximum possible number of channels */
+#define VIRTIO_SND_CHMAP_MAX_SIZE 18
+
+struct virtio_snd_chmap_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* dataflow direction (VIRTIO_SND_D_XXX) */
+ uint8_t direction;
+ /* # of valid channel position values */
+ uint8_t channels;
+ /* channel position values (VIRTIO_SND_CHMAP_XXX) */
+ uint8_t positions[VIRTIO_SND_CHMAP_MAX_SIZE];
+};
+
+/*******************************************************************************
+ * CONTROL ELEMENTS MESSAGES
+ */
+struct virtio_snd_ctl_hdr {
+ /* VIRTIO_SND_R_CTL_XXX */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::controls - 1 */
+ uint32_t control_id;
+};
+
+/* supported roles for control elements */
+enum {
+ VIRTIO_SND_CTL_ROLE_UNDEFINED = 0,
+ VIRTIO_SND_CTL_ROLE_VOLUME,
+ VIRTIO_SND_CTL_ROLE_MUTE,
+ VIRTIO_SND_CTL_ROLE_GAIN
+};
+
+/* supported value types for control elements */
+enum {
+ VIRTIO_SND_CTL_TYPE_BOOLEAN = 0,
+ VIRTIO_SND_CTL_TYPE_INTEGER,
+ VIRTIO_SND_CTL_TYPE_INTEGER64,
+ VIRTIO_SND_CTL_TYPE_ENUMERATED,
+ VIRTIO_SND_CTL_TYPE_BYTES,
+ VIRTIO_SND_CTL_TYPE_IEC958
+};
+
+/* supported access rights for control elements */
+enum {
+ VIRTIO_SND_CTL_ACCESS_READ = 0,
+ VIRTIO_SND_CTL_ACCESS_WRITE,
+ VIRTIO_SND_CTL_ACCESS_VOLATILE,
+ VIRTIO_SND_CTL_ACCESS_INACTIVE,
+ VIRTIO_SND_CTL_ACCESS_TLV_READ,
+ VIRTIO_SND_CTL_ACCESS_TLV_WRITE,
+ VIRTIO_SND_CTL_ACCESS_TLV_COMMAND
+};
+
+struct virtio_snd_ctl_info {
+ /* common header */
+ struct virtio_snd_info hdr;
+ /* element role (VIRTIO_SND_CTL_ROLE_XXX) */
+ uint32_t role;
+ /* element value type (VIRTIO_SND_CTL_TYPE_XXX) */
+ uint32_t type;
+ /* element access right bit map (1 << VIRTIO_SND_CTL_ACCESS_XXX) */
+ uint32_t access;
+ /* # of members in the element value */
+ uint32_t count;
+ /* index for an element with a non-unique name */
+ uint32_t index;
+ /* name identifier string for the element */
+ uint8_t name[44];
+ /* additional information about the element's value */
+ union {
+ /* VIRTIO_SND_CTL_TYPE_INTEGER */
+ struct {
+ /* minimum supported value */
+ uint32_t min;
+ /* maximum supported value */
+ uint32_t max;
+ /* fixed step size for value (0 = variable size) */
+ uint32_t step;
+ } integer;
+ /* VIRTIO_SND_CTL_TYPE_INTEGER64 */
+ struct {
+ /* minimum supported value */
+ uint64_t min;
+ /* maximum supported value */
+ uint64_t max;
+ /* fixed step size for value (0 = variable size) */
+ uint64_t step;
+ } integer64;
+ /* VIRTIO_SND_CTL_TYPE_ENUMERATED */
+ struct {
+ /* # of options supported for value */
+ uint32_t items;
+ } enumerated;
+ } value;
+};
+
+struct virtio_snd_ctl_enum_item {
+ /* option name */
+ uint8_t item[64];
+};
+
+struct virtio_snd_ctl_iec958 {
+ /* AES/IEC958 channel status bits */
+ uint8_t status[24];
+ /* AES/IEC958 subcode bits */
+ uint8_t subcode[147];
+ /* nothing */
+ uint8_t pad;
+ /* AES/IEC958 subframe bits */
+ uint8_t dig_subframe[4];
+};
+
+struct virtio_snd_ctl_value {
+ union {
+ /* VIRTIO_SND_CTL_TYPE_BOOLEAN|INTEGER value */
+ uint32_t integer[128];
+ /* VIRTIO_SND_CTL_TYPE_INTEGER64 value */
+ uint64_t integer64[64];
+ /* VIRTIO_SND_CTL_TYPE_ENUMERATED value (option indexes) */
+ uint32_t enumerated[128];
+ /* VIRTIO_SND_CTL_TYPE_BYTES value */
+ uint8_t bytes[512];
+ /* VIRTIO_SND_CTL_TYPE_IEC958 value */
+ struct virtio_snd_ctl_iec958 iec958;
+ } value;
+};
+
+/* supported event reason types */
+enum {
+ /* element's value has changed */
+ VIRTIO_SND_CTL_EVT_MASK_VALUE = 0,
+ /* element's information has changed */
+ VIRTIO_SND_CTL_EVT_MASK_INFO,
+ /* element's metadata has changed */
+ VIRTIO_SND_CTL_EVT_MASK_TLV
+};
+
+struct virtio_snd_ctl_event {
+ /* VIRTIO_SND_EVT_CTL_NOTIFY */
+ struct virtio_snd_hdr hdr;
+ /* 0 ... virtio_snd_config::controls - 1 */
+ uint16_t control_id;
+ /* event reason bit map (1 << VIRTIO_SND_CTL_EVT_MASK_XXX) */
+ uint16_t mask;
+};
+
+#endif /* VIRTIO_SND_IF_H */
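
The control-element structures above pair an information record with a value payload. As a hedged sketch (the helper names below are illustrative, not part of the virtio-sound specification), a driver might check the access bitmap before reading a value and then pick the union member matching the advertised type:

/*
 * Illustrative helpers only; they use nothing beyond the structures and
 * enums defined in this header.
 */
#include <stdbool.h>
#include <stdio.h>

static bool virtio_snd_ctl_is_readable(const struct virtio_snd_ctl_info *info)
{
    return info->access & (1u << VIRTIO_SND_CTL_ACCESS_READ);
}

static void virtio_snd_ctl_dump_first_member(
        const struct virtio_snd_ctl_info *info,
        const struct virtio_snd_ctl_value *val)
{
    switch (info->type) {
    case VIRTIO_SND_CTL_TYPE_BOOLEAN:
        printf("%s = %s\n", (const char *)info->name,
               val->value.integer[0] ? "on" : "off");
        break;
    case VIRTIO_SND_CTL_TYPE_INTEGER:
        printf("%s = %u (min %u, max %u, step %u)\n",
               (const char *)info->name, val->value.integer[0],
               info->value.integer.min, info->value.integer.max,
               info->value.integer.step);
        break;
    default:
        break;   /* other types carry larger payloads, not shown here */
    }
}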
diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
index be443211ce..467e751b17 100644
--- a/include/standard-headers/linux/virtio_vsock.h
+++ b/include/standard-headers/linux/virtio_vsock.h
@@ -38,6 +38,9 @@
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_config.h"
+/* The feature bitmap for virtio vsock */
+#define VIRTIO_VSOCK_F_SEQPACKET 1 /* SOCK_SEQPACKET supported */
+
struct virtio_vsock_config {
uint64_t guest_cid;
} QEMU_PACKED;
@@ -65,6 +68,7 @@ struct virtio_vsock_hdr {
enum virtio_vsock_type {
VIRTIO_VSOCK_TYPE_STREAM = 1,
+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
};
enum virtio_vsock_op {
@@ -91,4 +95,10 @@ enum virtio_vsock_shutdown {
VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
};
+/* VIRTIO_VSOCK_OP_RW flags values */
+enum virtio_vsock_rw {
+ VIRTIO_VSOCK_SEQ_EOM = 1,
+ VIRTIO_VSOCK_SEQ_EOR = 2,
+};
+
#endif /* _LINUX_VIRTIO_VSOCK_H */
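
As a usage sketch only: a SOCK_SEQPACKET sender sets these flags on the last VIRTIO_VSOCK_OP_RW packet of a message. It assumes struct virtio_vsock_hdr carries a 32-bit flags field, which is defined earlier in this header and not visible in the hunk above:

/* Illustrative helper; not taken from the QEMU or Linux sources. */
static void vsock_seqpacket_finish_message(struct virtio_vsock_hdr *hdr,
                                           bool end_of_record)
{
    uint32_t flags = VIRTIO_VSOCK_SEQ_EOM;   /* end of message */

    if (end_of_record) {
        flags |= VIRTIO_VSOCK_SEQ_EOR;       /* end of record */
    }
    hdr->flags = flags;   /* byte-order conversion omitted for brevity */
}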
diff --git a/include/standard-headers/rdma/vmw_pvrdma-abi.h b/include/standard-headers/rdma/vmw_pvrdma-abi.h
deleted file mode 100644
index 0989426a3f..0000000000
--- a/include/standard-headers/rdma/vmw_pvrdma-abi.h
+++ /dev/null
@@ -1,303 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
-/*
- * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of EITHER the GNU General Public License
- * version 2 as published by the Free Software Foundation or the BSD
- * 2-Clause License. This program is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
- * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License version 2 for more details at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program available in the file COPYING in the main
- * directory of this source tree.
- *
- * The BSD 2-Clause License
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __VMW_PVRDMA_ABI_H__
-#define __VMW_PVRDMA_ABI_H__
-
-#include "standard-headers/linux/types.h"
-
-#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
-#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
-#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
-#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
-#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
-#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
-
-enum pvrdma_wr_opcode {
- PVRDMA_WR_RDMA_WRITE,
- PVRDMA_WR_RDMA_WRITE_WITH_IMM,
- PVRDMA_WR_SEND,
- PVRDMA_WR_SEND_WITH_IMM,
- PVRDMA_WR_RDMA_READ,
- PVRDMA_WR_ATOMIC_CMP_AND_SWP,
- PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
- PVRDMA_WR_LSO,
- PVRDMA_WR_SEND_WITH_INV,
- PVRDMA_WR_RDMA_READ_WITH_INV,
- PVRDMA_WR_LOCAL_INV,
- PVRDMA_WR_FAST_REG_MR,
- PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
- PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
- PVRDMA_WR_BIND_MW,
- PVRDMA_WR_REG_SIG_MR,
- PVRDMA_WR_ERROR,
-};
-
-enum pvrdma_wc_status {
- PVRDMA_WC_SUCCESS,
- PVRDMA_WC_LOC_LEN_ERR,
- PVRDMA_WC_LOC_QP_OP_ERR,
- PVRDMA_WC_LOC_EEC_OP_ERR,
- PVRDMA_WC_LOC_PROT_ERR,
- PVRDMA_WC_WR_FLUSH_ERR,
- PVRDMA_WC_MW_BIND_ERR,
- PVRDMA_WC_BAD_RESP_ERR,
- PVRDMA_WC_LOC_ACCESS_ERR,
- PVRDMA_WC_REM_INV_REQ_ERR,
- PVRDMA_WC_REM_ACCESS_ERR,
- PVRDMA_WC_REM_OP_ERR,
- PVRDMA_WC_RETRY_EXC_ERR,
- PVRDMA_WC_RNR_RETRY_EXC_ERR,
- PVRDMA_WC_LOC_RDD_VIOL_ERR,
- PVRDMA_WC_REM_INV_RD_REQ_ERR,
- PVRDMA_WC_REM_ABORT_ERR,
- PVRDMA_WC_INV_EECN_ERR,
- PVRDMA_WC_INV_EEC_STATE_ERR,
- PVRDMA_WC_FATAL_ERR,
- PVRDMA_WC_RESP_TIMEOUT_ERR,
- PVRDMA_WC_GENERAL_ERR,
-};
-
-enum pvrdma_wc_opcode {
- PVRDMA_WC_SEND,
- PVRDMA_WC_RDMA_WRITE,
- PVRDMA_WC_RDMA_READ,
- PVRDMA_WC_COMP_SWAP,
- PVRDMA_WC_FETCH_ADD,
- PVRDMA_WC_BIND_MW,
- PVRDMA_WC_LSO,
- PVRDMA_WC_LOCAL_INV,
- PVRDMA_WC_FAST_REG_MR,
- PVRDMA_WC_MASKED_COMP_SWAP,
- PVRDMA_WC_MASKED_FETCH_ADD,
- PVRDMA_WC_RECV = 1 << 7,
- PVRDMA_WC_RECV_RDMA_WITH_IMM,
-};
-
-enum pvrdma_wc_flags {
- PVRDMA_WC_GRH = 1 << 0,
- PVRDMA_WC_WITH_IMM = 1 << 1,
- PVRDMA_WC_WITH_INVALIDATE = 1 << 2,
- PVRDMA_WC_IP_CSUM_OK = 1 << 3,
- PVRDMA_WC_WITH_SMAC = 1 << 4,
- PVRDMA_WC_WITH_VLAN = 1 << 5,
- PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
- PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
-};
-
-struct pvrdma_alloc_ucontext_resp {
- uint32_t qp_tab_size;
- uint32_t reserved;
-};
-
-struct pvrdma_alloc_pd_resp {
- uint32_t pdn;
- uint32_t reserved;
-};
-
-struct pvrdma_create_cq {
- uint64_t __attribute__((aligned(8))) buf_addr;
- uint32_t buf_size;
- uint32_t reserved;
-};
-
-struct pvrdma_create_cq_resp {
- uint32_t cqn;
- uint32_t reserved;
-};
-
-struct pvrdma_resize_cq {
- uint64_t __attribute__((aligned(8))) buf_addr;
- uint32_t buf_size;
- uint32_t reserved;
-};
-
-struct pvrdma_create_srq {
- uint64_t __attribute__((aligned(8))) buf_addr;
- uint32_t buf_size;
- uint32_t reserved;
-};
-
-struct pvrdma_create_srq_resp {
- uint32_t srqn;
- uint32_t reserved;
-};
-
-struct pvrdma_create_qp {
- uint64_t __attribute__((aligned(8))) rbuf_addr;
- uint64_t __attribute__((aligned(8))) sbuf_addr;
- uint32_t rbuf_size;
- uint32_t sbuf_size;
- uint64_t __attribute__((aligned(8))) qp_addr;
-};
-
-struct pvrdma_create_qp_resp {
- uint32_t qpn;
- uint32_t qp_handle;
-};
-
-/* PVRDMA masked atomic compare and swap */
-struct pvrdma_ex_cmp_swap {
- uint64_t __attribute__((aligned(8))) swap_val;
- uint64_t __attribute__((aligned(8))) compare_val;
- uint64_t __attribute__((aligned(8))) swap_mask;
- uint64_t __attribute__((aligned(8))) compare_mask;
-};
-
-/* PVRDMA masked atomic fetch and add */
-struct pvrdma_ex_fetch_add {
- uint64_t __attribute__((aligned(8))) add_val;
- uint64_t __attribute__((aligned(8))) field_boundary;
-};
-
-/* PVRDMA address vector. */
-struct pvrdma_av {
- uint32_t port_pd;
- uint32_t sl_tclass_flowlabel;
- uint8_t dgid[16];
- uint8_t src_path_bits;
- uint8_t gid_index;
- uint8_t stat_rate;
- uint8_t hop_limit;
- uint8_t dmac[6];
- uint8_t reserved[6];
-};
-
-/* PVRDMA scatter/gather entry */
-struct pvrdma_sge {
- uint64_t __attribute__((aligned(8))) addr;
- uint32_t length;
- uint32_t lkey;
-};
-
-/* PVRDMA receive queue work request */
-struct pvrdma_rq_wqe_hdr {
- uint64_t __attribute__((aligned(8))) wr_id; /* wr id */
- uint32_t num_sge; /* size of s/g array */
- uint32_t total_len; /* reserved */
-};
-/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
-
-/* PVRDMA send queue work request */
-struct pvrdma_sq_wqe_hdr {
- uint64_t __attribute__((aligned(8))) wr_id; /* wr id */
- uint32_t num_sge; /* size of s/g array */
- uint32_t total_len; /* reserved */
- uint32_t opcode; /* operation type */
- uint32_t send_flags; /* wr flags */
- union {
- uint32_t imm_data;
- uint32_t invalidate_rkey;
- } ex;
- uint32_t reserved;
- union {
- struct {
- uint64_t __attribute__((aligned(8))) remote_addr;
- uint32_t rkey;
- uint8_t reserved[4];
- } rdma;
- struct {
- uint64_t __attribute__((aligned(8))) remote_addr;
- uint64_t __attribute__((aligned(8))) compare_add;
- uint64_t __attribute__((aligned(8))) swap;
- uint32_t rkey;
- uint32_t reserved;
- } atomic;
- struct {
- uint64_t __attribute__((aligned(8))) remote_addr;
- uint32_t log_arg_sz;
- uint32_t rkey;
- union {
- struct pvrdma_ex_cmp_swap cmp_swap;
- struct pvrdma_ex_fetch_add fetch_add;
- } wr_data;
- } masked_atomics;
- struct {
- uint64_t __attribute__((aligned(8))) iova_start;
- uint64_t __attribute__((aligned(8))) pl_pdir_dma;
- uint32_t page_shift;
- uint32_t page_list_len;
- uint32_t length;
- uint32_t access_flags;
- uint32_t rkey;
- uint32_t reserved;
- } fast_reg;
- struct {
- uint32_t remote_qpn;
- uint32_t remote_qkey;
- struct pvrdma_av av;
- } ud;
- } wr;
-};
-/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
-
-/* Completion queue element. */
-struct pvrdma_cqe {
- uint64_t __attribute__((aligned(8))) wr_id;
- uint64_t __attribute__((aligned(8))) qp;
- uint32_t opcode;
- uint32_t status;
- uint32_t byte_len;
- uint32_t imm_data;
- uint32_t src_qp;
- uint32_t wc_flags;
- uint32_t vendor_err;
- uint16_t pkey_index;
- uint16_t slid;
- uint8_t sl;
- uint8_t dlid_path_bits;
- uint8_t port_num;
- uint8_t smac[6];
- uint8_t network_hdr_type;
- uint8_t reserved2[6]; /* Pad to next power of 2 (64). */
-};
-
-#endif /* __VMW_PVRDMA_ABI_H__ */
diff --git a/include/sysemu/accel-blocker.h b/include/sysemu/accel-blocker.h
new file mode 100644
index 0000000000..f07f368358
--- /dev/null
+++ b/include/sysemu/accel-blocker.h
@@ -0,0 +1,55 @@
+/*
+ * Accelerator blocking API, to prevent new ioctls from starting and to wait
+ * for the running ones to finish.
+ * This mechanism differs from pause/resume_all_vcpus() in that it does not
+ * release the BQL.
+ *
+ * Copyright (c) 2022 Red Hat Inc.
+ *
+ * Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef ACCEL_BLOCKER_H
+#define ACCEL_BLOCKER_H
+
+#include "sysemu/cpus.h"
+
+void accel_blocker_init(void);
+
+/*
+ * accel_{cpu_}ioctl_begin/end:
+ * Mark when ioctl is about to run or just finished.
+ *
+ * accel_{cpu_}ioctl_begin will block after accel_ioctl_inhibit_begin() is
+ * called, preventing new ioctls from running. They will continue only after
+ * accel_ioctl_inhibit_end().
+ */
+void accel_ioctl_begin(void);
+void accel_ioctl_end(void);
+void accel_cpu_ioctl_begin(CPUState *cpu);
+void accel_cpu_ioctl_end(CPUState *cpu);
+
+/*
+ * accel_ioctl_inhibit_begin: start critical section
+ *
+ * This function makes sure that:
+ * 1) incoming accel_{cpu_}ioctl_begin() calls block
+ * 2) all ioctls that were already running have reached
+ * accel_{cpu_}ioctl_end(), kicking vCPUs if necessary.
+ *
+ * This allows the caller to access shared data or perform operations without
+ * worrying about concurrent vCPU accesses.
+ */
+void accel_ioctl_inhibit_begin(void);
+
+/*
+ * accel_ioctl_inhibit_end: end critical section started by
+ * accel_ioctl_inhibit_begin()
+ *
+ * This function allows blocked accel_{cpu_}ioctl_begin() to continue.
+ */
+void accel_ioctl_inhibit_end(void);
+
+#endif /* ACCEL_BLOCKER_H */
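
The intended call pattern, sketched below with placeholder functions (do_accel_ioctl() and update_shared_accel_state() are not real QEMU symbols): vCPU threads bracket every accelerator ioctl with accel_cpu_ioctl_begin()/end(), while a BQL-holding caller uses the inhibit pair as a critical section:

void do_accel_ioctl(CPUState *cpu);       /* placeholder for the real ioctl */
void update_shared_accel_state(void);     /* placeholder critical section */

static void vcpu_thread_issue_ioctl(CPUState *cpu)
{
    accel_cpu_ioctl_begin(cpu);   /* blocks while an inhibit is in force */
    do_accel_ioctl(cpu);
    accel_cpu_ioctl_end(cpu);
}

static void bql_holder_update(void)
{
    accel_ioctl_inhibit_begin();  /* waits for in-flight ioctls to drain */
    update_shared_accel_state();
    accel_ioctl_inhibit_end();    /* unblocks waiting *_ioctl_begin() calls */
}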
diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h
new file mode 100644
index 0000000000..ef91fc28bb
--- /dev/null
+++ b/include/sysemu/accel-ops.h
@@ -0,0 +1,58 @@
+/*
+ * Accelerator OPS, used for cpus.c module
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef ACCEL_OPS_H
+#define ACCEL_OPS_H
+
+#include "exec/cpu-common.h"
+#include "qom/object.h"
+
+#define ACCEL_OPS_SUFFIX "-ops"
+#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX
+#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS)
+
+typedef struct AccelOpsClass AccelOpsClass;
+DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
+
+/* cpus.c operations interface */
+struct AccelOpsClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ /* initialization function called when accel is chosen */
+ void (*ops_init)(AccelOpsClass *ops);
+
+ bool (*cpus_are_resettable)(void);
+ void (*cpu_reset_hold)(CPUState *cpu);
+
+ void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
+ void (*kick_vcpu_thread)(CPUState *cpu);
+ bool (*cpu_thread_is_idle)(CPUState *cpu);
+
+ void (*synchronize_post_reset)(CPUState *cpu);
+ void (*synchronize_post_init)(CPUState *cpu);
+ void (*synchronize_state)(CPUState *cpu);
+ void (*synchronize_pre_loadvm)(CPUState *cpu);
+ void (*synchronize_pre_resume)(bool step_pending);
+
+ void (*handle_interrupt)(CPUState *cpu, int mask);
+
+ int64_t (*get_virtual_clock)(void);
+ int64_t (*get_elapsed_ticks)(void);
+
+ /* gdbstub hooks */
+ bool (*supports_guest_debug)(void);
+ int (*update_guest_debug)(CPUState *cpu);
+ int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
+ int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
+ void (*remove_all_breakpoints)(CPUState *cpu);
+};
+
+#endif /* ACCEL_OPS_H */
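
A hedged sketch of how an accelerator could register its ops class through QOM; "myaccel" is a made-up name, only the mandatory hook is filled in, and dummy_start_vcpu_thread() is the helper declared in sysemu/cpus.h:

static void myaccel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = dummy_start_vcpu_thread;  /* MANDATORY hook */
}

static const TypeInfo myaccel_ops_type = {
    .name = ACCEL_OPS_NAME("myaccel"),
    .parent = TYPE_ACCEL_OPS,
    .class_init = myaccel_ops_class_init,
};

static void myaccel_ops_register_types(void)
{
    type_register_static(&myaccel_ops_type);
}
type_init(myaccel_ops_register_types);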
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 54f069d491..8d041aa84e 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -9,7 +9,6 @@ enum {
QEMU_ARCH_CRIS = (1 << 2),
QEMU_ARCH_I386 = (1 << 3),
QEMU_ARCH_M68K = (1 << 4),
- QEMU_ARCH_LM32 = (1 << 5),
QEMU_ARCH_MICROBLAZE = (1 << 6),
QEMU_ARCH_MIPS = (1 << 7),
QEMU_ARCH_PPC = (1 << 8),
@@ -18,21 +17,17 @@ enum {
QEMU_ARCH_SPARC = (1 << 11),
QEMU_ARCH_XTENSA = (1 << 12),
QEMU_ARCH_OPENRISC = (1 << 13),
- QEMU_ARCH_UNICORE32 = (1 << 14),
- QEMU_ARCH_MOXIE = (1 << 15),
QEMU_ARCH_TRICORE = (1 << 16),
- QEMU_ARCH_NIOS2 = (1 << 17),
QEMU_ARCH_HPPA = (1 << 18),
QEMU_ARCH_RISCV = (1 << 19),
QEMU_ARCH_RX = (1 << 20),
QEMU_ARCH_AVR = (1 << 21),
-
- QEMU_ARCH_NONE = (1 << 31),
+ QEMU_ARCH_HEXAGON = (1 << 22),
+ QEMU_ARCH_LOONGARCH = (1 << 23),
};
extern const uint32_t arch_type;
-int kvm_available(void);
-int xen_available(void);
+void qemu_init_arch_modules(void);
#endif
diff --git a/include/sysemu/balloon.h b/include/sysemu/balloon.h
index 20a2defe3a..867687b73a 100644
--- a/include/sysemu/balloon.h
+++ b/include/sysemu/balloon.h
@@ -15,7 +15,7 @@
#define QEMU_BALLOON_H
#include "exec/cpu-common.h"
-#include "qapi/qapi-types-misc.h"
+#include "qapi/qapi-types-machine.h"
typedef void (QEMUBalloonEvent)(void *opaque, ram_addr_t target);
typedef void (QEMUBalloonStatus)(void *opaque, BalloonInfo *info);
diff --git a/include/sysemu/block-backend-common.h b/include/sysemu/block-backend-common.h
new file mode 100644
index 0000000000..780cea7305
--- /dev/null
+++ b/include/sysemu/block-backend-common.h
@@ -0,0 +1,103 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_COMMON_H
+#define BLOCK_BACKEND_COMMON_H
+
+#include "qemu/iov.h"
+#include "block/throttle-groups.h"
+
+/*
+ * TODO Have to include block/block.h for a bunch of block layer
+ * types. Unfortunately, this pulls in the whole BlockDriverState
+ * API, which we don't want used by many BlockBackend users. Some of
+ * the types belong here, and the rest should be split into a common
+ * header and one for the BlockDriverState API.
+ */
+#include "block/block.h"
+
+/* Callbacks for block device models */
+typedef struct BlockDevOps {
+
+ /*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+ /*
+ * Runs when virtual media changed (monitor commands eject, change)
+ * Argument load is true on load and false on eject.
+ * Beware: doesn't run when a host device's physical media
+ * changes. Sure would be useful if it did.
+ * Device models with removable media must implement this callback.
+ */
+ void (*change_media_cb)(void *opaque, bool load, Error **errp);
+ /*
+ * Runs when an eject request is issued from the monitor, the tray
+ * is closed, and the medium is locked.
+ * Device models that do not implement is_medium_locked will not need
+ * this callback. Device models that can lock the medium or tray might
+ * want to implement the callback and unlock the tray when "force" is
+ * true, even if they do not support eject requests.
+ */
+ void (*eject_request_cb)(void *opaque, bool force);
+
+ /*
+ * Is the virtual medium locked into the device?
+ * Device models implement this only when device has such a lock.
+ */
+ bool (*is_medium_locked)(void *opaque);
+
+ /*
+ * Runs when the backend receives a drain request.
+ */
+ void (*drained_begin)(void *opaque);
+ /*
+ * Runs when the backend's last drain request ends.
+ */
+ void (*drained_end)(void *opaque);
+ /*
+ * Is the device still busy?
+ */
+ bool (*drained_poll)(void *opaque);
+
+ /*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+ /*
+ * Is the virtual tray open?
+ * Device models implement this only when the device has a tray.
+ */
+ bool (*is_tray_open)(void *opaque);
+
+ /*
+ * Runs when the size changed (e.g. monitor command block_resize)
+ */
+ void (*resize_cb)(void *opaque);
+} BlockDevOps;
+
+/*
+ * This struct is embedded in (the private) BlockBackend struct and contains
+ * fields that must be public. This is in particular for QLIST_ENTRY() and
+ * friends so that BlockBackends can be kept in lists outside block-backend.c
+ */
+typedef struct BlockBackendPublic {
+ ThrottleGroupMember throttle_group_member;
+} BlockBackendPublic;
+
+#endif /* BLOCK_BACKEND_COMMON_H */
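
A minimal sketch of how a device model might use this interface; MyDiskDevice and its callbacks are placeholders, and blk_set_dev_ops() is declared in block-backend-global-state.h:

typedef struct MyDiskDevice {
    BlockBackend *blk;
    bool tray_open;
} MyDiskDevice;

/* GS API callback: runs under the BQL on eject/change. */
static void my_disk_change_media_cb(void *opaque, bool load, Error **errp)
{
    MyDiskDevice *dev = opaque;

    dev->tray_open = !load;   /* toy model: tray closes when media is loaded */
}

/* I/O API callback: must be thread-safe. */
static bool my_disk_is_tray_open(void *opaque)
{
    MyDiskDevice *dev = opaque;

    return dev->tray_open;
}

static const BlockDevOps my_disk_block_ops = {
    .change_media_cb = my_disk_change_media_cb,
    .is_tray_open    = my_disk_is_tray_open,
};

static void my_disk_realize(MyDiskDevice *dev)
{
    blk_set_dev_ops(dev->blk, &my_disk_block_ops, dev);
}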
diff --git a/include/sysemu/block-backend-global-state.h b/include/sysemu/block-backend-global-state.h
new file mode 100644
index 0000000000..49c12b0fa9
--- /dev/null
+++ b/include/sysemu/block-backend-global-state.h
@@ -0,0 +1,133 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_GLOBAL_STATE_H
+#define BLOCK_BACKEND_GLOBAL_STATE_H
+
+#include "block-backend-common.h"
+
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm);
+
+BlockBackend * no_coroutine_fn
+blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
+ Error **errp);
+
+BlockBackend * coroutine_fn no_co_wrapper
+blk_co_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm,
+ Error **errp);
+
+BlockBackend * no_coroutine_fn
+blk_new_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+BlockBackend * coroutine_fn no_co_wrapper
+blk_co_new_open(const char *filename, const char *reference, QDict *options,
+ int flags, Error **errp);
+
+int blk_get_refcnt(BlockBackend *blk);
+void blk_ref(BlockBackend *blk);
+
+void no_coroutine_fn blk_unref(BlockBackend *blk);
+void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk);
+
+void blk_remove_all_bs(void);
+BlockBackend *blk_by_name(const char *name);
+BlockBackend *blk_next(BlockBackend *blk);
+BlockBackend *blk_all_next(BlockBackend *blk);
+bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
+void monitor_remove_blk(BlockBackend *blk);
+
+BlockBackendPublic *blk_get_public(BlockBackend *blk);
+BlockBackend *blk_by_public(BlockBackendPublic *public);
+
+void blk_remove_bs(BlockBackend *blk);
+int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
+int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp);
+bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs);
+bool GRAPH_RDLOCK bdrv_is_root_node(BlockDriverState *bs);
+int GRAPH_UNLOCKED blk_set_perm(BlockBackend *blk, uint64_t perm,
+ uint64_t shared_perm, Error **errp);
+void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
+
+void blk_iostatus_enable(BlockBackend *blk);
+BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
+void blk_iostatus_disable(BlockBackend *blk);
+void blk_iostatus_reset(BlockBackend *blk);
+int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
+void blk_detach_dev(BlockBackend *blk, DeviceState *dev);
+DeviceState *blk_get_attached_dev(BlockBackend *blk);
+BlockBackend *blk_by_dev(void *dev);
+BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
+void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
+
+void blk_activate(BlockBackend *blk, Error **errp);
+
+int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
+void blk_aio_cancel(BlockAIOCB *acb);
+int blk_commit_all(void);
+bool blk_in_drain(BlockBackend *blk);
+void blk_drain(BlockBackend *blk);
+void blk_drain_all(void);
+void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
+ BlockdevOnError on_write_error);
+bool blk_supports_write_perm(BlockBackend *blk);
+bool blk_is_sg(BlockBackend *blk);
+void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
+int blk_get_flags(BlockBackend *blk);
+bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
+void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
+void blk_op_block_all(BlockBackend *blk, Error *reason);
+void blk_op_unblock_all(BlockBackend *blk, Error *reason);
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+ Error **errp);
+void blk_add_aio_context_notifier(BlockBackend *blk,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque);
+void blk_remove_aio_context_notifier(BlockBackend *blk,
+ void (*attached_aio_context)(AioContext *,
+ void *),
+ void (*detach_aio_context)(void *),
+ void *opaque);
+void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
+void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
+BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
+void blk_update_root_state(BlockBackend *blk);
+bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);
+int blk_get_open_flags_from_root_state(BlockBackend *blk);
+
+int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
+ int64_t pos, int size);
+int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
+int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
+int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
+
+void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
+void blk_io_limits_disable(BlockBackend *blk);
+void blk_io_limits_enable(BlockBackend *blk, const char *group);
+void blk_io_limits_update_group(BlockBackend *blk, const char *group);
+void blk_set_force_allow_inactivate(BlockBackend *blk);
+
+bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp);
+void blk_unregister_buf(BlockBackend *blk, void *host, size_t size);
+
+const BdrvChild *blk_root(BlockBackend *blk);
+
+int blk_make_empty(BlockBackend *blk, Error **errp);
+
+#endif /* BLOCK_BACKEND_GLOBAL_STATE_H */
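
A usage sketch under the BQL: open an image with blk_new_open() and enable iostatus tracking. BDRV_O_RDWR is a block-layer open flag defined outside this header:

static BlockBackend *open_disk_image(const char *filename, Error **errp)
{
    BlockBackend *blk;

    blk = blk_new_open(filename, NULL /* no node reference */,
                       NULL /* default options */, BDRV_O_RDWR, errp);
    if (!blk) {
        return NULL;              /* errp was set by blk_new_open() */
    }
    blk_iostatus_enable(blk);     /* optional: report I/O status to the monitor */
    return blk;
}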
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
new file mode 100644
index 0000000000..d174275a5c
--- /dev/null
+++ b/include/sysemu/block-backend-io.h
@@ -0,0 +1,230 @@
+/*
+ * QEMU Block backends
+ *
+ * Copyright (C) 2014-2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1
+ * or later. See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BLOCK_BACKEND_IO_H
+#define BLOCK_BACKEND_IO_H
+
+#include "block-backend-common.h"
+#include "block/accounting.h"
+
+/*
+ * I/O API functions. These functions are thread-safe.
+ *
+ * See include/block/block-io.h for more information about
+ * the I/O API.
+ */
+
+const char *blk_name(const BlockBackend *blk);
+
+BlockDriverState *blk_bs(BlockBackend *blk);
+
+void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
+void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
+void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
+bool blk_iostatus_is_enabled(const BlockBackend *blk);
+
+char *blk_get_attached_dev_id(BlockBackend *blk);
+
+BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+
+BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_flush(BlockBackend *blk,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes,
+ BlockCompletionFunc *cb, void *opaque);
+void blk_aio_cancel_async(BlockAIOCB *acb);
+BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
+ BlockCompletionFunc *cb, void *opaque);
+
+void blk_inc_in_flight(BlockBackend *blk);
+void blk_dec_in_flight(BlockBackend *blk);
+
+bool coroutine_fn GRAPH_RDLOCK blk_co_is_inserted(BlockBackend *blk);
+bool co_wrapper_mixed_bdrv_rdlock blk_is_inserted(BlockBackend *blk);
+
+bool coroutine_fn GRAPH_RDLOCK blk_co_is_available(BlockBackend *blk);
+bool co_wrapper_mixed_bdrv_rdlock blk_is_available(BlockBackend *blk);
+
+void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked);
+void co_wrapper blk_lock_medium(BlockBackend *blk, bool locked);
+
+void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag);
+void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag);
+
+int64_t coroutine_fn blk_co_getlength(BlockBackend *blk);
+int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk);
+
+void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
+ uint64_t *nb_sectors_ptr);
+void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
+
+int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk);
+int64_t blk_nb_sectors(BlockBackend *blk);
+
+void *blk_try_blockalign(BlockBackend *blk, size_t size);
+void *blk_blockalign(BlockBackend *blk, size_t size);
+bool blk_is_writable(BlockBackend *blk);
+bool blk_enable_write_cache(BlockBackend *blk);
+BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
+BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
+ int error);
+void blk_error_action(BlockBackend *blk, BlockErrorAction action,
+ bool is_read, int error);
+void blk_iostatus_set_err(BlockBackend *blk, int error);
+int blk_get_max_iov(BlockBackend *blk);
+int blk_get_max_hw_iov(BlockBackend *blk);
+
+AioContext *blk_get_aio_context(BlockBackend *blk);
+BlockAcctStats *blk_get_stats(BlockBackend *blk);
+void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
+ BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
+ BlockCompletionFunc *cb,
+ void *opaque, int ret);
+
+uint32_t blk_get_request_alignment(BlockBackend *blk);
+uint32_t blk_get_max_transfer(BlockBackend *blk);
+uint64_t blk_get_max_hw_transfer(BlockBackend *blk);
+
+int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
+ BlockBackend *blk_out, int64_t off_out,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
+
+int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
+ BlockDriverState *base,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
+ BlockDriverState **file);
+int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
+ BlockDriverState *base,
+ bool include_base, int64_t offset,
+ int64_t bytes, int64_t *pnum);
+
+/*
+ * "I/O or GS" API functions. These functions can run without
+ * the BQL, but only in one specific iothread/main loop.
+ *
+ * See include/block/block-io.h for more information about
+ * the "I/O or GS" API.
+ */
+
+int co_wrapper_mixed blk_pread(BlockBackend *blk, int64_t offset,
+ int64_t bytes, void *buf,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
+ void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_preadv(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_preadv_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwrite(BlockBackend *blk, int64_t offset,
+ int64_t bytes, const void *buf,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
+ const void *buf, BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwritev(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes, QEMUIOVector *qiov,
+ size_t qiov_offset,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pwrite_compressed(BlockBackend *blk,
+ int64_t offset, int64_t bytes,
+ const void *buf);
+int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
+ int64_t bytes, const void *buf);
+
+int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes,
+ BdrvRequestFlags flags);
+int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags);
+
+int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones);
+int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len);
+int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
+ int64_t offset, int64_t len);
+int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
+
+int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset,
+ int64_t bytes);
+int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
+ int64_t bytes);
+
+int co_wrapper_mixed blk_flush(BlockBackend *blk);
+int coroutine_fn blk_co_flush(BlockBackend *blk);
+
+int co_wrapper_mixed blk_ioctl(BlockBackend *blk, unsigned long int req,
+ void *buf);
+int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
+ void *buf);
+
+int co_wrapper_mixed blk_truncate(BlockBackend *blk, int64_t offset,
+ bool exact, PreallocMode prealloc,
+ BdrvRequestFlags flags, Error **errp);
+int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
+ PreallocMode prealloc, BdrvRequestFlags flags,
+ Error **errp);
+
+#endif /* BLOCK_BACKEND_IO_H */
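
A small coroutine sketch of the byte-based helpers above: read the first 512 bytes of a backend and write them back unchanged (flags of 0 request plain I/O). It must run in coroutine context:

static int coroutine_fn copy_first_sector_co(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    ret = blk_co_pread(blk, 0, sizeof(buf), buf, 0);
    if (ret < 0) {
        return ret;
    }
    return blk_co_pwrite(blk, 0, sizeof(buf), buf, 0);
}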
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index 8203d7f6f9..038be9fc40 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -13,259 +13,9 @@
#ifndef BLOCK_BACKEND_H
#define BLOCK_BACKEND_H
-#include "qemu/iov.h"
-#include "block/throttle-groups.h"
+#include "block-backend-global-state.h"
+#include "block-backend-io.h"
-/*
- * TODO Have to include block/block.h for a bunch of block layer
- * types. Unfortunately, this pulls in the whole BlockDriverState
- * API, which we don't want used by many BlockBackend users. Some of
- * the types belong here, and the rest should be split into a common
- * header and one for the BlockDriverState API.
- */
-#include "block/block.h"
-
-/* Callbacks for block device models */
-typedef struct BlockDevOps {
- /*
- * Runs when virtual media changed (monitor commands eject, change)
- * Argument load is true on load and false on eject.
- * Beware: doesn't run when a host device's physical media
- * changes. Sure would be useful if it did.
- * Device models with removable media must implement this callback.
- */
- void (*change_media_cb)(void *opaque, bool load, Error **errp);
- /*
- * Runs when an eject request is issued from the monitor, the tray
- * is closed, and the medium is locked.
- * Device models that do not implement is_medium_locked will not need
- * this callback. Device models that can lock the medium or tray might
- * want to implement the callback and unlock the tray when "force" is
- * true, even if they do not support eject requests.
- */
- void (*eject_request_cb)(void *opaque, bool force);
- /*
- * Is the virtual tray open?
- * Device models implement this only when the device has a tray.
- */
- bool (*is_tray_open)(void *opaque);
- /*
- * Is the virtual medium locked into the device?
- * Device models implement this only when device has such a lock.
- */
- bool (*is_medium_locked)(void *opaque);
- /*
- * Runs when the size changed (e.g. monitor command block_resize)
- */
- void (*resize_cb)(void *opaque);
- /*
- * Runs when the backend receives a drain request.
- */
- void (*drained_begin)(void *opaque);
- /*
- * Runs when the backend's last drain request ends.
- */
- void (*drained_end)(void *opaque);
-} BlockDevOps;
-
-/* This struct is embedded in (the private) BlockBackend struct and contains
- * fields that must be public. This is in particular for QLIST_ENTRY() and
- * friends so that BlockBackends can be kept in lists outside block-backend.c
- * */
-typedef struct BlockBackendPublic {
- ThrottleGroupMember throttle_group_member;
-} BlockBackendPublic;
-
-BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm);
-BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
- uint64_t shared_perm, Error **errp);
-BlockBackend *blk_new_open(const char *filename, const char *reference,
- QDict *options, int flags, Error **errp);
-int blk_get_refcnt(BlockBackend *blk);
-void blk_ref(BlockBackend *blk);
-void blk_unref(BlockBackend *blk);
-void blk_remove_all_bs(void);
-const char *blk_name(const BlockBackend *blk);
-BlockBackend *blk_by_name(const char *name);
-BlockBackend *blk_next(BlockBackend *blk);
-BlockBackend *blk_all_next(BlockBackend *blk);
-bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
-void monitor_remove_blk(BlockBackend *blk);
-
-BlockBackendPublic *blk_get_public(BlockBackend *blk);
-BlockBackend *blk_by_public(BlockBackendPublic *public);
-
-BlockDriverState *blk_bs(BlockBackend *blk);
-void blk_remove_bs(BlockBackend *blk);
-int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
-bool bdrv_has_blk(BlockDriverState *bs);
-bool bdrv_is_root_node(BlockDriverState *bs);
-int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
- Error **errp);
-void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
-
-void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
-void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
-void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
-void blk_iostatus_enable(BlockBackend *blk);
-bool blk_iostatus_is_enabled(const BlockBackend *blk);
-BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
-void blk_iostatus_disable(BlockBackend *blk);
-void blk_iostatus_reset(BlockBackend *blk);
-void blk_iostatus_set_err(BlockBackend *blk, int error);
-int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
-void blk_detach_dev(BlockBackend *blk, DeviceState *dev);
-DeviceState *blk_get_attached_dev(BlockBackend *blk);
-char *blk_get_attached_dev_id(BlockBackend *blk);
-BlockBackend *blk_by_dev(void *dev);
-BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
-void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
-int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
- unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
- unsigned int bytes,
- QEMUIOVector *qiov, size_t qiov_offset,
- BdrvRequestFlags flags);
-int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
- unsigned int bytes, QEMUIOVector *qiov,
- BdrvRequestFlags flags);
-
-static inline int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset,
- unsigned int bytes, void *buf,
- BdrvRequestFlags flags)
-{
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
-
- return blk_co_preadv(blk, offset, bytes, &qiov, flags);
-}
-
-static inline int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset,
- unsigned int bytes, void *buf,
- BdrvRequestFlags flags)
-{
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
-
- return blk_co_pwritev(blk, offset, bytes, &qiov, flags);
-}
-
-int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int bytes, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
-int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes);
-int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes,
- BdrvRequestFlags flags);
-int64_t blk_getlength(BlockBackend *blk);
-void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
-int64_t blk_nb_sectors(BlockBackend *blk);
-BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
- QEMUIOVector *qiov, BdrvRequestFlags flags,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_flush(BlockBackend *blk,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque);
-void blk_aio_cancel(BlockAIOCB *acb);
-void blk_aio_cancel_async(BlockAIOCB *acb);
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
-BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
- BlockCompletionFunc *cb, void *opaque);
-int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes);
-int blk_co_flush(BlockBackend *blk);
-int blk_flush(BlockBackend *blk);
-int blk_commit_all(void);
-void blk_inc_in_flight(BlockBackend *blk);
-void blk_dec_in_flight(BlockBackend *blk);
-void blk_drain(BlockBackend *blk);
-void blk_drain_all(void);
-void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
- BlockdevOnError on_write_error);
-BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
-BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
- int error);
-void blk_error_action(BlockBackend *blk, BlockErrorAction action,
- bool is_read, int error);
-bool blk_is_read_only(BlockBackend *blk);
-bool blk_is_sg(BlockBackend *blk);
-bool blk_enable_write_cache(BlockBackend *blk);
-void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
-void blk_invalidate_cache(BlockBackend *blk, Error **errp);
-bool blk_is_inserted(BlockBackend *blk);
-bool blk_is_available(BlockBackend *blk);
-void blk_lock_medium(BlockBackend *blk, bool locked);
-void blk_eject(BlockBackend *blk, bool eject_flag);
-int blk_get_flags(BlockBackend *blk);
-uint32_t blk_get_request_alignment(BlockBackend *blk);
-uint32_t blk_get_max_transfer(BlockBackend *blk);
-int blk_get_max_iov(BlockBackend *blk);
-void blk_set_guest_block_size(BlockBackend *blk, int align);
-void *blk_try_blockalign(BlockBackend *blk, size_t size);
-void *blk_blockalign(BlockBackend *blk, size_t size);
-bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
-void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
-void blk_op_block_all(BlockBackend *blk, Error *reason);
-void blk_op_unblock_all(BlockBackend *blk, Error *reason);
-AioContext *blk_get_aio_context(BlockBackend *blk);
-int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
- Error **errp);
-void blk_add_aio_context_notifier(BlockBackend *blk,
- void (*attached_aio_context)(AioContext *new_context, void *opaque),
- void (*detach_aio_context)(void *opaque), void *opaque);
-void blk_remove_aio_context_notifier(BlockBackend *blk,
- void (*attached_aio_context)(AioContext *,
- void *),
- void (*detach_aio_context)(void *),
- void *opaque);
-void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
-void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
-void blk_io_plug(BlockBackend *blk);
-void blk_io_unplug(BlockBackend *blk);
-BlockAcctStats *blk_get_stats(BlockBackend *blk);
-BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
-void blk_update_root_state(BlockBackend *blk);
-bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);
-int blk_get_open_flags_from_root_state(BlockBackend *blk);
-
-void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
- BlockCompletionFunc *cb, void *opaque);
-int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int bytes, BdrvRequestFlags flags);
-int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
- int bytes);
-int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
- PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
-int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes);
-int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
- int64_t pos, int size);
-int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
-int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz);
-int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo);
-BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
- BlockCompletionFunc *cb,
- void *opaque, int ret);
-
-void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
-void blk_io_limits_disable(BlockBackend *blk);
-void blk_io_limits_enable(BlockBackend *blk, const char *group);
-void blk_io_limits_update_group(BlockBackend *blk, const char *group);
-void blk_set_force_allow_inactivate(BlockBackend *blk);
-
-void blk_register_buf(BlockBackend *blk, void *host, size_t size);
-void blk_unregister_buf(BlockBackend *blk, void *host);
-
-int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
- BlockBackend *blk_out, int64_t off_out,
- int bytes, BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags);
-
-const BdrvChild *blk_root(BlockBackend *blk);
-
-int blk_make_empty(BlockBackend *blk, Error **errp);
+/* DO NOT ADD ANYTHING IN HERE. USE ONE OF THE HEADERS INCLUDED ABOVE */
#endif
diff --git a/include/sysemu/block-ram-registrar.h b/include/sysemu/block-ram-registrar.h
new file mode 100644
index 0000000000..d8b2f7942b
--- /dev/null
+++ b/include/sysemu/block-ram-registrar.h
@@ -0,0 +1,37 @@
+/*
+ * BlockBackend RAM Registrar
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef BLOCK_RAM_REGISTRAR_H
+#define BLOCK_RAM_REGISTRAR_H
+
+#include "exec/ramlist.h"
+
+/**
+ * struct BlockRAMRegistrar:
+ *
+ * Keeps RAMBlock memory registered with a BlockBackend using
+ * blk_register_buf() including hotplugged memory.
+ *
+ * Emulated devices or other BlockBackend users initialize a BlockRAMRegistrar
+ * with blk_ram_registrar_init() before submitting I/O requests with the
+ * BDRV_REQ_REGISTERED_BUF flag set.
+ */
+typedef struct {
+ BlockBackend *blk;
+ RAMBlockNotifier notifier;
+ bool ok;
+} BlockRAMRegistrar;
+
+void blk_ram_registrar_init(BlockRAMRegistrar *r, BlockBackend *blk);
+void blk_ram_registrar_destroy(BlockRAMRegistrar *r);
+
+/* Have all RAMBlocks been registered successfully? */
+static inline bool blk_ram_registrar_ok(BlockRAMRegistrar *r)
+{
+ return r->ok;
+}
+
+#endif /* BLOCK_RAM_REGISTRAR_H */
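
The intended pattern, sketched with a placeholder device struct: register guest RAM at init time and only request BDRV_REQ_REGISTERED_BUF (a block-layer request flag defined elsewhere) when registration succeeded:

typedef struct MyDmaDevice {
    BlockBackend *blk;
    BlockRAMRegistrar ram_registrar;
} MyDmaDevice;

static void my_dma_device_init(MyDmaDevice *dev)
{
    blk_ram_registrar_init(&dev->ram_registrar, dev->blk);
}

static BdrvRequestFlags my_dma_request_flags(MyDmaDevice *dev)
{
    /* Fall back to bounce buffers when RAM registration failed. */
    return blk_ram_registrar_ok(&dev->ram_registrar) ?
           BDRV_REQ_REGISTERED_BUF : 0;
}

static void my_dma_device_fini(MyDmaDevice *dev)
{
    blk_ram_registrar_destroy(&dev->ram_registrar);
}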
diff --git a/include/sysemu/blockdev.h b/include/sysemu/blockdev.h
index 3b5fcda08d..3211b16513 100644
--- a/include/sysemu/blockdev.h
+++ b/include/sysemu/blockdev.h
@@ -13,9 +13,6 @@
#include "block/block.h"
#include "qemu/queue.h"
-void blockdev_mark_auto_del(BlockBackend *blk);
-void blockdev_auto_del(BlockBackend *blk);
-
typedef enum {
IF_DEFAULT = -1, /* for use with drive_add() only */
/*
@@ -35,10 +32,19 @@ struct DriveInfo {
bool is_default; /* Added by default_drive() ? */
int media_cd;
QemuOpts *opts;
- bool claimed_by_board;
QTAILQ_ENTRY(DriveInfo) next;
};
+/*
+ * Global state (GS) API. These functions run under the BQL.
+ *
+ * See include/block/block-global-state.h for more information about
+ * the GS API.
+ */
+
+void blockdev_mark_auto_del(BlockBackend *blk);
+void blockdev_auto_del(BlockBackend *blk);
+
DriveInfo *blk_legacy_dinfo(BlockBackend *blk);
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo);
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo);
@@ -46,14 +52,10 @@ BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo);
void override_max_devs(BlockInterfaceType type, int max_devs);
DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit);
-void drive_mark_claimed_by_board(void);
void drive_check_orphaned(void);
DriveInfo *drive_get_by_index(BlockInterfaceType type, int index);
int drive_get_max_bus(BlockInterfaceType type);
-int drive_get_max_devs(BlockInterfaceType type);
-DriveInfo *drive_get_next(BlockInterfaceType type);
-QemuOpts *drive_def(const char *optstr);
QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
const char *optstr);
DriveInfo *drive_new(QemuOpts *arg, BlockInterfaceType block_default_type,
diff --git a/include/sysemu/cpu-timers-internal.h b/include/sysemu/cpu-timers-internal.h
new file mode 100644
index 0000000000..94bb7394c5
--- /dev/null
+++ b/include/sysemu/cpu-timers-internal.h
@@ -0,0 +1,71 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TIMERS_STATE_H
+#define TIMERS_STATE_H
+
+/* timers state, for sharing between icount and cpu-timers */
+
+typedef struct TimersState {
+ /* Protected by BQL. */
+ int64_t cpu_ticks_prev;
+ int64_t cpu_ticks_offset;
+
+ /*
+ * Protect fields that can be respectively read outside the
+ * BQL, and written from multiple threads.
+ */
+ QemuSeqLock vm_clock_seqlock;
+ QemuSpin vm_clock_lock;
+
+ int16_t cpu_ticks_enabled;
+
+ /* Conversion factor from emulated instructions to virtual clock ticks. */
+ int16_t icount_time_shift;
+ /* Icount delta used for shift auto adjust. */
+ int64_t last_delta;
+
+ /* Compensate for varying guest execution speed. */
+ aligned_int64_t qemu_icount_bias;
+
+ int64_t vm_clock_warp_start;
+ int64_t cpu_clock_offset;
+
+ /* Only written by TCG thread */
+ int64_t qemu_icount;
+
+ /* for adjusting icount */
+ QEMUTimer *icount_rt_timer;
+ QEMUTimer *icount_vm_timer;
+ QEMUTimer *icount_warp_timer;
+} TimersState;
+
+extern TimersState timers_state;
+
+/*
+ * icount needs this cpu-timers internal function when adjusting the icount shift.
+ */
+int64_t cpu_get_clock_locked(void);
+
+#endif /* TIMERS_STATE_H */
diff --git a/include/sysemu/cpu-timers.h b/include/sysemu/cpu-timers.h
new file mode 100644
index 0000000000..d86738a378
--- /dev/null
+++ b/include/sysemu/cpu-timers.h
@@ -0,0 +1,103 @@
+/*
+ * CPU timers state API
+ *
+ * Copyright 2020 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef SYSEMU_CPU_TIMERS_H
+#define SYSEMU_CPU_TIMERS_H
+
+#include "qemu/timer.h"
+
+/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */
+void cpu_timers_init(void);
+
+/* icount - Instruction Counter API */
+
+/**
+ * ICountMode: icount enablement state:
+ *
+ * @ICOUNT_DISABLED: Disabled - Do not count executed instructions.
+ * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option
+ * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift
+ */
+typedef enum {
+ ICOUNT_DISABLED = 0,
+ ICOUNT_PRECISE,
+ ICOUNT_ADAPTATIVE,
+} ICountMode;
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+extern ICountMode use_icount;
+#define icount_enabled() (use_icount)
+#else
+#define icount_enabled() ICOUNT_DISABLED
+#endif
+
+/*
+ * Update the icount with the executed instructions. Called by the
+ * TCG vCPU thread so the main loop can see that time has moved forward.
+ */
+void icount_update(CPUState *cpu);
+
+/* get raw icount value */
+int64_t icount_get_raw(void);
+
+/* return the virtual CPU time in ns, based on the instruction counter. */
+int64_t icount_get(void);
+/*
+ * convert an instruction counter value to ns, based on the icount shift.
+ * This shift is set as a fixed value with the icount "shift" option
+ * (precise mode), or it is constantly approximated and corrected at
+ * runtime in adaptive mode.
+ */
+int64_t icount_to_ns(int64_t icount);
+
+/**
+ * icount_configure: configure the icount options, including "shift"
+ * @opts: Options to parse
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Return: true on success, else false setting @errp with error
+ */
+bool icount_configure(QemuOpts *opts, Error **errp);
+
+/* used by tcg vcpu thread to calc icount budget */
+int64_t icount_round(int64_t count);
+
+/* if the CPUs are idle, start accounting real time to virtual clock. */
+void icount_start_warp_timer(void);
+void icount_account_warp_timer(void);
+void icount_notify_exit(void);
+
+/*
+ * CPU Ticks and Clock
+ */
+
+/* Caller must hold BQL */
+void cpu_enable_ticks(void);
+/* Caller must hold BQL */
+void cpu_disable_ticks(void);
+
+/*
+ * return the time elapsed in VM between vm_start and vm_stop.
+ * cpu_get_ticks() uses units of the host CPU cycle counter.
+ */
+int64_t cpu_get_ticks(void);
+
+/*
+ * Returns the monotonic time elapsed in VM, i.e.,
+ * the time between vm_start and vm_stop
+ */
+int64_t cpu_get_clock(void);
+
+void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
+
+/* get the VIRTUAL clock and VM elapsed ticks via the cpus accel interface */
+int64_t cpus_get_virtual_clock(void);
+int64_t cpus_get_elapsed_ticks(void);
+
+#endif /* SYSEMU_CPU_TIMERS_H */
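
A hedged sketch of the icount API above: convert a number of executed instructions to virtual-clock nanoseconds, or report -1 when icount is disabled:

static int64_t insns_to_virtual_ns(int64_t insns)
{
    if (icount_enabled() == ICOUNT_DISABLED) {
        return -1;    /* icount not active, no fixed insn-to-ns mapping */
    }
    return icount_to_ns(insns);
}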
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index 3c1da6a018..b4a566cfe7 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -1,43 +1,53 @@
#ifndef QEMU_CPUS_H
#define QEMU_CPUS_H
-#include "qemu/timer.h"
+#include "sysemu/accel-ops.h"
+
+/* register accel-specific operations */
+void cpus_register_accel(const AccelOpsClass *i);
+
+/* return registers ops */
+const AccelOpsClass *cpus_get_accel(void);
+
+/* accel/dummy-cpus.c */
+
+/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */
+void dummy_start_vcpu_thread(CPUState *);
+
+/* interface available for cpus accelerator threads */
+
+/* For temporary buffers for forming a name */
+#define VCPU_THREAD_NAME_SIZE 16
+
+void cpus_kick_thread(CPUState *cpu);
+bool cpu_work_list_empty(CPUState *cpu);
+bool cpu_thread_is_idle(CPUState *cpu);
+bool all_cpu_threads_idle(void);
+bool cpu_can_run(CPUState *cpu);
+void qemu_wait_io_event_common(CPUState *cpu);
+void qemu_wait_io_event(CPUState *cpu);
+void cpu_thread_signal_created(CPUState *cpu);
+void cpu_thread_signal_destroyed(CPUState *cpu);
+void cpu_handle_guest_debug(CPUState *cpu);
+
+/* end interface for cpus accelerator threads */
-/* cpus.c */
bool qemu_in_vcpu_thread(void);
void qemu_init_cpu_loop(void);
void resume_all_vcpus(void);
void pause_all_vcpus(void);
void cpu_stop_current(void);
-void cpu_ticks_init(void);
-void configure_icount(QemuOpts *opts, Error **errp);
-extern int use_icount;
extern int icount_align_option;
-/* drift information for info jit command */
-extern int64_t max_delay;
-extern int64_t max_advance;
-void dump_drift_info(void);
-
/* Unblock cpu */
void qemu_cpu_kick_self(void);
-void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
+
+bool cpus_are_resettable(void);
void cpu_synchronize_all_states(void);
void cpu_synchronize_all_post_reset(void);
void cpu_synchronize_all_post_init(void);
void cpu_synchronize_all_pre_loadvm(void);
-void qtest_clock_warp(int64_t dest);
-
-#ifndef CONFIG_USER_ONLY
-/* vl.c */
-/* *-user doesn't have configurable SMP topology */
-extern int smp_cores;
-extern int smp_threads;
-#endif
-
-void list_cpus(const char *optarg);
-
#endif
diff --git a/include/sysemu/cryptodev-vhost-user.h b/include/sysemu/cryptodev-vhost-user.h
index 0d3421e7e8..60710502c2 100644
--- a/include/sysemu/cryptodev-vhost-user.h
+++ b/include/sysemu/cryptodev-vhost-user.h
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/sysemu/cryptodev-vhost.h b/include/sysemu/cryptodev-vhost.h
index f42824fbde..4c3c22acae 100644
--- a/include/sysemu/cryptodev-vhost.h
+++ b/include/sysemu/cryptodev-vhost.h
@@ -10,7 +10,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -79,7 +79,7 @@ cryptodev_vhost_init(
* cryptodev_vhost_cleanup:
* @crypto: the cryptodev backend common vhost object
*
- * Clean the resouce associated with @crypto that realizaed
+ * Clean the resource associated with @crypto that was realized
* by cryptodev_vhost_init()
*
*/
diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h
index 35eab06d0e..96d3998b93 100644
--- a/include/sysemu/cryptodev.h
+++ b/include/sysemu/cryptodev.h
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -24,7 +24,9 @@
#define CRYPTODEV_H
#include "qemu/queue.h"
+#include "qemu/throttle.h"
#include "qom/object.h"
+#include "qapi/qapi-types-cryptodev.h"
/**
* CryptoDevBackend:
@@ -37,15 +39,8 @@
#define TYPE_CRYPTODEV_BACKEND "cryptodev-backend"
-#define CRYPTODEV_BACKEND(obj) \
- OBJECT_CHECK(CryptoDevBackend, \
- (obj), TYPE_CRYPTODEV_BACKEND)
-#define CRYPTODEV_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(CryptoDevBackendClass, \
- (obj), TYPE_CRYPTODEV_BACKEND)
-#define CRYPTODEV_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(CryptoDevBackendClass, \
- (klass), TYPE_CRYPTODEV_BACKEND)
+OBJECT_DECLARE_TYPE(CryptoDevBackend, CryptoDevBackendClass,
+ CRYPTODEV_BACKEND)
#define MAX_CRYPTO_QUEUE_NUM 64
@@ -54,17 +49,10 @@ typedef struct CryptoDevBackendConf CryptoDevBackendConf;
typedef struct CryptoDevBackendPeers CryptoDevBackendPeers;
typedef struct CryptoDevBackendClient
CryptoDevBackendClient;
-typedef struct CryptoDevBackend CryptoDevBackend;
-
-enum CryptoDevBackendAlgType {
- CRYPTODEV_BACKEND_ALG_SYM,
- CRYPTODEV_BACKEND_ALG__MAX,
-};
/**
* CryptoDevBackendSymSessionInfo:
*
- * @op_code: operation code (refer to virtio_crypto.h)
* @cipher_alg: algorithm type of CIPHER
* @key_len: byte length of cipher key
* @hash_alg: algorithm type of HASH/MAC
@@ -82,7 +70,6 @@ enum CryptoDevBackendAlgType {
*/
typedef struct CryptoDevBackendSymSessionInfo {
/* corresponding with virtio crypto spec */
- uint32_t op_code;
uint32_t cipher_alg;
uint32_t key_len;
uint32_t hash_alg;
@@ -98,10 +85,36 @@ typedef struct CryptoDevBackendSymSessionInfo {
} CryptoDevBackendSymSessionInfo;
/**
+ * CryptoDevBackendAsymSessionInfo:
+ */
+typedef struct CryptoDevBackendRsaPara {
+ uint32_t padding_algo;
+ uint32_t hash_algo;
+} CryptoDevBackendRsaPara;
+
+typedef struct CryptoDevBackendAsymSessionInfo {
+ /* corresponding with virtio crypto spec */
+ uint32_t algo;
+ uint32_t keytype;
+ uint32_t keylen;
+ uint8_t *key;
+ union {
+ CryptoDevBackendRsaPara rsa;
+ } u;
+} CryptoDevBackendAsymSessionInfo;
+
+typedef struct CryptoDevBackendSessionInfo {
+ uint32_t op_code;
+ union {
+ CryptoDevBackendSymSessionInfo sym_sess_info;
+ CryptoDevBackendAsymSessionInfo asym_sess_info;
+ } u;
+ uint64_t session_id;
+} CryptoDevBackendSessionInfo;
+
+/**
* CryptoDevBackendSymOpInfo:
*
- * @session_id: session index which was previously
- * created by cryptodev_backend_sym_create_session()
* @aad_len: byte length of additional authenticated data
* @iv_len: byte length of initialization vector or counter
* @src_len: byte length of source data
@@ -127,7 +140,6 @@ typedef struct CryptoDevBackendSymSessionInfo {
*
*/
typedef struct CryptoDevBackendSymOpInfo {
- uint64_t session_id;
uint32_t aad_len;
uint32_t iv_len;
uint32_t src_len;
@@ -146,34 +158,63 @@ typedef struct CryptoDevBackendSymOpInfo {
uint8_t data[];
} CryptoDevBackendSymOpInfo;
-typedef struct CryptoDevBackendClass {
+
+/**
+ * CryptoDevBackendAsymOpInfo:
+ *
+ * @src_len: byte length of source data
+ * @dst_len: byte length of destination data
+ * @src: pointer to the source data
+ * @dst: pointer to the destination data
+ *
+ */
+typedef struct CryptoDevBackendAsymOpInfo {
+ uint32_t src_len;
+ uint32_t dst_len;
+ uint8_t *src;
+ uint8_t *dst;
+} CryptoDevBackendAsymOpInfo;
+
+typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
+
+typedef struct CryptoDevBackendOpInfo {
+ QCryptodevBackendAlgType algtype;
+ uint32_t op_code;
+ uint32_t queue_index;
+ CryptoDevCompletionFunc cb;
+ void *opaque; /* argument for cb */
+ uint64_t session_id;
+ union {
+ CryptoDevBackendSymOpInfo *sym_op_info;
+ CryptoDevBackendAsymOpInfo *asym_op_info;
+ } u;
+ QTAILQ_ENTRY(CryptoDevBackendOpInfo) next;
+} CryptoDevBackendOpInfo;
+
+struct CryptoDevBackendClass {
ObjectClass parent_class;
void (*init)(CryptoDevBackend *backend, Error **errp);
void (*cleanup)(CryptoDevBackend *backend, Error **errp);
- int64_t (*create_session)(CryptoDevBackend *backend,
- CryptoDevBackendSymSessionInfo *sess_info,
- uint32_t queue_index, Error **errp);
+ int (*create_session)(CryptoDevBackend *backend,
+ CryptoDevBackendSessionInfo *sess_info,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
int (*close_session)(CryptoDevBackend *backend,
- uint64_t session_id,
- uint32_t queue_index, Error **errp);
- int (*do_sym_op)(CryptoDevBackend *backend,
- CryptoDevBackendSymOpInfo *op_info,
- uint32_t queue_index, Error **errp);
-} CryptoDevBackendClass;
-
-typedef enum CryptoDevBackendOptionsType {
- CRYPTODEV_BACKEND_TYPE_NONE = 0,
- CRYPTODEV_BACKEND_TYPE_BUILTIN = 1,
- CRYPTODEV_BACKEND_TYPE_VHOST_USER = 2,
- CRYPTODEV_BACKEND_TYPE__MAX,
-} CryptoDevBackendOptionsType;
+ uint64_t session_id,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
+
+ int (*do_op)(CryptoDevBackend *backend,
+ CryptoDevBackendOpInfo *op_info);
+};
struct CryptoDevBackendClient {
- CryptoDevBackendOptionsType type;
- char *model;
- char *name;
+ QCryptodevBackendType type;
char *info_str;
unsigned int queue_index;
int vring_enable;
@@ -198,6 +239,7 @@ struct CryptoDevBackendConf {
uint32_t mac_algo_l;
uint32_t mac_algo_h;
uint32_t aead_algo;
+ uint32_t akcipher_algo;
/* Maximum length of cipher key */
uint32_t max_cipher_key_len;
/* Maximum length of authenticated key */
@@ -206,6 +248,24 @@ struct CryptoDevBackendConf {
uint64_t max_size;
};
+typedef struct CryptodevBackendSymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+} CryptodevBackendSymStat;
+
+typedef struct CryptodevBackendAsymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t sign_ops;
+ int64_t verify_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+ int64_t sign_bytes;
+ int64_t verify_bytes;
+} CryptodevBackendAsymStat;
+
struct CryptoDevBackend {
Object parent_obj;
@@ -213,15 +273,48 @@ struct CryptoDevBackend {
/* Tag the cryptodev backend is used by virtio-crypto or not */
bool is_used;
CryptoDevBackendConf conf;
+ CryptodevBackendSymStat *sym_stat;
+ CryptodevBackendAsymStat *asym_stat;
+
+ ThrottleState ts;
+ ThrottleTimers tt;
+ ThrottleConfig tc;
+ QTAILQ_HEAD(, CryptoDevBackendOpInfo) opinfos;
};
+#define CryptodevSymStatInc(be, op, bytes) do { \
+ be->sym_stat->op##_bytes += (bytes); \
+ be->sym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevSymStatIncEncrypt(be, bytes) \
+ CryptodevSymStatInc(be, encrypt, bytes)
+
+#define CryptodevSymStatIncDecrypt(be, bytes) \
+ CryptodevSymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatInc(be, op, bytes) do { \
+ be->asym_stat->op##_bytes += (bytes); \
+ be->asym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevAsymStatIncEncrypt(be, bytes) \
+ CryptodevAsymStatInc(be, encrypt, bytes)
+
+#define CryptodevAsymStatIncDecrypt(be, bytes) \
+ CryptodevAsymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatIncSign(be, bytes) \
+ CryptodevAsymStatInc(be, sign, bytes)
+
+#define CryptodevAsymStatIncVerify(be, bytes) \
+ CryptodevAsymStatInc(be, verify, bytes)
+
+
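A minimal usage sketch for the accounting macros above (hypothetical call site; it assumes the backend's sym_stat was allocated when the backend was realized):

    /* record one completed 4 KiB symmetric encryption request */
    CryptodevSymStatIncEncrypt(backend, 4096);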
/**
* cryptodev_backend_new_client:
- * @model: the cryptodev backend model
- * @name: the cryptodev backend name, can be NULL
*
- * Creates a new cryptodev backend client object
- * with the @name in the model @model.
+ * Creates a new cryptodev backend client object.
*
* The returned object must be released with
* cryptodev_backend_free_client() when no
@@ -229,9 +322,8 @@ struct CryptoDevBackend {
*
* Returns: a new cryptodev backend client object
*/
-CryptoDevBackendClient *
-cryptodev_backend_new_client(const char *model,
- const char *name);
+CryptoDevBackendClient *cryptodev_backend_new_client(void);
+
/**
* cryptodev_backend_free_client:
* @cc: the cryptodev backend client object
@@ -247,7 +339,7 @@ void cryptodev_backend_free_client(
* @backend: the cryptodev backend object
* @errp: pointer to a NULL-initialized error object
*
- * Clean the resouce associated with @backend that realizaed
+ * Clean the resource associated with @backend that was realized
* by the specific backend's init() callback
*/
void cryptodev_backend_cleanup(
@@ -255,60 +347,67 @@ void cryptodev_backend_cleanup(
Error **errp);
/**
- * cryptodev_backend_sym_create_session:
+ * cryptodev_backend_create_session:
* @backend: the cryptodev backend object
* @sess_info: parameters needed by session creating
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
+ * @cb: callback invoked when session creation completes
+ * @opaque: parameter passed to callback
*
- * Create a session for symmetric algorithms
+ * Create a session for symmetric/asymmetric algorithms
*
- * Returns: session id on success, or -1 on error
+ * Returns: 0 for success and cb will be called when creation is completed,
+ * negative value for error, and cb will not be called.
*/
-int64_t cryptodev_backend_sym_create_session(
+int cryptodev_backend_create_session(
CryptoDevBackend *backend,
- CryptoDevBackendSymSessionInfo *sess_info,
- uint32_t queue_index, Error **errp);
+ CryptoDevBackendSessionInfo *sess_info,
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
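A sketch of how a caller might drive the asynchronous API declared above (the callback and the backend/sess_info/queue_index/vc_req variables are placeholders, and the lifetime rule is an assumption based on the callback-style interface, not a statement from this patch):

    /* hypothetical completion callback */
    static void sess_create_done(void *opaque, int ret)
    {
        /* ret < 0: creation failed; otherwise sess_info->session_id is valid */
    }

    /* sess_info must stay valid until the callback has run */
    if (cryptodev_backend_create_session(backend, sess_info, queue_index,
                                         sess_create_done, vc_req) < 0) {
        /* error path: the callback will not be invoked */
    }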
/**
- * cryptodev_backend_sym_close_session:
+ * cryptodev_backend_close_session:
* @backend: the cryptodev backend object
* @session_id: the session id
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
+ * @cb: callback invoked when the session close completes
+ * @opaque: parameter passed to callback
*
- * Close a session for symmetric algorithms which was previously
- * created by cryptodev_backend_sym_create_session()
+ * Close a session which was previously
+ * created by cryptodev_backend_create_session()
*
- * Returns: 0 on success, or Negative on error
+ * Returns: 0 for success and cb will be called when the session has been closed,
+ * negative value for error, and cb will not be called.
*/
-int cryptodev_backend_sym_close_session(
+int cryptodev_backend_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
- uint32_t queue_index, Error **errp);
+ uint32_t queue_index,
+ CryptoDevCompletionFunc cb,
+ void *opaque);
/**
* cryptodev_backend_crypto_operation:
* @backend: the cryptodev backend object
- * @opaque: pointer to a VirtIOCryptoReq object
- * @queue_index: queue index of cryptodev backend client
- * @errp: pointer to a NULL-initialized error object
+ * @op_info: pointer to a CryptoDevBackendOpInfo object
*
- * Do crypto operation, such as encryption and
- * decryption
+ * Do crypto operation, such as encryption, decryption, signing and
+ * verification
*
- * Returns: VIRTIO_CRYPTO_OK on success,
- * or -VIRTIO_CRYPTO_* on error
+ * Returns: 0 for success and cb will be called when the operation completes,
+ * negative value for error, and cb will not be called.
*/
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
- void *opaque,
- uint32_t queue_index, Error **errp);
+ CryptoDevBackendOpInfo *op_info);
/**
* cryptodev_backend_set_used:
* @backend: the cryptodev backend object
- * @used: ture or false
+ * @used: true or false
*
* Set the cryptodev backend is used by virtio-crypto or not
*/
@@ -328,7 +427,7 @@ bool cryptodev_backend_is_used(CryptoDevBackend *backend);
/**
* cryptodev_backend_set_ready:
* @backend: the cryptodev backend object
- * @ready: ture or false
+ * @ready: true or false
*
* Set the cryptodev backend is ready or not, which is called
* by the children of the cryptodev banckend interface.
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
index 982c89345f..8eab395934 100644
--- a/include/sysemu/device_tree.h
+++ b/include/sysemu/device_tree.h
@@ -70,6 +70,23 @@ int qemu_fdt_setprop_u64(void *fdt, const char *node_path,
const char *property, uint64_t val);
int qemu_fdt_setprop_string(void *fdt, const char *node_path,
const char *property, const char *string);
+
+/**
+ * qemu_fdt_setprop_string_array: set a string array property
+ *
+ * @fdt: pointer to the dt blob
+ * @name: node name
+ * @prop: property array
+ * @array: pointer to an array of string pointers
+ * @len: length of array
+ *
+ * assigns a string array to a property. This function converts and
+ * array of strings to a sequential string with \0 separators before
+ * setting the property.
+ */
+int qemu_fdt_setprop_string_array(void *fdt, const char *node_path,
+ const char *prop, char **array, int len);
+
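For illustration, a call along the following lines (the node path and clock names are made up) ends up storing "uartclk\0apb_pclk" as the property value:

    static const char *clk_names[] = { "uartclk", "apb_pclk" };

    qemu_fdt_setprop_string_array(fdt, "/pl011@9000000", "clock-names",
                                  (char **)clk_names, ARRAY_SIZE(clk_names));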
int qemu_fdt_setprop_phandle(void *fdt, const char *node_path,
const char *property,
const char *target_node_path);
@@ -104,20 +121,20 @@ uint32_t qemu_fdt_get_phandle(void *fdt, const char *path);
uint32_t qemu_fdt_alloc_phandle(void *fdt);
int qemu_fdt_nop_node(void *fdt, const char *node_path);
int qemu_fdt_add_subnode(void *fdt, const char *name);
+int qemu_fdt_add_path(void *fdt, const char *path);
#define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
do { \
uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \
- qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \
+ for (unsigned i_ = 0; i_ < ARRAY_SIZE(qdt_tmp); i_++) { \
+ qdt_tmp[i_] = cpu_to_be32(qdt_tmp[i_]); \
} \
qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
sizeof(qdt_tmp)); \
} while (0)
void qemu_fdt_dumpdtb(void *fdt, int size);
+void hmp_dumpdtb(Monitor *mon, const QDict *qdict);
/**
* qemu_fdt_setprop_sized_cells_from_array:
@@ -178,6 +195,15 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
qdt_tmp); \
})
+
+/**
+ * qemu_fdt_randomize_seeds:
+ * @fdt: device tree blob
+ *
+ * Re-randomize all "rng-seed" properties with new seeds.
+ */
+void qemu_fdt_randomize_seeds(void *fdt);
+
#define FDT_PCI_RANGE_RELOCATABLE 0x80000000
#define FDT_PCI_RANGE_PREFETCHABLE 0x40000000
#define FDT_PCI_RANGE_ALIASED 0x20000000
diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h
new file mode 100644
index 0000000000..d11ebbbbdb
--- /dev/null
+++ b/include/sysemu/dirtylimit.h
@@ -0,0 +1,39 @@
+/*
+ * Dirty page rate limit common functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ * Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_DIRTYRLIMIT_H
+#define QEMU_DIRTYRLIMIT_H
+
+#define DIRTYLIMIT_CALC_TIME_MS 1000 /* 1000ms */
+
+int64_t vcpu_dirty_rate_get(int cpu_index);
+void vcpu_dirty_rate_stat_start(void);
+void vcpu_dirty_rate_stat_stop(void);
+void vcpu_dirty_rate_stat_initialize(void);
+void vcpu_dirty_rate_stat_finalize(void);
+
+void dirtylimit_state_lock(void);
+void dirtylimit_state_unlock(void);
+void dirtylimit_state_initialize(void);
+void dirtylimit_state_finalize(void);
+bool dirtylimit_in_service(void);
+bool dirtylimit_vcpu_index_valid(int cpu_index);
+void dirtylimit_process(void);
+void dirtylimit_change(bool start);
+void dirtylimit_set_vcpu(int cpu_index,
+ uint64_t quota,
+ bool enable);
+void dirtylimit_set_all(uint64_t quota,
+ bool enable);
+void dirtylimit_vcpu_execute(CPUState *cpu);
+uint64_t dirtylimit_throttle_time_per_round(void);
+uint64_t dirtylimit_ring_full_time(void);
+#endif
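A sketch of the intended call pattern, inferred from the declarations above (the quota unit and the locking discipline are assumptions, not guarantees):

    /* management path: arm a dirty page rate quota of 200 on vCPU 0 */
    dirtylimit_state_lock();
    dirtylimit_set_vcpu(0, 200, true);
    dirtylimit_state_unlock();

    /* vCPU thread: apply the throttle if one is armed for this vCPU */
    dirtylimit_vcpu_execute(cpu);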
diff --git a/include/sysemu/dirtyrate.h b/include/sysemu/dirtyrate.h
new file mode 100644
index 0000000000..20813f303f
--- /dev/null
+++ b/include/sysemu/dirtyrate.h
@@ -0,0 +1,30 @@
+/*
+ * dirty page rate helper functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ * Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_DIRTYRATE_H
+#define QEMU_DIRTYRATE_H
+
+#include "qapi/qapi-types-migration.h"
+
+typedef struct VcpuStat {
+ int nvcpu; /* number of vcpu */
+ DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */
+} VcpuStat;
+
+int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
+ VcpuStat *stat,
+ unsigned int flag,
+ bool one_shot);
+
+void global_dirty_log_change(unsigned int flag,
+ bool start);
+#endif
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index 80c5bc3e02..a1ac5bc1b5 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -1,7 +1,7 @@
/*
* DMA helper functions
*
- * Copyright (c) 2009 Red Hat
+ * Copyright (c) 2009, 2020 Red Hat
*
* This work is licensed under the terms of the GNU General Public License
* (GNU GPL), version 2 or later.
@@ -15,24 +15,11 @@
#include "block/block.h"
#include "block/accounting.h"
-typedef struct ScatterGatherEntry ScatterGatherEntry;
-
typedef enum {
DMA_DIRECTION_TO_DEVICE = 0,
DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
-struct QEMUSGList {
- ScatterGatherEntry *sg;
- int nsg;
- int nalloc;
- size_t size;
- DeviceState *dev;
- AddressSpace *as;
-};
-
-#ifndef CONFIG_USER_ONLY
-
/*
* When an IOMMU is present, bus addresses become distinct from
* CPU/memory physical addresses and may be a different size. Because
@@ -45,6 +32,17 @@ typedef uint64_t dma_addr_t;
#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64
+typedef struct ScatterGatherEntry ScatterGatherEntry;
+
+struct QEMUSGList {
+ ScatterGatherEntry *sg;
+ int nsg;
+ int nalloc;
+ dma_addr_t size;
+ DeviceState *dev;
+ AddressSpace *as;
+};
+
static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
/*
@@ -73,71 +71,164 @@ static inline void dma_barrier(AddressSpace *as, DMADirection dir)
* dma_memory_{read,write}() and check for errors */
static inline bool dma_memory_valid(AddressSpace *as,
dma_addr_t addr, dma_addr_t len,
- DMADirection dir)
+ DMADirection dir, MemTxAttrs attrs)
{
return address_space_access_valid(as, addr, len,
dir == DMA_DIRECTION_FROM_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
+ attrs);
}
-static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len,
- DMADirection dir)
+static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir,
+ MemTxAttrs attrs)
{
- return (bool)address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
- buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
+ return address_space_rw(as, addr, attrs,
+ buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}
-static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len)
+static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ void *buf, dma_addr_t len)
{
- return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+ return dma_memory_rw_relaxed(as, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
}
-static inline int dma_memory_write_relaxed(AddressSpace *as, dma_addr_t addr,
- const void *buf, dma_addr_t len)
+static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
+ dma_addr_t addr,
+ const void *buf,
+ dma_addr_t len)
{
return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
- DMA_DIRECTION_FROM_DEVICE);
+ DMA_DIRECTION_FROM_DEVICE,
+ MEMTXATTRS_UNSPECIFIED);
}
-static inline int dma_memory_rw(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len,
- DMADirection dir)
+/**
+ * dma_memory_rw: Read from or write to an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
+ * @dir: indicates the transfer direction
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir, MemTxAttrs attrs)
{
dma_barrier(as, dir);
- return dma_memory_rw_relaxed(as, addr, buf, len, dir);
+ return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
}
-static inline int dma_memory_read(AddressSpace *as, dma_addr_t addr,
- void *buf, dma_addr_t len)
+/**
+ * dma_memory_read: Read from an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault). Called within RCU critical section.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ MemTxAttrs attrs)
{
- return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+ return dma_memory_rw(as, addr, buf, len,
+ DMA_DIRECTION_TO_DEVICE, attrs);
}
-static inline int dma_memory_write(AddressSpace *as, dma_addr_t addr,
- const void *buf, dma_addr_t len)
+/**
+ * dma_memory_write: Write to an address space from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ * @attrs: memory transaction attributes
+ */
+static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
+ const void *buf, dma_addr_t len,
+ MemTxAttrs attrs)
{
return dma_memory_rw(as, addr, (void *)buf, len,
- DMA_DIRECTION_FROM_DEVICE);
+ DMA_DIRECTION_FROM_DEVICE, attrs);
}
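With @attrs now explicit, device code passes its own transaction attributes (or MEMTXATTRS_UNSPECIFIED) and checks the MemTxResult; a minimal sketch, where `as` and `desc_addr` stand for whatever address space and guest address the device uses:

    uint8_t desc[64];

    if (dma_memory_read(as, desc_addr, desc, sizeof(desc),
                        MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
        /* e.g. flag a DMA error to the guest rather than using stale data */
    }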
-int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len);
+/**
+ * dma_memory_set: Fill memory with a constant byte from DMA controller.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
+ uint8_t c, dma_addr_t len, MemTxAttrs attrs);
+/**
+ * dma_memory_map: Map a physical memory region into a host virtual address.
+ *
+ * May map a subset of the requested range, given by and returned in @len.
+ * May return %NULL and set *@len to zero(0), if resources needed to perform
+ * the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: pointer to length of buffer; updated on return
+ * @dir: indicates the transfer direction
+ * @attrs: memory attributes
+ */
static inline void *dma_memory_map(AddressSpace *as,
dma_addr_t addr, dma_addr_t *len,
- DMADirection dir)
+ DMADirection dir, MemTxAttrs attrs)
{
hwaddr xlen = *len;
void *p;
p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
- MEMTXATTRS_UNSPECIFIED);
+ attrs);
*len = xlen;
return p;
}
+/**
+ * dma_memory_unmap: Unmaps a memory region previously mapped
+ * by dma_memory_map()
+ *
+ * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
+ * @access_len gives the amount of memory that was actually read or written
+ * by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @dir: indicates the transfer direction
+ * @access_len: amount of data actually transferred
+ */
static inline void dma_memory_unmap(AddressSpace *as,
void *buffer, dma_addr_t len,
DMADirection dir, dma_addr_t access_len)
@@ -147,32 +238,34 @@ static inline void dma_memory_unmap(AddressSpace *as,
}
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
- static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
- dma_addr_t addr) \
- { \
- uint##_bits##_t val; \
- dma_memory_read(as, addr, &val, (_bits) / 8); \
- return _end##_bits##_to_cpu(val); \
- } \
- static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
- dma_addr_t addr, \
- uint##_bits##_t val) \
- { \
- val = cpu_to_##_end##_bits(val); \
- dma_memory_write(as, addr, &val, (_bits) / 8); \
+ static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t *pval, \
+ MemTxAttrs attrs) \
+ { \
+ MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
+ _end##_bits##_to_cpus(pval); \
+ return res; \
+ } \
+ static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
+ dma_addr_t addr, \
+ uint##_bits##_t val, \
+ MemTxAttrs attrs) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+ return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
}
-static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
+static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
+ uint8_t *val, MemTxAttrs attrs)
{
- uint8_t val;
-
- dma_memory_read(as, addr, &val, 1);
- return val;
+ return dma_memory_read(as, addr, val, 1, attrs);
}
-static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
+static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
+ uint8_t val, MemTxAttrs attrs)
{
- dma_memory_write(as, addr, &val, 1);
+ return dma_memory_write(as, addr, &val, 1, attrs);
}
DEFINE_LDST_DMA(uw, w, 16, le);
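The generated helpers likewise return a MemTxResult and deliver the byte-swapped value through a pointer; for instance the lduw_le_dma produced by the line above might be used as:

    uint16_t val;

    if (lduw_le_dma(as, addr, &val, MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
        /* the 16-bit little-endian load failed; val is not meaningful */
    }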
@@ -193,7 +286,6 @@ void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
-#endif
typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
BlockCompletionFunc *cb, void *cb_opaque,
@@ -209,10 +301,24 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
BlockAIOCB *dma_blk_write(BlockBackend *blk,
QEMUSGList *sg, uint64_t offset, uint32_t align,
BlockCompletionFunc *cb, void *opaque);
-uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
-uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
+MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
+ QEMUSGList *sg, MemTxAttrs attrs);
+MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
+ QEMUSGList *sg, MemTxAttrs attrs);
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
QEMUSGList *sg, enum BlockAcctType type);
+/**
+ * dma_aligned_pow2_mask: Return the address bit mask of the largest
+ * power of 2 size less or equal than @end - @start + 1, aligned with @start,
+ * and bounded by 1 << @max_addr_bits bits.
+ *
+ * @start: range start address
+ * @end: range end address (greater than @start)
+ * @max_addr_bits: max address bits (<= 64)
+ */
+uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
+ int max_addr_bits);
+
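A worked example of the mask computation described above: for start = 0x1000 and end = 0x2fff the range spans 0x2000 bytes, but @start is only aligned to 0x1000, so the largest admissible power-of-2 size is 0x1000 and the call

    uint64_t mask = dma_aligned_pow2_mask(0x1000, 0x2fff, 64);

is expected to return 0xfff.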
#endif
diff --git a/include/sysemu/dump-arch.h b/include/sysemu/dump-arch.h
index e25b02e990..743916e46c 100644
--- a/include/sysemu/dump-arch.h
+++ b/include/sysemu/dump-arch.h
@@ -21,6 +21,10 @@ typedef struct ArchDumpInfo {
uint32_t page_size; /* The target's page size. If it's variable and
* unknown, then this should be the maximum. */
uint64_t phys_base; /* The target's physmem base. */
+ void (*arch_sections_add_fn)(DumpState *s);
+ uint64_t (*arch_sections_write_hdr_fn)(DumpState *s, uint8_t *buff);
+ int (*arch_sections_write_fn)(DumpState *s, uint8_t *buff);
+ void (*arch_cleanup_fn)(DumpState *s);
} ArchDumpInfo;
struct GuestPhysBlockList; /* memory_mapping.h */
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
index 250143cb5a..d702854853 100644
--- a/include/sysemu/dump.h
+++ b/include/sysemu/dump.h
@@ -15,6 +15,7 @@
#define DUMP_H
#include "qapi/qapi-types-dump.h"
+#include "qemu/thread.h"
#define MAKEDUMPFILE_SIGNATURE "makedumpfile"
#define MAX_SIZE_MDF_HEADER (4096) /* max size of makedumpfile_header */
@@ -136,7 +137,7 @@ typedef struct QEMU_PACKED KdumpSubHeader64 {
} KdumpSubHeader64;
typedef struct DataCache {
- int fd; /* fd of the file where to write the cached data */
+ DumpState *state; /* dump state related to this data */
uint8_t *buf; /* buffer for cached data */
size_t buf_size; /* size of the buf */
size_t data_size; /* size of cached data in buf */
@@ -154,20 +155,36 @@ typedef struct DumpState {
GuestPhysBlockList guest_phys_blocks;
ArchDumpInfo dump_info;
MemoryMappingList list;
- uint16_t phdr_num;
- uint32_t sh_info;
- bool have_section;
bool resume;
bool detached;
- ssize_t note_size;
+ bool kdump_raw;
hwaddr memory_offset;
int fd;
- GuestPhysBlock *next_block;
- ram_addr_t start;
- bool has_filter;
- int64_t begin;
- int64_t length;
+ /*
+ * Dump filter area variables
+ *
+ * A filtered dump only contains the guest memory designated by
+ * the start address and length variables defined below.
+ *
+ * If length is 0, no filtering is applied.
+ */
+ int64_t filter_area_begin; /* Start address of partial guest memory area */
+ int64_t filter_area_length; /* Length of partial guest memory area */
+
+ /* Elf dump related data */
+ uint32_t phdr_num;
+ uint32_t shdr_num;
+ ssize_t note_size;
+ hwaddr shdr_offset;
+ hwaddr phdr_offset;
+ hwaddr section_offset;
+ hwaddr note_offset;
+
+ void *elf_section_hdrs; /* Pointer to section header buffer */
+ void *elf_section_data; /* Pointer to section data buffer */
+ uint64_t elf_section_data_size; /* Size of section data */
+ GArray *string_table_buf; /* String table data buffer */
uint8_t *note_buf; /* buffer for notes */
size_t note_buf_offset; /* the writing place in note_buf */
@@ -200,4 +217,9 @@ typedef struct DumpState {
uint16_t cpu_to_dump16(DumpState *s, uint16_t val);
uint32_t cpu_to_dump32(DumpState *s, uint32_t val);
uint64_t cpu_to_dump64(DumpState *s, uint64_t val);
+
+int64_t dump_filtered_memblock_size(GuestPhysBlock *block, int64_t filter_area_start,
+ int64_t filter_area_length);
+int64_t dump_filtered_memblock_start(GuestPhysBlock *block, int64_t filter_area_start,
+ int64_t filter_area_length);
#endif
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
new file mode 100644
index 0000000000..a6c24f1351
--- /dev/null
+++ b/include/sysemu/event-loop-base.h
@@ -0,0 +1,40 @@
+/*
+ * QEMU event-loop backend
+ *
+ * Copyright (C) 2022 Red Hat Inc
+ *
+ * Authors:
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_EVENT_LOOP_BASE_H
+#define QEMU_EVENT_LOOP_BASE_H
+
+#include "qom/object.h"
+#include "block/aio.h"
+
+#define TYPE_EVENT_LOOP_BASE "event-loop-base"
+OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass,
+ EVENT_LOOP_BASE)
+
+struct EventLoopBaseClass {
+ ObjectClass parent_class;
+
+ void (*init)(EventLoopBase *base, Error **errp);
+ void (*update_params)(EventLoopBase *base, Error **errp);
+ bool (*can_be_deleted)(EventLoopBase *base);
+};
+
+struct EventLoopBase {
+ Object parent;
+
+ /* AioContext AIO engine parameters */
+ int64_t aio_max_batch;
+
+ /* AioContext thread pool parameters */
+ int64_t thread_pool_min;
+ int64_t thread_pool_max;
+};
+#endif
diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h
deleted file mode 100644
index 9b27e65cc7..0000000000
--- a/include/sysemu/hax.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * QEMU HAXM support
- *
- * Copyright IBM, Corp. 2008
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * Copyright (c) 2011 Intel Corporation
- * Written by:
- * Jiang Yunhong<yunhong.jiang@intel.com>
- * Xin Xiaohui<xiaohui.xin@intel.com>
- * Zhang Xiantao<xiantao.zhang@intel.com>
- *
- * Copyright 2016 Google, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_HAX_H
-#define QEMU_HAX_H
-
-
-int hax_sync_vcpus(void);
-int hax_init_vcpu(CPUState *cpu);
-int hax_smp_cpu_exec(CPUState *cpu);
-int hax_populate_ram(uint64_t va, uint64_t size);
-
-void hax_cpu_synchronize_state(CPUState *cpu);
-void hax_cpu_synchronize_post_reset(CPUState *cpu);
-void hax_cpu_synchronize_post_init(CPUState *cpu);
-void hax_cpu_synchronize_pre_loadvm(CPUState *cpu);
-
-#ifdef CONFIG_HAX
-
-int hax_enabled(void);
-
-#include "qemu/bitops.h"
-#include "exec/memory.h"
-int hax_vcpu_destroy(CPUState *cpu);
-void hax_raise_event(CPUState *cpu);
-void hax_reset_vcpu_state(void *opaque);
-#include "target/i386/hax-interface.h"
-#include "target/i386/hax-i386.h"
-
-#else /* CONFIG_HAX */
-
-#define hax_enabled() (0)
-
-#endif /* CONFIG_HAX */
-
-#endif /* QEMU_HAX_H */
diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h
index 8276e53683..04b884bf42 100644
--- a/include/sysemu/hostmem.h
+++ b/include/sysemu/hostmem.h
@@ -18,14 +18,11 @@
#include "qom/object.h"
#include "exec/memory.h"
#include "qemu/bitmap.h"
+#include "qemu/thread-context.h"
#define TYPE_MEMORY_BACKEND "memory-backend"
-#define MEMORY_BACKEND(obj) \
- OBJECT_CHECK(HostMemoryBackend, (obj), TYPE_MEMORY_BACKEND)
-#define MEMORY_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(HostMemoryBackendClass, (obj), TYPE_MEMORY_BACKEND)
-#define MEMORY_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(HostMemoryBackendClass, (klass), TYPE_MEMORY_BACKEND)
+OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass,
+ MEMORY_BACKEND)
/* hostmem-ram.c */
/**
@@ -42,8 +39,6 @@
*/
#define TYPE_MEMORY_BACKEND_FILE "memory-backend-file"
-typedef struct HostMemoryBackend HostMemoryBackend;
-typedef struct HostMemoryBackendClass HostMemoryBackendClass;
/**
* HostMemoryBackendClass:
@@ -52,7 +47,15 @@ typedef struct HostMemoryBackendClass HostMemoryBackendClass;
struct HostMemoryBackendClass {
ObjectClass parent_class;
- void (*alloc)(HostMemoryBackend *backend, Error **errp);
+ /**
+ * alloc: Allocate memory from backend.
+ *
+ * @backend: the #HostMemoryBackend.
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Return: true on success, else false setting @errp with error.
+ */
+ bool (*alloc)(HostMemoryBackend *backend, Error **errp);
};
/**
@@ -70,8 +73,10 @@ struct HostMemoryBackend {
/* protected */
uint64_t size;
bool merge, dump, use_canonical_path;
- bool prealloc, is_mapped, share;
+ bool prealloc, is_mapped, share, reserve;
+ bool guest_memfd;
uint32_t prealloc_threads;
+ ThreadContext *prealloc_context;
DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
HostMemPolicy policy;
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
index 6d3ee4fdb7..4a7c6af3a5 100644
--- a/include/sysemu/hvf.h
+++ b/include/sysemu/hvf.h
@@ -13,27 +13,59 @@
#ifndef HVF_H
#define HVF_H
+#include "qemu/accel.h"
+#include "qom/object.h"
+
+#ifdef NEED_CPU_H
+#include "cpu.h"
+
#ifdef CONFIG_HVF
-uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
- int reg);
extern bool hvf_allowed;
#define hvf_enabled() (hvf_allowed)
#else /* !CONFIG_HVF */
#define hvf_enabled() 0
-#define hvf_get_supported_cpuid(func, idx, reg) 0
#endif /* !CONFIG_HVF */
-int hvf_init_vcpu(CPUState *);
-int hvf_vcpu_exec(CPUState *);
-void hvf_cpu_synchronize_state(CPUState *);
-void hvf_cpu_synchronize_post_reset(CPUState *);
-void hvf_cpu_synchronize_post_init(CPUState *);
-void hvf_cpu_synchronize_pre_loadvm(CPUState *);
-void hvf_vcpu_destroy(CPUState *);
+#endif /* NEED_CPU_H */
#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
-#define HVF_STATE(obj) \
- OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
+typedef struct HVFState HVFState;
+DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE,
+ TYPE_HVF_ACCEL)
+
+#ifdef NEED_CPU_H
+struct hvf_sw_breakpoint {
+ vaddr pc;
+ vaddr saved_insn;
+ int use_count;
+ QTAILQ_ENTRY(hvf_sw_breakpoint) entry;
+};
+
+struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu,
+ vaddr pc);
+int hvf_sw_breakpoints_active(CPUState *cpu);
+
+int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
+int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp);
+int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
+int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
+void hvf_arch_remove_all_hw_breakpoints(void);
+
+/*
+ * hvf_update_guest_debug:
+ * @cs: CPUState for the CPU to update
+ *
+ * Update guest to enable or disable debugging. Per-arch specifics will be
+ * handled by calling down to hvf_arch_update_guest_debug.
+ */
+int hvf_update_guest_debug(CPUState *cpu);
+void hvf_arch_update_guest_debug(CPUState *cpu);
+
+/*
+ * Return whether the guest supports debugging.
+ */
+bool hvf_arch_supports_guest_debug(void);
+#endif /* NEED_CPU_H */
#endif
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
new file mode 100644
index 0000000000..718beddcdd
--- /dev/null
+++ b/include/sysemu/hvf_int.h
@@ -0,0 +1,70 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* header to be included in HVF-specific code */
+
+#ifndef HVF_INT_H
+#define HVF_INT_H
+
+#ifdef __aarch64__
+#include <Hypervisor/Hypervisor.h>
+#else
+#include <Hypervisor/hv.h>
+#endif
+
+/* hvf_slot flags */
+#define HVF_SLOT_LOG (1 << 0)
+
+typedef struct hvf_slot {
+ uint64_t start;
+ uint64_t size;
+ uint8_t *mem;
+ int slot_id;
+ uint32_t flags;
+ MemoryRegion *region;
+} hvf_slot;
+
+typedef struct hvf_vcpu_caps {
+ uint64_t vmx_cap_pinbased;
+ uint64_t vmx_cap_procbased;
+ uint64_t vmx_cap_procbased2;
+ uint64_t vmx_cap_entry;
+ uint64_t vmx_cap_exit;
+ uint64_t vmx_cap_preemption_timer;
+} hvf_vcpu_caps;
+
+struct HVFState {
+ AccelState parent;
+ hvf_slot slots[32];
+ int num_slots;
+
+ hvf_vcpu_caps *hvf_caps;
+ uint64_t vtimer_offset;
+ QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints;
+};
+extern HVFState *hvf_state;
+
+struct AccelCPUState {
+ uint64_t fd;
+ void *exit;
+ bool vtimer_masked;
+ sigset_t unblock_ipi_mask;
+ bool guest_debug_enabled;
+};
+
+void assert_hvf_ok(hv_return_t ret);
+int hvf_arch_init(void);
+int hvf_arch_init_vcpu(CPUState *cpu);
+void hvf_arch_vcpu_destroy(CPUState *cpu);
+int hvf_vcpu_exec(CPUState *);
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
+int hvf_put_registers(CPUState *);
+int hvf_get_registers(CPUState *);
+void hvf_kick_vcpu_thread(CPUState *cpu);
+
+#endif
diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h
index e128f8b06b..c71b77e71f 100644
--- a/include/sysemu/hw_accel.h
+++ b/include/sysemu/hw_accel.h
@@ -1,5 +1,5 @@
/*
- * QEMU Hardware accelertors support
+ * QEMU Hardware accelerators support
*
* Copyright 2016 Google, Inc.
*
@@ -12,73 +12,14 @@
#define QEMU_HW_ACCEL_H
#include "hw/core/cpu.h"
-#include "sysemu/hax.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
+#include "sysemu/nvmm.h"
-static inline void cpu_synchronize_state(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_state(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_state(cpu);
- }
- if (hvf_enabled()) {
- hvf_cpu_synchronize_state(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_state(cpu);
- }
-}
-
-static inline void cpu_synchronize_post_reset(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_post_reset(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_post_reset(cpu);
- }
- if (hvf_enabled()) {
- hvf_cpu_synchronize_post_reset(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_post_reset(cpu);
- }
-}
-
-static inline void cpu_synchronize_post_init(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_post_init(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_post_init(cpu);
- }
- if (hvf_enabled()) {
- hvf_cpu_synchronize_post_init(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_post_init(cpu);
- }
-}
-
-static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_pre_loadvm(cpu);
- }
- if (hax_enabled()) {
- hax_cpu_synchronize_pre_loadvm(cpu);
- }
- if (hvf_enabled()) {
- hvf_cpu_synchronize_pre_loadvm(cpu);
- }
- if (whpx_enabled()) {
- whpx_cpu_synchronize_pre_loadvm(cpu);
- }
-}
+void cpu_synchronize_state(CPUState *cpu);
+void cpu_synchronize_post_reset(CPUState *cpu);
+void cpu_synchronize_post_init(CPUState *cpu);
+void cpu_synchronize_pre_loadvm(CPUState *cpu);
#endif /* QEMU_HW_ACCEL_H */
diff --git a/include/sysemu/iommufd.h b/include/sysemu/iommufd.h
new file mode 100644
index 0000000000..9af27ebd6c
--- /dev/null
+++ b/include/sysemu/iommufd.h
@@ -0,0 +1,36 @@
+#ifndef SYSEMU_IOMMUFD_H
+#define SYSEMU_IOMMUFD_H
+
+#include "qom/object.h"
+#include "exec/hwaddr.h"
+#include "exec/cpu-common.h"
+
+#define TYPE_IOMMUFD_BACKEND "iommufd"
+OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND)
+
+struct IOMMUFDBackendClass {
+ ObjectClass parent_class;
+};
+
+struct IOMMUFDBackend {
+ Object parent;
+
+ /*< protected >*/
+ int fd; /* /dev/iommu file descriptor */
+ bool owned; /* is the /dev/iommu opened internally */
+ uint32_t users;
+
+ /*< public >*/
+};
+
+int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
+void iommufd_backend_disconnect(IOMMUFDBackend *be);
+
+int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+ Error **errp);
+void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id);
+int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly);
+int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
+ hwaddr iova, ram_addr_t size);
+#endif
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 6181486401..2102a90eca 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -17,11 +17,12 @@
#include "block/aio.h"
#include "qemu/thread.h"
#include "qom/object.h"
+#include "sysemu/event-loop-base.h"
#define TYPE_IOTHREAD "iothread"
-typedef struct {
- Object parent_obj;
+struct IOThread {
+ EventLoopBase parent_obj;
QemuThread thread;
AioContext *ctx;
@@ -37,10 +38,11 @@ typedef struct {
int64_t poll_max_ns;
int64_t poll_grow;
int64_t poll_shrink;
-} IOThread;
+};
+typedef struct IOThread IOThread;
-#define IOTHREAD(obj) \
- OBJECT_CHECK(IOThread, obj, TYPE_IOTHREAD)
+DECLARE_INSTANCE_CHECKER(IOThread, IOTHREAD,
+ TYPE_IOTHREAD)
char *iothread_get_id(IOThread *iothread);
IOThread *iothread_by_id(const char *id);
@@ -56,4 +58,10 @@ IOThread *iothread_create(const char *id, Error **errp);
void iothread_stop(IOThread *iothread);
void iothread_destroy(IOThread *iothread);
+/*
+ * Returns true if executing within IOThread context,
+ * false otherwise.
+ */
+bool qemu_in_iothread(void);
+
#endif /* IOTHREAD_H */
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index b4174d941c..47f9e8be1b 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -11,12 +11,14 @@
*
*/
+/* header to be included in non-KVM-specific code */
+
#ifndef QEMU_KVM_H
#define QEMU_KVM_H
-#include "qemu/queue.h"
-#include "hw/core/cpu.h"
#include "exec/memattrs.h"
+#include "qemu/accel.h"
+#include "qom/object.h"
#ifdef NEED_CPU_H
# ifdef CONFIG_KVM
@@ -34,38 +36,33 @@ extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
-extern bool kvm_eventfds_allowed;
-extern bool kvm_irqfds_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
-extern bool kvm_direct_msi_allowed;
-extern bool kvm_ioeventfd_any_length_allowed;
extern bool kvm_msi_use_devid;
#define kvm_enabled() (kvm_allowed)
/**
* kvm_irqchip_in_kernel:
*
- * Returns: true if the user asked us to create an in-kernel
- * irqchip via the "kernel_irqchip=on" machine option.
+ * Returns: true if an in-kernel irqchip was created.
* What this actually means is architecture and machine model
- * specific: on PC, for instance, it means that the LAPIC,
- * IOAPIC and PIT are all in kernel. This function should never
- * be used from generic target-independent code: use one of the
- * following functions or some other specific check instead.
+ * specific: on PC, for instance, it means that the LAPIC
+ * is in the kernel. This function should never be used from generic
+ * target-independent code: use one of the following functions or
+ * some other specific check instead.
*/
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
/**
* kvm_irqchip_is_split:
*
- * Returns: true if the user asked us to split the irqchip
- * implementation between user and kernel space. The details are
- * architecture and machine specific. On PC, it means that the PIC,
- * IOAPIC, and PIT are in user space while the LAPIC is in the kernel.
+ * Returns: true if the irqchip implementation is split between
+ * user and kernel space. The details are architecture and
+ * machine specific. On PC, it means that the PIC, IOAPIC, and
+ * PIT are in user space while the LAPIC is in the kernel.
*/
#define kvm_irqchip_is_split() (kvm_split_irqchip)
@@ -88,22 +85,15 @@ extern bool kvm_msi_use_devid;
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)
/**
- * kvm_eventfds_enabled:
- *
- * Returns: true if we can use eventfds to receive notifications
- * from a KVM CPU (ie the kernel supports eventds and we are running
- * with a configuration where it is meaningful to use them).
- */
-#define kvm_eventfds_enabled() (kvm_eventfds_allowed)
-
-/**
* kvm_irqfds_enabled:
*
* Returns: true if we can use irqfds to inject interrupts into
* a KVM CPU (ie the kernel supports irqfds and we are running
* with a configuration where it is meaningful to use them).
+ *
+ * Always available if running with in-kernel irqchip.
*/
-#define kvm_irqfds_enabled() (kvm_irqfds_allowed)
+#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()
/**
* kvm_resamplefds_enabled:
@@ -147,19 +137,6 @@ extern bool kvm_msi_use_devid;
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)
/**
- * kvm_direct_msi_enabled:
- *
- * Returns: true if KVM allows direct MSI injection.
- */
-#define kvm_direct_msi_enabled() (kvm_direct_msi_allowed)
-
-/**
- * kvm_ioeventfd_any_length_enabled:
- * Returns: true if KVM allows any length io eventfd.
- */
-#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)
-
-/**
* kvm_msi_devid_required:
* Returns: true if KVM requires a device id to be provided while
* defining an MSI routing entry.
@@ -173,21 +150,17 @@ extern bool kvm_msi_use_devid;
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
-#define kvm_eventfds_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_allowed() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
-#define kvm_direct_msi_enabled() (false)
-#define kvm_ioeventfd_any_length_enabled() (false)
#define kvm_msi_devid_required() (false)
#endif /* CONFIG_KVM_IS_POSSIBLE */
struct kvm_run;
-struct kvm_lapic_state;
struct kvm_irq_routing_entry;
typedef struct KVMCapabilityInfo {
@@ -199,26 +172,28 @@ typedef struct KVMCapabilityInfo {
#define KVM_CAP_LAST_INFO { NULL, 0 }
struct KVMState;
+
+#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
typedef struct KVMState KVMState;
+DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
+ TYPE_KVM_ACCEL)
+
extern KVMState *kvm_state;
typedef struct Notifier Notifier;
+typedef struct KVMRouteChange {
+ KVMState *s;
+ int changes;
+} KVMRouteChange;
+
/* external API */
-bool kvm_has_free_slot(MachineState *ms);
+unsigned int kvm_get_max_memslots(void);
+unsigned int kvm_get_free_memslots(void);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
-int kvm_has_robust_singlestep(void);
-int kvm_has_debugregs(void);
int kvm_max_nested_state_length(void);
-int kvm_has_pit_state2(void);
-int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
-int kvm_has_intx_set_mask(void);
-
-int kvm_init_vcpu(CPUState *cpu);
-int kvm_cpu_exec(CPUState *cpu);
-int kvm_destroy_vcpu(CPUState *cpu);
/**
* kvm_arm_supports_user_irq
@@ -231,41 +206,32 @@ int kvm_destroy_vcpu(CPUState *cpu);
*/
bool kvm_arm_supports_user_irq(void);
-/**
- * kvm_memcrypt_enabled - return boolean indicating whether memory encryption
- * is enabled
- * Returns: 1 memory encryption is enabled
- * 0 memory encryption is disabled
- */
-bool kvm_memcrypt_enabled(void);
-
-/**
- * kvm_memcrypt_encrypt_data: encrypt the memory range
- *
- * Return: 1 failed to encrypt the range
- * 0 succesfully encrypted memory region
- */
-int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len);
+int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
+int kvm_on_sigbus(int code, void *addr);
#ifdef NEED_CPU_H
#include "cpu.h"
void kvm_flush_coalesced_mmio_buffer(void);
-int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
- target_ulong len, int type);
-int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
- target_ulong len, int type);
-void kvm_remove_all_breakpoints(CPUState *cpu);
+/**
+ * kvm_update_guest_debug(): ensure KVM debug structures updated
+ * @cs: the CPUState for this cpu
+ * @reinject_trap: KVM trap injection control
+ *
+ * There are usually per-arch specifics which will be handled by
+ * calling down to kvm_arch_update_guest_debug after the generic
+ * fields have been set.
+ */
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
-
-int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
-int kvm_on_sigbus(int code, void *addr);
-
-/* interface with exec.c */
-
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared));
+#else
+static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
+{
+ return -EINVAL;
+}
+#endif
/* internal API */
@@ -351,6 +317,8 @@ bool kvm_device_supported(int vmfd, uint64_t type);
extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
+void kvm_arch_accel_class_init(ObjectClass *oc);
+
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
@@ -369,6 +337,8 @@ int kvm_arch_get_registers(CPUState *cpu);
int kvm_arch_put_registers(CPUState *cpu, int level);
+int kvm_arch_get_default_type(MachineState *ms);
+
int kvm_arch_init(MachineState *ms, KVMState *s);
int kvm_arch_init_vcpu(CPUState *cpu);
@@ -405,20 +375,18 @@ void kvm_irqchip_add_change_notifier(Notifier *n);
void kvm_irqchip_remove_change_notifier(Notifier *n);
void kvm_irqchip_change_notify(void);
-void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
-
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
struct kvm_sw_breakpoint {
- target_ulong pc;
- target_ulong saved_insn;
+ vaddr pc;
+ vaddr saved_insn;
int use_count;
QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
- target_ulong pc);
+ vaddr pc);
int kvm_sw_breakpoints_active(CPUState *cpu);
@@ -426,10 +394,8 @@ int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
-int kvm_arch_insert_hw_breakpoint(target_ulong addr,
- target_ulong len, int type);
-int kvm_arch_remove_hw_breakpoint(target_ulong addr,
- target_ulong len, int type);
+int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
+int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
@@ -464,30 +430,20 @@ int kvm_vm_check_extension(KVMState *s, unsigned int extension);
kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \
})
-uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
- uint32_t index, int reg);
-uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index);
-
-
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
-#if !defined(CONFIG_USER_ONLY)
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
hwaddr *phys_addr);
-#endif
#endif /* NEED_CPU_H */
void kvm_cpu_synchronize_state(CPUState *cpu);
-void kvm_cpu_synchronize_post_reset(CPUState *cpu);
-void kvm_cpu_synchronize_post_init(CPUState *cpu);
-void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
void kvm_init_cpu_signals(CPUState *cpu);
/**
* kvm_irqchip_add_msi_route - Add MSI route for specific vector
- * @s: KVM state
+ * @c: KVMRouteChange instance.
* @vector: which vector to add. This can be either MSI/MSIX
* vector. The function will automatically detect whether
* MSI/MSIX is enabled, and fetch corresponding MSI
@@ -496,10 +452,24 @@ void kvm_init_cpu_signals(CPUState *cpu);
* as @NULL, an empty MSI message will be inited.
* @return: virq (>=0) when success, errno (<0) when failed.
*/
-int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev);
+int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
PCIDevice *dev);
void kvm_irqchip_commit_routes(KVMState *s);
+
+static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
+{
+ return (KVMRouteChange) { .s = s, .changes = 0 };
+}
+
+static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
+{
+ if (c->changes) {
+ kvm_irqchip_commit_routes(c->s);
+ c->changes = 0;
+ }
+}
+
void kvm_irqchip_release_virq(KVMState *s, int virq);
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter);
@@ -514,7 +484,6 @@ int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
qemu_irq irq);
void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
-void kvm_pc_setup_irq_routing(bool pci_enabled);
void kvm_init_irq_routing(KVMState *s);
bool kvm_kernel_irqchip_allowed(void);
@@ -552,10 +521,27 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);
* Returns: 0 on success, or a negative errno on failure.
*/
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
-struct ppc_radix_page_info *kvm_get_radix_page_info(void);
-int kvm_get_max_memslots(void);
/* Notify resamplefd for EOI of specific interrupts. */
void kvm_resample_fd_notify(int gsi);
+bool kvm_dirty_ring_enabled(void);
+
+uint32_t kvm_dirty_ring_size(void);
+
+void kvm_mark_guest_state_protected(void);
+
+/**
+ * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
+ * reported for the VM.
+ */
+bool kvm_hwpoisoned_mem(void);
+
+int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);
+
+int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
+int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);
+
+int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);
+
#endif
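
The KVMRouteChange helpers above make MSI route updates transactional: a caller opens a change set, adds routes through it, and commits once at the end. A minimal sketch of the intended call pattern, assuming a hypothetical MSI-X device with nr_vectors vectors and omitting rollback on error, might look like:

    /* Hedged sketch: batch several MSI route additions into one commit. */
    static int add_msix_routes(KVMState *s, PCIDevice *dev, int nr_vectors,
                               int *virqs)
    {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
        int i;

        for (i = 0; i < nr_vectors; i++) {
            int virq = kvm_irqchip_add_msi_route(&c, i, dev);
            if (virq < 0) {
                return virq;   /* error handling/rollback omitted */
            }
            virqs[i] = virq;
        }
        /* Only flushes routes to KVM if at least one was actually added. */
        kvm_irqchip_commit_route_changes(&c);
        return 0;
    }
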
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index c660a70c51..3f3d13f816 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -10,7 +10,9 @@
#define QEMU_KVM_INT_H
#include "exec/memory.h"
-#include "sysemu/accel.h"
+#include "qapi/qapi-types-common.h"
+#include "qemu/accel.h"
+#include "qemu/queue.h"
#include "sysemu/kvm.h"
typedef struct KVMSlot
@@ -23,23 +25,109 @@ typedef struct KVMSlot
int old_flags;
/* Dirty bitmap cache for the slot */
unsigned long *dirty_bmap;
+ unsigned long dirty_bmap_size;
+ /* Cache of the address space ID */
+ int as_id;
+ /* Cache of the offset in ram address space */
+ ram_addr_t ram_start_offset;
+ int guest_memfd;
+ hwaddr guest_memfd_offset;
} KVMSlot;
+typedef struct KVMMemoryUpdate {
+ QSIMPLEQ_ENTRY(KVMMemoryUpdate) next;
+ MemoryRegionSection section;
+} KVMMemoryUpdate;
+
typedef struct KVMMemoryListener {
MemoryListener listener;
- /* Protects the slots and all inside them */
- QemuMutex slots_lock;
KVMSlot *slots;
+ unsigned int nr_used_slots;
int as_id;
+ QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add;
+ QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del;
} KVMMemoryListener;
-#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
+#define KVM_MSI_HASHTAB_SIZE 256
+
+enum KVMDirtyRingReaperState {
+ KVM_DIRTY_RING_REAPER_NONE = 0,
+ /* The reaper is sleeping */
+ KVM_DIRTY_RING_REAPER_WAIT,
+ /* The reaper is reaping for dirty pages */
+ KVM_DIRTY_RING_REAPER_REAPING,
+};
+
+/*
+ * KVM reaper instance, responsible for collecting the KVM dirty bits
+ * via the dirty ring.
+ */
+struct KVMDirtyRingReaper {
+ /* The reaper thread */
+ QemuThread reaper_thr;
+ volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
+ volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
+};
+struct KVMState
+{
+ AccelState parent_obj;
+
+ int nr_slots;
+ int fd;
+ int vmfd;
+ int coalesced_mmio;
+ int coalesced_pio;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ bool coalesced_flush_in_progress;
+ int vcpu_events;
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
+ QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
+#endif
+ int max_nested_state_len;
+ int kvm_shadow_mem;
+ bool kernel_irqchip_allowed;
+ bool kernel_irqchip_required;
+ OnOffAuto kernel_irqchip_split;
+ bool sync_mmu;
+ bool guest_state_protected;
+ uint64_t manual_dirty_log_protect;
+ /* The man page (and posix) say ioctl numbers are signed int, but
+ * they're not. Linux, glibc and *BSD all treat ioctl numbers as
+ * unsigned, and treating them as signed here can break things */
+ unsigned irq_set_ioctl;
+ unsigned int sigmask_len;
+ GHashTable *gsimap;
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing *irq_routes;
+ int nr_allocated_irq_routes;
+ unsigned long *used_gsi_bitmap;
+ unsigned int gsi_count;
+#endif
+ KVMMemoryListener memory_listener;
+ QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
-#define KVM_STATE(obj) \
- OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)
+ /* For "info mtree -f" to tell if an MR is registered in KVM */
+ int nr_as;
+ struct KVMAs {
+ KVMMemoryListener *ml;
+ AddressSpace *as;
+ } *as;
+ uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
+ uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
+ bool kvm_dirty_ring_with_bitmap;
+ uint64_t kvm_eager_split_size; /* Eager Page Splitting chunk size */
+ struct KVMDirtyRingReaper reaper;
+ NotifyVmexitOption notify_vmexit;
+ uint32_t notify_window;
+ uint32_t xen_version;
+ uint32_t xen_caps;
+ uint16_t xen_gnttab_max_frames;
+ uint16_t xen_evtchn_max_pirq;
+ char *device;
+};
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
- AddressSpace *as, int as_id);
+ AddressSpace *as, int as_id, const char *name);
void kvm_set_max_memslot_size(hwaddr max_slot_size);
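
The transaction_add/transaction_del queues exist so that region add/del events can be deferred until the memory transaction commits. A rough sketch of that pattern, using only the structures declared above (the helper names and the actual slot programming are illustrative, not the in-tree implementation):

    /* Hedged sketch: queue a section now, apply everything at commit time. */
    static void kvm_region_add_deferred(KVMMemoryListener *kml,
                                        MemoryRegionSection *section)
    {
        KVMMemoryUpdate *u = g_new0(KVMMemoryUpdate, 1);

        u->section = *section;
        memory_region_ref(u->section.mr);
        QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, u, next);
    }

    static void kvm_commit_deferred(KVMMemoryListener *kml)
    {
        KVMMemoryUpdate *u, *tmp;

        QSIMPLEQ_FOREACH_SAFE(u, &kml->transaction_add, next, tmp) {
            /* ...program the corresponding KVMSlot into KVM here... */
            memory_region_unref(u->section.mr);
            g_free(u);
        }
        QSIMPLEQ_INIT(&kml->transaction_add);
    }
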
diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
new file mode 100644
index 0000000000..961c702c4e
--- /dev/null
+++ b/include/sysemu/kvm_xen.h
@@ -0,0 +1,44 @@
+/*
+ * Xen HVM emulation support in KVM
+ *
+ * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_SYSEMU_KVM_XEN_H
+#define QEMU_SYSEMU_KVM_XEN_H
+
+/* The KVM API uses these to indicate "no GPA" or "no GFN" */
+#define INVALID_GPA UINT64_MAX
+#define INVALID_GFN UINT64_MAX
+
+/* QEMU plays the rôle of dom0 for "interdomain" communication. */
+#define DOMID_QEMU 0
+
+int kvm_xen_soft_reset(void);
+uint32_t kvm_xen_get_caps(void);
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
+bool kvm_xen_has_vcpu_callback_vector(void);
+void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type);
+void kvm_xen_set_callback_asserted(void);
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
+uint16_t kvm_xen_get_gnttab_max_frames(void);
+uint16_t kvm_xen_get_evtchn_max_pirq(void);
+
+#define kvm_xen_has_cap(cap) (!!(kvm_xen_get_caps() & \
+ KVM_XEN_HVM_CONFIG_ ## cap))
+
+#define XEN_SPECIAL_AREA_ADDR 0xfeff8000UL
+#define XEN_SPECIAL_AREA_SIZE 0x4000UL
+
+#define XEN_SPECIALPAGE_CONSOLE 0
+#define XEN_SPECIALPAGE_XENSTORE 1
+
+#define XEN_SPECIAL_PFN(x) ((XEN_SPECIAL_AREA_ADDR >> TARGET_PAGE_BITS) + \
+ XEN_SPECIALPAGE_##x)
+
+#endif /* QEMU_SYSEMU_KVM_XEN_H */
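
kvm_xen_has_cap() is just a bit test against the cached capability word, and XEN_SPECIAL_PFN() turns a named special page into a frame number inside the reserved area. For illustration (the capability name is only an example, and 4 KiB target pages are assumed):

    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        /* expands to: kvm_xen_get_caps() & KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
    }

    /* With TARGET_PAGE_BITS == 12, XEN_SPECIAL_PFN(XENSTORE) evaluates to
     * (0xfeff8000 >> 12) + 1 == 0xfeff9. */
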
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 4b20f1a639..021e0a6230 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -15,8 +15,7 @@
#define MEMORY_MAPPING_H
#include "qemu/queue.h"
-#include "exec/cpu-defs.h"
-#include "exec/memory.h"
+#include "exec/cpu-common.h"
typedef struct GuestPhysBlock {
/* visible to guest, reflects PCI hole, etc */
@@ -43,7 +42,7 @@ typedef struct GuestPhysBlockList {
/* The physical and virtual address in the memory mapping are contiguous. */
typedef struct MemoryMapping {
hwaddr phys_addr;
- target_ulong virt_addr;
+ vaddr virt_addr;
ram_addr_t length;
QTAILQ_ENTRY(MemoryMapping) next;
} MemoryMapping;
@@ -72,7 +71,7 @@ void guest_phys_blocks_free(GuestPhysBlockList *list);
void guest_phys_blocks_init(GuestPhysBlockList *list);
void guest_phys_blocks_append(GuestPhysBlockList *list);
-void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+bool qemu_get_guest_memory_mapping(MemoryMappingList *list,
const GuestPhysBlockList *guest_phys_blocks,
Error **errp);
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index ad58ee88f7..825cfe86bc 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -41,6 +41,7 @@ struct NodeInfo {
struct HostMemoryBackend *node_memdev;
bool present;
bool has_cpu;
+ bool has_gi;
uint8_t lb_info_provided;
uint16_t initiator;
uint8_t distance[MAX_NODES];
@@ -106,10 +107,6 @@ void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node,
void numa_complete_configuration(MachineState *ms);
void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms);
extern QemuOptsList qemu_numa_opts;
-void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
- int nb_nodes, ram_addr_t size);
-void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
- int nb_nodes, ram_addr_t size);
void numa_cpu_pre_plug(const struct CPUArchId *slot, DeviceState *dev,
Error **errp);
bool numa_uses_legacy_mem(void);
diff --git a/include/sysemu/nvmm.h b/include/sysemu/nvmm.h
new file mode 100644
index 0000000000..be7bc9a62d
--- /dev/null
+++ b/include/sysemu/nvmm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator support.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/* header to be included in non-NVMM-specific code */
+
+#ifndef QEMU_NVMM_H
+#define QEMU_NVMM_H
+
+#ifdef NEED_CPU_H
+
+#ifdef CONFIG_NVMM
+
+int nvmm_enabled(void);
+
+#else /* CONFIG_NVMM */
+
+#define nvmm_enabled() (0)
+
+#endif /* CONFIG_NVMM */
+
+#endif /* NEED_CPU_H */
+
+#endif /* QEMU_NVMM_H */
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
index 629c8c648b..b881ac6c6f 100644
--- a/include/sysemu/os-posix.h
+++ b/include/sysemu/os-posix.h
@@ -38,21 +38,23 @@
#include <sys/sysmacros.h>
#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
void os_set_proc_name(const char *s);
void os_setup_signal_handling(void);
+int os_set_daemonize(bool d);
+bool is_daemonized(void);
void os_daemonize(void);
+bool os_set_runas(const char *user_id);
+void os_set_chroot(const char *path);
+void os_setup_limits(void);
void os_setup_post(void);
int os_mlock(void);
-#define closesocket(s) close(s)
-#define ioctlsocket(s, r, v) ioctl(s, r, v)
-
-typedef struct timeval qemu_timeval;
-#define qemu_gettimeofday(tp) gettimeofday(tp, NULL)
-
-bool is_daemonized(void);
-
/**
* qemu_alloc_stack:
* @sz: pointer to a size_t holding the requested usable stack size
@@ -92,4 +94,8 @@ static inline void qemu_funlockfile(FILE *f)
funlockfile(f);
}
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
index d8978e28c0..b82a5d3ad9 100644
--- a/include/sysemu/os-win32.h
+++ b/include/sysemu/os-win32.h
@@ -29,15 +29,57 @@
#include <winsock2.h>
#include <windows.h>
#include <ws2tcpip.h>
+#include "qemu/typedefs.h"
-#if defined(_WIN64)
-/* On w64, setjmp is implemented by _setjmp which needs a second parameter.
+#ifdef HAVE_AFUNIX_H
+#include <afunix.h>
+#else
+/*
+ * Fallback definitions of things we need in afunix.h, if not available from
+ * the Windows SDK or MinGW headers in use.
+ */
+#define UNIX_PATH_MAX 108
+
+typedef struct sockaddr_un {
+ ADDRESS_FAMILY sun_family;
+ char sun_path[UNIX_PATH_MAX];
+} SOCKADDR_UN, *PSOCKADDR_UN;
+
+#define SIO_AF_UNIX_GETPEERPID _WSAIOR(IOC_VENDOR, 256)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__aarch64__)
+/*
+ * On windows-arm64, setjmp is available in only one variant, and longjmp always
+ * does stack unwinding. This crashes with generated code.
+ * Thus, we use another implementation of setjmp (not the Windows one), coming
+ * from MinGW, which never performs stack unwinding.
+ */
+#undef setjmp
+#undef longjmp
+/*
+ * These functions are not declared in setjmp.h because __aarch64__ defines
+ * setjmp to _setjmpex instead. However, they are still defined in libmingwex.a,
+ * which gets linked automatically.
+ */
+int __mingw_setjmp(jmp_buf);
+void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
+#define setjmp(env) __mingw_setjmp(env)
+#define longjmp(env, val) __mingw_longjmp(env, val)
+#elif defined(_WIN64)
+/*
+ * On windows-x64, setjmp is implemented by _setjmp which needs a second parameter.
 * If this parameter is NULL, longjmp does no stack unwinding.
* That is what we need for QEMU. Passing the value of register rsp (default)
- * lets longjmp try a stack unwinding which will crash with generated code. */
+ * lets longjmp try a stack unwinding which will crash with generated code.
+ */
# undef setjmp
# define setjmp(env) _setjmp(env, NULL)
-#endif
+#endif /* __aarch64__ */
/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
* "longjmp and don't touch the signal masks". Since we know that the
* savemask parameter will always be zero we can safely define these
@@ -48,18 +90,19 @@
#define siglongjmp(env, val) longjmp(env, val)
/* Missing POSIX functions. Don't use MinGW-w64 macros. */
-#ifndef CONFIG_LOCALTIME_R
+#ifndef _POSIX_THREAD_SAFE_FUNCTIONS
#undef gmtime_r
struct tm *gmtime_r(const time_t *timep, struct tm *result);
#undef localtime_r
struct tm *localtime_r(const time_t *timep, struct tm *result);
-#endif /* CONFIG_LOCALTIME_R */
+#endif /* _POSIX_THREAD_SAFE_FUNCTIONS */
static inline void os_setup_signal_handling(void) {}
static inline void os_daemonize(void) {}
static inline void os_setup_post(void) {}
-void os_set_line_buffering(void);
static inline void os_set_proc_name(const char *dummy) {}
+void os_set_line_buffering(void);
+void os_setup_early_signal_handling(void);
int getpagesize(void);
@@ -67,11 +110,13 @@ int getpagesize(void);
# define EPROTONOSUPPORT EINVAL
#endif
-typedef struct {
- long tv_sec;
- long tv_usec;
-} qemu_timeval;
-int qemu_gettimeofday(qemu_timeval *tp);
+static inline int os_set_daemonize(bool d)
+{
+ if (d) {
+ return -ENOTSUP;
+ }
+ return 0;
+}
static inline bool is_daemonized(void)
{
@@ -83,6 +128,11 @@ static inline int os_mlock(void)
return -ENOSYS;
}
+static inline void os_setup_limits(void)
+{
+ return;
+}
+
#define fsync _commit
#if !defined(lseek)
@@ -101,25 +151,48 @@ static inline char *realpath(const char *path, char *resolved_path)
return resolved_path;
}
-/* ??? Mingw appears to export _lock_file and _unlock_file as the functions
- * with which to lock a stdio handle. But something is wrong in the markup,
- * either in the header or the library, such that we get undefined references
- * to "_imp___lock_file" etc when linking. Since we seem to have no other
- * alternative, and the usage within the logging functions isn't critical,
- * ignore FILE locking.
+/*
+ * Older versions of MinGW do not import _lock_file and _unlock_file properly.
+ * This was fixed for v6.0.0 with commit b48e3ac8969d.
*/
-
static inline void qemu_flockfile(FILE *f)
{
+#ifdef HAVE__LOCK_FILE
+ _lock_file(f);
+#endif
}
static inline void qemu_funlockfile(FILE *f)
{
+#ifdef HAVE__LOCK_FILE
+ _unlock_file(f);
+#endif
}
-/* We wrap all the sockets functions so that we can
- * set errno based on WSAGetLastError()
+/* Helper for WSAEventSelect, to report errors */
+bool qemu_socket_select(int sockfd, WSAEVENT hEventObject,
+ long lNetworkEvents, Error **errp);
+
+bool qemu_socket_unselect(int sockfd, Error **errp);
+
+/* We wrap all the sockets functions so that we can set errno based on
+ * WSAGetLastError(), and use file-descriptors instead of SOCKET.
+ */
+
+/*
+ * qemu_close_socket_osfhandle:
+ * @fd: a file descriptor associated with a SOCKET
+ *
+ * Close only the C run-time file descriptor, leave the SOCKET opened.
+ *
+ * Returns zero on success. On error, -1 is returned, and errno is set to
+ * indicate the error.
*/
+int qemu_close_socket_osfhandle(int fd);
+
+#undef close
+#define close qemu_close_wrap
+int qemu_close_wrap(int fd);
#undef connect
#define connect qemu_connect_wrap
@@ -152,10 +225,6 @@ int qemu_shutdown_wrap(int sockfd, int how);
#define ioctlsocket qemu_ioctlsocket_wrap
int qemu_ioctlsocket_wrap(int fd, int req, void *val);
-#undef closesocket
-#define closesocket qemu_closesocket_wrap
-int qemu_closesocket_wrap(int fd);
-
#undef getsockopt
#define getsockopt qemu_getsockopt_wrap
int qemu_getsockopt_wrap(int sockfd, int level, int optname,
@@ -194,4 +263,15 @@ ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags);
ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags,
struct sockaddr *addr, socklen_t *addrlen);
+EXCEPTION_DISPOSITION
+win32_close_exception_handler(struct _EXCEPTION_RECORD*, void*,
+ struct _CONTEXT*, void*);
+
+void *qemu_win32_map_alloc(size_t size, HANDLE *h, Error **errp);
+void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
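
Each #undef/#define pair above redirects a libc/Winsock call site to a qemu_*_wrap() function, which is where errno is derived from WSAGetLastError() and where file descriptors are translated back to SOCKET handles. A hedged sketch of what one such wrapper typically looks like (the error-mapping helper and the exact conversion are illustrative, not the actual implementation):

    int qemu_shutdown_wrap(int sockfd, int how)
    {
        SOCKET s = (SOCKET)_get_osfhandle(sockfd);   /* fd -> SOCKET */
        int ret = shutdown(s, how);

        if (ret < 0) {
            errno = socket_error();   /* map WSAGetLastError() to errno */
        }
        return ret;
    }
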
diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h
index eedd3664f0..b5d5fd3463 100644
--- a/include/sysemu/qtest.h
+++ b/include/sysemu/qtest.h
@@ -14,6 +14,7 @@
#ifndef QTEST_H
#define QTEST_H
+#include "chardev/char.h"
extern bool qtest_allowed;
@@ -22,6 +23,10 @@ static inline bool qtest_enabled(void)
return qtest_allowed;
}
+#ifndef CONFIG_USER_ONLY
+void qtest_send_prefix(CharBackend *chr);
+void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...);
+void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words));
bool qtest_driver(void);
void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
@@ -30,4 +35,7 @@ void qtest_server_set_send_handler(void (*send)(void *, const char *),
void *opaque);
void qtest_server_inproc_recv(void *opaque, const char *buf);
+int64_t qtest_get_virtual_clock(void);
+#endif
+
#endif
diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h
index 5471bb514d..f229b2109c 100644
--- a/include/sysemu/replay.h
+++ b/include/sysemu/replay.h
@@ -1,8 +1,5 @@
-#ifndef REPLAY_H
-#define REPLAY_H
-
/*
- * replay.h
+ * QEMU replay (system interface)
*
* Copyright (c) 2010-2015 Institute for System Programming
* of the Russian Academy of Sciences.
@@ -11,7 +8,14 @@
* See the COPYING file in the top-level directory.
*
*/
+#ifndef SYSEMU_REPLAY_H
+#define SYSEMU_REPLAY_H
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include this header from user emulation
+#endif
+#include "exec/replay-core.h"
#include "qapi/qapi-types-misc.h"
#include "qapi/qapi-types-run-state.h"
#include "qapi/qapi-types-ui.h"
@@ -44,8 +48,6 @@ typedef enum ReplayCheckpoint ReplayCheckpoint;
typedef struct ReplayNetState ReplayNetState;
-extern ReplayMode replay_mode;
-
/* Name of the initial VM snapshot */
extern char *replay_snapshot;
@@ -62,17 +64,6 @@ extern char *replay_snapshot;
void replay_mutex_lock(void);
void replay_mutex_unlock(void);
-/* Replay process control functions */
-
-/*! Enables recording or saving event log with specified parameters */
-void replay_configure(struct QemuOpts *opts);
-/*! Initializes timers used for snapshotting and enables events recording */
-void replay_start(void);
-/*! Closes replay log file and frees other resources. */
-void replay_finish(void);
-/*! Adds replay blocker with the specified error description */
-void replay_add_blocker(Error *reason);
-
/* Processing the instructions */
/*! Returns number of executed instructions. */
@@ -82,21 +73,10 @@ int replay_get_instructions(void);
/*! Updates instructions counter in replay mode. */
void replay_account_executed_instructions(void);
-/* Interrupts and exceptions */
-
-/*! Called by exception handler to write or read
- exception processing events. */
-bool replay_exception(void);
-/*! Used to determine that exception is pending.
- Does not proceed to the next event in the log. */
-bool replay_has_exception(void);
-/*! Called by interrupt handlers to write or read
- interrupt processing events.
- \return true if interrupt should be processed */
-bool replay_interrupt(void);
-/*! Tries to read interrupt event from the file.
- Returns true, when interrupt request is pending */
-bool replay_has_interrupt(void);
+/**
+ * replay_can_wait: check if we should pause for wait-io
+ */
+bool replay_can_wait(void);
/* Processing clocks and other time sources */
@@ -104,25 +84,22 @@ bool replay_has_interrupt(void);
int64_t replay_save_clock(ReplayClockKind kind, int64_t clock,
int64_t raw_icount);
/*! Read the specified clock from the log or return cached data */
-int64_t replay_read_clock(ReplayClockKind kind);
+int64_t replay_read_clock(ReplayClockKind kind, int64_t raw_icount);
/*! Saves or reads the clock depending on the current replay mode. */
#define REPLAY_CLOCK(clock, value) \
- (replay_mode == REPLAY_MODE_PLAY ? replay_read_clock((clock)) \
+ !icount_enabled() ? (value) : \
+ (replay_mode == REPLAY_MODE_PLAY \
+ ? replay_read_clock((clock), icount_get_raw()) \
: replay_mode == REPLAY_MODE_RECORD \
- ? replay_save_clock((clock), (value), cpu_get_icount_raw()) \
- : (value))
+ ? replay_save_clock((clock), (value), icount_get_raw()) \
+ : (value))
#define REPLAY_CLOCK_LOCKED(clock, value) \
- (replay_mode == REPLAY_MODE_PLAY ? replay_read_clock((clock)) \
+ !icount_enabled() ? (value) : \
+ (replay_mode == REPLAY_MODE_PLAY \
+ ? replay_read_clock((clock), icount_get_raw_locked()) \
: replay_mode == REPLAY_MODE_RECORD \
- ? replay_save_clock((clock), (value), cpu_get_icount_raw_locked()) \
- : (value))
-
-/* Processing data from random generators */
-
-/* Saves the values from the random number generator */
-void replay_save_random(int ret, void *buf, size_t len);
-/* Loads the saved values for the random number generator */
-int replay_read_random(void *buf, size_t len);
+ ? replay_save_clock((clock), (value), icount_get_raw_locked()) \
+ : (value))
/* Events */
@@ -134,9 +111,14 @@ void replay_shutdown_request(ShutdownCause cause);
Returns 0 in PLAY mode if checkpoint was not found.
Returns 1 in all other cases. */
bool replay_checkpoint(ReplayCheckpoint checkpoint);
-/*! Used to determine that checkpoint is pending.
+/*! Used to determine that checkpoint or async event is pending.
Does not proceed to the next event in the log. */
-bool replay_has_checkpoint(void);
+bool replay_has_event(void);
+/*
+ * Processes the async events added to the queue (while recording)
+ * or reads the events from the file (while replaying).
+ */
+void replay_async_events(void);
/* Asynchronous events queue */
@@ -146,6 +128,8 @@ void replay_disable_events(void);
void replay_enable_events(void);
/*! Returns true when saving events is enabled */
bool replay_events_enabled(void);
+/* Flushes events queue */
+void replay_flush_events(void);
/*! Adds bottom half event to the queue */
void replay_bh_schedule_event(QEMUBH *bh);
/* Adds oneshot bottom half event to the queue */
@@ -165,7 +149,7 @@ uint64_t blkreplay_next_id(void);
/*! Registers char driver to save its events */
void replay_register_char_driver(struct Chardev *chr);
/*! Saves write to char device event to the log */
-void replay_chr_be_write(struct Chardev *s, uint8_t *buf, int len);
+void replay_chr_be_write(struct Chardev *s, const uint8_t *buf, int len);
/*! Writes char write return value to the replay log. */
void replay_char_write_event_save(int res, int offset);
/*! Reads char write return value from the replay log. */
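
With the icount check placed first, REPLAY_CLOCK now degrades to the plain host value whenever icount is disabled, and only consults the log in record/replay mode. A call site reads roughly as follows (the clock kind and wrapper function are just examples):

    /* Hedged sketch: record mode logs the host value, replay mode returns
     * the logged value, and without icount the host value is used as is. */
    static int64_t mydev_read_host_clock(void)
    {
        return REPLAY_CLOCK(REPLAY_CLOCK_HOST, get_clock_realtime());
    }
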
diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h
index 0b0d6d7598..ae436044a9 100644
--- a/include/sysemu/reset.h
+++ b/include/sysemu/reset.h
@@ -1,10 +1,126 @@
+/*
+ * Reset handlers.
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2016 Red Hat, Inc.
+ * Copyright (c) 2024 Linaro, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
#ifndef QEMU_SYSEMU_RESET_H
#define QEMU_SYSEMU_RESET_H
+#include "qapi/qapi-events-run-state.h"
+
typedef void QEMUResetHandler(void *opaque);
+/**
+ * qemu_register_resettable: Register an object to be reset
+ * @obj: object to be reset: it must implement the Resettable interface
+ *
+ * Register @obj on the list of objects which will be reset when the
+ * simulation is reset. These objects will be reset in the order
+ * they were added, using the three-phase Resettable protocol,
+ * so first all objects go through the enter phase, then all objects
+ * go through the hold phase, and then finally all go through the
+ * exit phase.
+ *
+ * It is not permitted to register or unregister reset functions or
+ * resettable objects from within any of the reset phase methods of @obj.
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_register_resettable(Object *obj);
+
+/**
+ * qemu_unregister_resettable: Unregister an object to be reset
+ * @obj: object to unregister
+ *
+ * Remove @obj from the list of objects which are reset when the
+ * simulation is reset. It must have been previously added to
+ * the list via qemu_register_resettable().
+ *
+ * We assume that the caller holds the BQL.
+ */
+void qemu_unregister_resettable(Object *obj);
+
+/**
+ * qemu_register_reset: Register a callback for system reset
+ * @func: function to call
+ * @opaque: opaque data to pass to @func
+ *
+ * Register @func on the list of functions which are called when the
+ * entire system is reset. Functions registered with this API and
+ * Resettable objects registered with qemu_register_resettable() are
+ * handled together, in the order in which they were registered.
+ * Functions registered with this API are called in the 'hold' phase
+ * of the 3-phase reset.
+ *
+ * In general this function should not be used in new code where possible;
+ * for instance, device model reset is better accomplished using the
+ * methods on DeviceState.
+ *
+ * It is not permitted to register or unregister reset functions or
+ * resettable objects from within the @func callback.
+ *
+ * We assume that the caller holds the BQL.
+ */
void qemu_register_reset(QEMUResetHandler *func, void *opaque);
+
+/**
+ * qemu_register_reset_nosnapshotload: Register a callback for system reset
+ * @func: function to call
+ * @opaque: opaque data to pass to @func
+ *
+ * This is the same as qemu_register_reset(), except that @func is
+ * not called if the reason that the system is being reset is to
+ * put it into a clean state prior to loading a snapshot (i.e. for
+ * SHUTDOWN_CAUSE_SNAPSHOT_LOAD).
+ */
+void qemu_register_reset_nosnapshotload(QEMUResetHandler *func, void *opaque);
+
+/**
+ * qemu_unregister_reset: Unregister a system reset callback
+ * @func: function registered with qemu_register_reset()
+ * @opaque: the same opaque data that was passed to qemu_register_reset()
+ *
+ * Undo the effects of a qemu_register_reset(). The @func and @opaque
+ * must both match the arguments originally used with qemu_register_reset().
+ *
+ * We assume that the caller holds the BQL.
+ */
void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
-void qemu_devices_reset(void);
+
+/**
+ * qemu_devices_reset: Perform a complete system reset
+ * @reason: reason for the reset
+ *
+ * This function performs the low-level work needed to do a complete reset
+ * of the system (calling all the callbacks registered with
+ * qemu_register_reset() and resetting all the Resettable objects registered
+ * with qemu_register_resettable()). It should only be called by the code in a
+ * MachineClass reset method.
+ *
+ * If you want to trigger a system reset from, for instance, a device
+ * model, don't use this function. Use qemu_system_reset_request().
+ */
+void qemu_devices_reset(ShutdownCause reason);
#endif
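
For code that cannot yet be converted to a Resettable object, the function-based registration stays available and runs in the 'hold' phase as documented above. A minimal sketch (the device state type and field are hypothetical):

    static void mydev_system_reset(void *opaque)
    {
        MyDevState *s = opaque;     /* hypothetical device state */

        s->irq_pending = false;     /* put the model back into a clean state */
    }

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        /* register once at realize time; unregister on unrealize */
        qemu_register_reset(mydev_system_reset, dev);
    }
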
diff --git a/include/sysemu/rng-random.h b/include/sysemu/rng-random.h
index 38186fe83d..0fdc6c6974 100644
--- a/include/sysemu/rng-random.h
+++ b/include/sysemu/rng-random.h
@@ -15,8 +15,7 @@
#include "qom/object.h"
#define TYPE_RNG_RANDOM "rng-random"
-#define RNG_RANDOM(obj) OBJECT_CHECK(RngRandom, (obj), TYPE_RNG_RANDOM)
+OBJECT_DECLARE_SIMPLE_TYPE(RngRandom, RNG_RANDOM)
-typedef struct RngRandom RngRandom;
#endif
diff --git a/include/sysemu/rng.h b/include/sysemu/rng.h
index fa6eada78c..e383f87d20 100644
--- a/include/sysemu/rng.h
+++ b/include/sysemu/rng.h
@@ -17,18 +17,12 @@
#include "qom/object.h"
#define TYPE_RNG_BACKEND "rng-backend"
-#define RNG_BACKEND(obj) \
- OBJECT_CHECK(RngBackend, (obj), TYPE_RNG_BACKEND)
-#define RNG_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(RngBackendClass, (obj), TYPE_RNG_BACKEND)
-#define RNG_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(RngBackendClass, (klass), TYPE_RNG_BACKEND)
+OBJECT_DECLARE_TYPE(RngBackend, RngBackendClass,
+ RNG_BACKEND)
#define TYPE_RNG_BUILTIN "rng-builtin"
typedef struct RngRequest RngRequest;
-typedef struct RngBackendClass RngBackendClass;
-typedef struct RngBackend RngBackend;
typedef void (EntropyReceiveFunc)(void *opaque,
const void *data,
diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h
new file mode 100644
index 0000000000..0fc8ad6fdf
--- /dev/null
+++ b/include/sysemu/rtc.h
@@ -0,0 +1,58 @@
+/*
+ * RTC configuration and clock read
+ *
+ * Copyright (c) 2003-2021 QEMU contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef SYSEMU_RTC_H
+#define SYSEMU_RTC_H
+
+/**
+ * qemu_get_timedate: Get the current RTC time
+ * @tm: struct tm to fill in with RTC time
+ * @offset: offset in seconds to adjust the RTC time by before
+ * converting to struct tm format.
+ *
+ * This function fills in @tm with the current RTC time, as adjusted
+ * by @offset (for example, if @offset is 3600 then the returned time/date
+ * will be one hour further ahead than the current RTC time).
+ *
+ * The usual use is by RTC device models, which should call this function
+ * to find the time/date value that they should return to the guest
+ * when it reads the RTC registers.
+ *
+ * The behaviour of the clock whose value this function returns will
+ * depend on the -rtc command line option passed by the user.
+ */
+void qemu_get_timedate(struct tm *tm, time_t offset);
+
+/**
+ * qemu_timedate_diff: Return difference between a struct tm and the RTC
+ * @tm: struct tm containing the date/time to compare against
+ *
+ * Returns the difference in seconds between the RTC clock time
+ * and the date/time specified in @tm. For example, if @tm specifies
+ * a timestamp one hour further ahead than the current RTC time
+ * then this function will return 3600.
+ */
+time_t qemu_timedate_diff(struct tm *tm);
+
+#endif
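
An RTC device model typically uses the two helpers as a matched pair: qemu_get_timedate() when the guest reads the time registers, qemu_timedate_diff() when the guest writes them. A fragment along these lines (the state and register fields are illustrative):

    /* Guest read: expose the RTC time, shifted by the model's offset. */
    struct tm tm;
    qemu_get_timedate(&tm, s->offset_seconds);
    regs->hours   = tm.tm_hour;
    regs->minutes = tm.tm_min;

    /* Guest write: remember how far the written time is from the RTC. */
    s->offset_seconds = qemu_timedate_diff(&tm);
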
diff --git a/include/sysemu/runstate-action.h b/include/sysemu/runstate-action.h
new file mode 100644
index 0000000000..db4e3099ae
--- /dev/null
+++ b/include/sysemu/runstate-action.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef RUNSTATE_ACTION_H
+#define RUNSTATE_ACTION_H
+
+#include "qapi/qapi-commands-run-state.h"
+
+/* in system/runstate-action.c */
+extern RebootAction reboot_action;
+extern ShutdownAction shutdown_action;
+extern PanicAction panic_action;
+
+#endif /* RUNSTATE_ACTION_H */
diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h
index f760094858..0117d243c4 100644
--- a/include/sysemu/runstate.h
+++ b/include/sysemu/runstate.h
@@ -6,32 +6,71 @@
bool runstate_check(RunState state);
void runstate_set(RunState new_state);
-int runstate_is_running(void);
+RunState runstate_get(void);
+bool runstate_is_running(void);
bool runstate_needs_reset(void);
-bool runstate_store(char *str, size_t size);
-typedef void VMChangeStateHandler(void *opaque, int running, RunState state);
+typedef void VMChangeStateHandler(void *opaque, bool running, RunState state);
VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
void *opaque);
VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
VMChangeStateHandler *cb, void *opaque, int priority);
+VMChangeStateEntry *
+qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb,
+ VMChangeStateHandler *prepare_cb,
+ void *opaque, int priority);
VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
VMChangeStateHandler *cb,
void *opaque);
+VMChangeStateEntry *qdev_add_vm_change_state_handler_full(
+ DeviceState *dev, VMChangeStateHandler *cb,
+ VMChangeStateHandler *prepare_cb, void *opaque);
void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
-void vm_state_notify(int running, RunState state);
+/**
+ * vm_state_notify: Notify the state of the VM
+ *
+ * @running: whether the VM is running or not.
+ * @state: the #RunState of the VM.
+ */
+void vm_state_notify(bool running, RunState state);
static inline bool shutdown_caused_by_guest(ShutdownCause cause)
{
return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN;
}
+/*
+ * In a "live" state, the vcpu clock is ticking, and the runstate notifiers
+ * think we are running.
+ */
+static inline bool runstate_is_live(RunState state)
+{
+ return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED;
+}
+
void vm_start(void);
-int vm_prepare_start(void);
+
+/**
+ * vm_prepare_start: Prepare for starting/resuming the VM
+ *
+ * @step_pending: whether any of the CPUs is about to be single-stepped by gdb
+ */
+int vm_prepare_start(bool step_pending);
+
+/**
+ * vm_resume: If @state is a live state, start the vm and set the state,
+ * else just set the state.
+ *
+ * @state: the state to restore
+ */
+void vm_resume(RunState state);
+
int vm_stop(RunState state);
int vm_stop_force_state(RunState state);
int vm_shutdown(void);
+void vm_set_suspended(bool suspended);
+bool vm_get_suspended(void);
typedef enum WakeupReason {
/* Always keep QEMU_WAKEUP_REASON_NONE = 0 */
@@ -41,7 +80,6 @@ typedef enum WakeupReason {
QEMU_WAKEUP_REASON_OTHER,
} WakeupReason;
-void qemu_exit_preconfig_request(void);
void qemu_system_reset_request(ShutdownCause reason);
void qemu_system_suspend_request(void);
void qemu_register_suspend_notifier(Notifier *notifier);
@@ -50,6 +88,8 @@ void qemu_system_wakeup_request(WakeupReason reason, Error **errp);
void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
void qemu_register_wakeup_notifier(Notifier *notifier);
void qemu_register_wakeup_support(void);
+void qemu_system_shutdown_request_with_code(ShutdownCause reason,
+ int exit_code);
void qemu_system_shutdown_request(ShutdownCause reason);
void qemu_system_powerdown_request(void);
void qemu_register_powerdown_notifier(Notifier *notifier);
@@ -64,6 +104,7 @@ void qemu_system_killed(int signal, pid_t pid);
void qemu_system_reset(ShutdownCause reason);
void qemu_system_guest_panicked(GuestPanicInformation *info);
void qemu_system_guest_crashloaded(GuestPanicInformation *info);
+bool qemu_system_dump_in_progress(void);
#endif
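
The new *_full variants let a handler pair a prepare callback with the main one; all prepare callbacks run before any main callback, which is useful when a device must quiesce work before the global state flips. A hedged sketch of registering such a pair (names and bodies are placeholders):

    static void mydev_vm_state_prepare(void *opaque, bool running, RunState state)
    {
        /* quiesce anything that must settle before the main handlers run */
    }

    static void mydev_vm_state_change(void *opaque, bool running, RunState state)
    {
        /* react to the final running/stopped decision */
    }

    /* called from the device's init/realize path; priority 0 */
    qemu_add_vm_change_state_handler_prio_full(mydev_vm_state_change,
                                               mydev_vm_state_prepare,
                                               opaque, 0);
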
diff --git a/include/sysemu/sev.h b/include/sysemu/sev.h
deleted file mode 100644
index 98c1ec8d38..0000000000
--- a/include/sysemu/sev.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * QEMU Secure Encrypted Virutualization (SEV) support
- *
- * Copyright: Advanced Micro Devices, 2016-2018
- *
- * Authors:
- * Brijesh Singh <brijesh.singh@amd.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_SEV_H
-#define QEMU_SEV_H
-
-#include "sysemu/kvm.h"
-
-void *sev_guest_init(const char *id);
-int sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len);
-#endif
diff --git a/include/sysemu/stats.h b/include/sysemu/stats.h
new file mode 100644
index 0000000000..42c236c795
--- /dev/null
+++ b/include/sysemu/stats.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef STATS_H
+#define STATS_H
+
+#include "qapi/qapi-types-stats.h"
+
+typedef void StatRetrieveFunc(StatsResultList **result, StatsTarget target,
+ strList *names, strList *targets, Error **errp);
+typedef void SchemaRetrieveFunc(StatsSchemaList **result, Error **errp);
+
+/*
+ * Register callbacks for the QMP query-stats command.
+ *
+ * @provider: stats provider checked against QMP command arguments
+ * @stats_fn: routine to query stats
+ * @schemas_fn: routine to query stat schemas
+ */
+void add_stats_callbacks(StatsProvider provider,
+ StatRetrieveFunc *stats_fn,
+ SchemaRetrieveFunc *schemas_fn);
+
+/*
+ * Helper routines for adding stats entries to the results lists.
+ */
+void add_stats_entry(StatsResultList **, StatsProvider, const char *id,
+ StatsList *stats_list);
+void add_stats_schema(StatsSchemaList **, StatsProvider, StatsTarget,
+ StatsSchemaValueList *);
+
+/*
+ * True if a string matches the filter passed to the stats_fn callback,
+ * false otherwise.
+ *
+ * Note that an empty list means no filtering, i.e. all strings will
+ * return true.
+ */
+bool apply_str_list_filter(const char *string, strList *list);
+
+#endif /* STATS_H */
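
A statistics provider wires itself into the QMP query-stats command by registering the two callbacks once at startup; inside the retrieval callback, entries are appended with add_stats_entry() and names can be pre-filtered with apply_str_list_filter(). A sketch under those assumptions (the provider constant is only an example):

    static void my_stats_cb(StatsResultList **result, StatsTarget target,
                            strList *names, strList *targets, Error **errp)
    {
        /* build a StatsList and append it, e.g.:
         * add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list); */
    }

    static void my_schemas_cb(StatsSchemaList **result, Error **errp)
    {
        /* describe the available statistics for each target */
    }

    static void my_stats_register(void)
    {
        add_stats_callbacks(STATS_PROVIDER_KVM, my_stats_cb, my_schemas_cb);
    }
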
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 4b6a5c459c..5b4397eeb8 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -8,22 +8,23 @@
/* vl.c */
-extern const char *bios_name;
extern int only_migratable;
extern const char *qemu_name;
extern QemuUUID qemu_uuid;
extern bool qemu_uuid_set;
-void qemu_add_data_dir(const char *path);
+const char *qemu_get_vm_name(void);
void qemu_add_exit_notifier(Notifier *notify);
void qemu_remove_exit_notifier(Notifier *notify);
-extern bool machine_init_done;
-
void qemu_add_machine_init_done_notifier(Notifier *notify);
void qemu_remove_machine_init_done_notifier(Notifier *notify);
+void configure_rtc(QemuOpts *opts);
+
+void qemu_init_subsystems(void);
+
extern int autostart;
typedef enum {
@@ -33,20 +34,15 @@ typedef enum {
} VGAInterfaceType;
extern int vga_interface_type;
+extern bool vga_interface_created;
extern int graphic_width;
extern int graphic_height;
extern int graphic_depth;
extern int display_opengl;
extern const char *keyboard_layout;
-extern int win2k_install_hack;
-extern int alt_grab;
-extern int ctrl_grab;
extern int graphic_rotate;
-extern int no_shutdown;
extern int old_param;
-extern int boot_menu;
-extern bool boot_strict;
extern uint8_t *boot_splash_filedata;
extern bool enable_mlock;
extern bool enable_cpu_pm;
@@ -64,17 +60,10 @@ extern int nb_option_roms;
extern const char *prom_envs[MAX_PROM_ENVS];
extern unsigned int nb_prom_envs;
-/* pcie aer error injection */
-void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict);
-
/* serial ports */
/* Return the Chardev for serial port i, or NULL if none */
Chardev *serial_hd(int i);
-/* return the number of serial ports defined by the user. serial_hd(i)
- * will always return NULL for any i which is greater than or equal to this.
- */
-int serial_max_hds(void);
/* parallel ports */
@@ -82,8 +71,6 @@ int serial_max_hds(void);
extern Chardev *parallel_hds[MAX_PARALLEL_PORTS];
-void hmp_info_usb(Monitor *mon, const QDict *qdict);
-
void add_boot_device_path(int32_t bootindex, DeviceState *dev,
const char *suffix);
char *get_boot_devices_list(size_t *size);
@@ -107,13 +94,11 @@ typedef void QEMUBootSetHandler(void *opaque, const char *boot_order,
void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque);
void qemu_boot_set(const char *boot_order, Error **errp);
-QemuOpts *qemu_get_machine_opts(void);
-
bool defaults_enabled(void);
-void qemu_init(int argc, char **argv, char **envp);
-void qemu_main_loop(void);
-void qemu_cleanup(void);
+void qemu_init(int argc, char **argv);
+int qemu_main_loop(void);
+void qemu_cleanup(int);
extern QemuOptsList qemu_legacy_drive_opts;
extern QemuOptsList qemu_common_drive_opts;
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
index d9d3ca8559..5e2ca9aab3 100644
--- a/include/sysemu/tcg.h
+++ b/include/sysemu/tcg.h
@@ -5,10 +5,11 @@
* See the COPYING file in the top-level directory.
*/
+/* header to be included in non-TCG-specific code */
+
#ifndef SYSEMU_TCG_H
#define SYSEMU_TCG_H
-void tcg_exec_init(unsigned long tb_size);
#ifdef CONFIG_TCG
extern bool tcg_allowed;
#define tcg_enabled() (tcg_allowed)
diff --git a/include/sysemu/tpm.h b/include/sysemu/tpm.h
index 730c61ac97..1ee568b3b6 100644
--- a/include/sysemu/tpm.h
+++ b/include/sysemu/tpm.h
@@ -15,7 +15,9 @@
#include "qapi/qapi-types-tpm.h"
#include "qom/object.h"
-int tpm_config_parse(QemuOptsList *opts_list, const char *optarg);
+#ifdef CONFIG_TPM
+
+int tpm_config_parse(QemuOptsList *opts_list, const char *optstr);
int tpm_init(void);
void tpm_cleanup(void);
@@ -26,27 +28,27 @@ typedef enum TPMVersion {
} TPMVersion;
#define TYPE_TPM_IF "tpm-if"
-#define TPM_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(TPMIfClass, (klass), TYPE_TPM_IF)
-#define TPM_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(TPMIfClass, (obj), TYPE_TPM_IF)
+typedef struct TPMIfClass TPMIfClass;
+DECLARE_CLASS_CHECKERS(TPMIfClass, TPM_IF,
+ TYPE_TPM_IF)
#define TPM_IF(obj) \
INTERFACE_CHECK(TPMIf, (obj), TYPE_TPM_IF)
typedef struct TPMIf TPMIf;
-typedef struct TPMIfClass {
+struct TPMIfClass {
InterfaceClass parent_class;
enum TpmModel model;
void (*request_completed)(TPMIf *obj, int ret);
enum TPMVersion (*get_version)(TPMIf *obj);
-} TPMIfClass;
+};
#define TYPE_TPM_TIS_ISA "tpm-tis"
#define TYPE_TPM_TIS_SYSBUS "tpm-tis-device"
#define TYPE_TPM_CRB "tpm-crb"
#define TYPE_TPM_SPAPR "tpm-spapr"
+#define TYPE_TPM_TIS_I2C "tpm-tis-i2c"
#define TPM_IS_TIS_ISA(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_ISA)
@@ -56,6 +58,8 @@ typedef struct TPMIfClass {
object_dynamic_cast(OBJECT(chr), TYPE_TPM_CRB)
#define TPM_IS_SPAPR(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_TPM_SPAPR)
+#define TPM_IS_TIS_I2C(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_I2C)
/* returns NULL unless there is exactly one TPM device */
static inline TPMIf *tpm_find(void)
@@ -74,4 +78,17 @@ static inline TPMVersion tpm_get_version(TPMIf *ti)
return TPM_IF_GET_CLASS(ti)->get_version(ti);
}
+#else /* CONFIG_TPM */
+
+#define tpm_init() (0)
+#define tpm_cleanup()
+
+/* needed for an alignment check in non-tpm code */
+static inline Object *TPM_IS_CRB(Object *obj)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_TPM */
+
#endif /* QEMU_TPM_H */
diff --git a/include/sysemu/tpm_backend.h b/include/sysemu/tpm_backend.h
index 9e7451fb52..7fabafefee 100644
--- a/include/sysemu/tpm_backend.h
+++ b/include/sysemu/tpm_backend.h
@@ -18,16 +18,12 @@
#include "sysemu/tpm.h"
#include "qapi/error.h"
+#ifdef CONFIG_TPM
+
#define TYPE_TPM_BACKEND "tpm-backend"
-#define TPM_BACKEND(obj) \
- OBJECT_CHECK(TPMBackend, (obj), TYPE_TPM_BACKEND)
-#define TPM_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(TPMBackendClass, (obj), TYPE_TPM_BACKEND)
-#define TPM_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(TPMBackendClass, (klass), TYPE_TPM_BACKEND)
+OBJECT_DECLARE_TYPE(TPMBackend, TPMBackendClass,
+ TPM_BACKEND)
-typedef struct TPMBackendClass TPMBackendClass;
-typedef struct TPMBackend TPMBackend;
typedef struct TPMBackendCmd {
uint8_t locty;
@@ -119,7 +115,7 @@ int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize);
/**
* tpm_backend_had_startup_error:
- * @s: the backend to query for a statup error
+ * @s: the backend to query for a startup error
*
* Check whether the backend had an error during startup. Returns
* false if no error occurred and the backend can be used, true
@@ -215,4 +211,6 @@ TPMInfo *tpm_backend_query_tpm(TPMBackend *s);
TPMBackend *qemu_find_tpm_be(const char *id);
-#endif
+#endif /* CONFIG_TPM */
+
+#endif /* TPM_BACKEND_H */
diff --git a/include/sysemu/tpm_util.h b/include/sysemu/tpm_util.h
index 63e872c3b2..08f05172a7 100644
--- a/include/sysemu/tpm_util.h
+++ b/include/sysemu/tpm_util.h
@@ -8,7 +8,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/sysemu/vhost-user-backend.h b/include/sysemu/vhost-user-backend.h
index 9abf8f06a1..327b0b84f1 100644
--- a/include/sysemu/vhost-user-backend.h
+++ b/include/sysemu/vhost-user-backend.h
@@ -22,19 +22,10 @@
#include "io/channel.h"
#define TYPE_VHOST_USER_BACKEND "vhost-user-backend"
-#define VHOST_USER_BACKEND(obj) \
- OBJECT_CHECK(VhostUserBackend, (obj), TYPE_VHOST_USER_BACKEND)
-#define VHOST_USER_BACKEND_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VhostUserBackendClass, (obj), TYPE_VHOST_USER_BACKEND)
-#define VHOST_USER_BACKEND_CLASS(klass) \
- OBJECT_CLASS_CHECK(VhostUserBackendClass, (klass), TYPE_VHOST_USER_BACKEND)
-
-typedef struct VhostUserBackend VhostUserBackend;
-typedef struct VhostUserBackendClass VhostUserBackendClass;
-
-struct VhostUserBackendClass {
- ObjectClass parent_class;
-};
+OBJECT_DECLARE_SIMPLE_TYPE(VhostUserBackend,
+ VHOST_USER_BACKEND)
+
+
struct VhostUserBackend {
/* private */
diff --git a/include/sysemu/watchdog.h b/include/sysemu/watchdog.h
index a08d16380d..745c89b02b 100644
--- a/include/sysemu/watchdog.h
+++ b/include/sysemu/watchdog.h
@@ -25,21 +25,8 @@
#include "qemu/queue.h"
#include "qapi/qapi-types-run-state.h"
-struct WatchdogTimerModel {
- QLIST_ENTRY(WatchdogTimerModel) entry;
-
- /* Short name of the device - used to select it on the command line. */
- const char *wdt_name;
- /* Longer description (eg. manufacturer and full model number). */
- const char *wdt_description;
-};
-typedef struct WatchdogTimerModel WatchdogTimerModel;
-
/* in hw/watchdog.c */
-int select_watchdog(const char *p);
-int select_watchdog_action(const char *action);
WatchdogAction get_watchdog_action(void);
-void watchdog_add_model(WatchdogTimerModel *model);
void watchdog_perform_action(void);
#endif /* QEMU_WATCHDOG_H */
diff --git a/include/sysemu/whpx.h b/include/sysemu/whpx.h
index a84b49e749..781ca5b2b6 100644
--- a/include/sysemu/whpx.h
+++ b/include/sysemu/whpx.h
@@ -10,36 +10,25 @@
*
*/
+/* header to be included in non-WHPX-specific code */
+
#ifndef QEMU_WHPX_H
#define QEMU_WHPX_H
-
-int whpx_init_vcpu(CPUState *cpu);
-int whpx_vcpu_exec(CPUState *cpu);
-void whpx_destroy_vcpu(CPUState *cpu);
-void whpx_vcpu_kick(CPUState *cpu);
-
-
-void whpx_cpu_synchronize_state(CPUState *cpu);
-void whpx_cpu_synchronize_post_reset(CPUState *cpu);
-void whpx_cpu_synchronize_post_init(CPUState *cpu);
-void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu);
+#ifdef NEED_CPU_H
#ifdef CONFIG_WHPX
int whpx_enabled(void);
+bool whpx_apic_in_platform(void);
#else /* CONFIG_WHPX */
#define whpx_enabled() (0)
+#define whpx_apic_in_platform() (0)
#endif /* CONFIG_WHPX */
-/* state subset only touched by the VCPU itself during runtime */
-#define WHPX_SET_RUNTIME_STATE 1
-/* state subset modified during VCPU reset */
-#define WHPX_SET_RESET_STATE 2
-/* full state set, modified during initialization or on vmload */
-#define WHPX_SET_FULL_STATE 3
+#endif /* NEED_CPU_H */
#endif /* QEMU_WHPX_H */
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
index c8e7c2f6cf..10c2e3082a 100644
--- a/include/sysemu/xen-mapcache.h
+++ b/include/sysemu/xen-mapcache.h
@@ -10,10 +10,11 @@
#define XEN_MAPCACHE_H
#include "exec/cpu-common.h"
+#include "sysemu/xen.h"
typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
ram_addr_t size);
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_IS_POSSIBLE
void xen_map_cache_init(phys_offset_to_gaddr_t f,
void *opaque);
diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h
index 2c2c429ea8..a9f591f26d 100644
--- a/include/sysemu/xen.h
+++ b/include/sysemu/xen.h
@@ -5,9 +5,17 @@
* See the COPYING file in the top-level directory.
*/
+/* header to be included in non-Xen-specific code */
+
#ifndef SYSEMU_XEN_H
#define SYSEMU_XEN_H
+#ifdef CONFIG_USER_ONLY
+#error Cannot include sysemu/xen.h from user emulation
+#endif
+
+#include "exec/cpu-common.h"
+
#ifdef NEED_CPU_H
# ifdef CONFIG_XEN
# define CONFIG_XEN_IS_POSSIBLE
@@ -22,16 +30,13 @@ extern bool xen_allowed;
#define xen_enabled() (xen_allowed)
-#ifndef CONFIG_USER_ONLY
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length);
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
struct MemoryRegion *mr, Error **errp);
-#endif
#else /* !CONFIG_XEN_IS_POSSIBLE */
#define xen_enabled() 0
-#ifndef CONFIG_USER_ONLY
static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
/* nothing */
@@ -41,7 +46,6 @@ static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
{
g_assert_not_reached();
}
-#endif
#endif /* CONFIG_XEN_IS_POSSIBLE */
diff --git a/include/tcg/debug-assert.h b/include/tcg/debug-assert.h
new file mode 100644
index 0000000000..596765a3d2
--- /dev/null
+++ b/include/tcg/debug-assert.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define tcg_debug_assert
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_DEBUG_ASSERT_H
+#define TCG_DEBUG_ASSERT_H
+
+#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
+# define tcg_debug_assert(X) do { assert(X); } while (0)
+#else
+# define tcg_debug_assert(X) \
+ do { if (!(X)) { __builtin_unreachable(); } } while (0)
+#endif
+
+#endif
diff --git a/include/tcg/debuginfo.h b/include/tcg/debuginfo.h
new file mode 100644
index 0000000000..858535b5da
--- /dev/null
+++ b/include/tcg/debuginfo.h
@@ -0,0 +1,79 @@
+/*
+ * Debug information support.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TCG_DEBUGINFO_H
+#define TCG_DEBUGINFO_H
+
+#include "qemu/bitops.h"
+
+/*
+ * Debuginfo describing a certain address.
+ */
+struct debuginfo_query {
+ uint64_t address; /* Input: address. */
+ int flags; /* Input: debuginfo subset. */
+ const char *symbol; /* Symbol that the address is part of. */
+ uint64_t offset; /* Offset from the symbol. */
+ const char *file; /* Source file associated with the address. */
+ int line; /* Line number in the source file. */
+};
+
+/*
+ * Debuginfo subsets.
+ */
+#define DEBUGINFO_SYMBOL BIT(1)
+#define DEBUGINFO_LINE BIT(2)
+
+#if defined(CONFIG_TCG) && defined(CONFIG_LIBDW)
+/*
+ * Load debuginfo for the specified guest ELF image.
+ * Return true on success, false on failure.
+ */
+void debuginfo_report_elf(const char *name, int fd, uint64_t bias);
+
+/*
+ * Take the debuginfo lock.
+ */
+void debuginfo_lock(void);
+
+/*
+ * Fill each of the N Qs with the debuginfo about Q->ADDRESS as specified by
+ * Q->FLAGS:
+ *
+ * - DEBUGINFO_SYMBOL: update Q->SYMBOL and Q->OFFSET. If symbol debuginfo is
+ * missing, then leave them as is.
+ * - DEBUGINFO_LINE: update Q->FILE and Q->LINE. If line debuginfo is missing,
+ * then leave them as is.
+ *
+ * This function must be called under the debuginfo lock. The results can be
+ * accessed only until the debuginfo lock is released.
+ */
+void debuginfo_query(struct debuginfo_query *q, size_t n);
+
+/*
+ * Release the debuginfo lock.
+ */
+void debuginfo_unlock(void);
+#else
+static inline void debuginfo_report_elf(const char *image_name, int image_fd,
+ uint64_t load_bias)
+{
+}
+
+static inline void debuginfo_lock(void)
+{
+}
+
+static inline void debuginfo_query(struct debuginfo_query *q, size_t n)
+{
+}
+
+static inline void debuginfo_unlock(void)
+{
+}
+#endif
+
+#endif
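
The query API is deliberately lock-bracketed: the caller fills in the address and the wanted flag bits, holds the lock across debuginfo_query(), and must copy out any strings before unlocking. A single-address lookup therefore reads roughly as follows (the wrapper function is illustrative):

    static void log_guest_pc(uint64_t guest_pc)
    {
        struct debuginfo_query q = {
            .address = guest_pc,
            .flags   = DEBUGINFO_SYMBOL | DEBUGINFO_LINE,
        };

        debuginfo_lock();
        debuginfo_query(&q, 1);
        if (q.symbol) {
            /* q.symbol/q.file are only valid until debuginfo_unlock() */
            fprintf(stderr, "%s+%" PRIu64 " (%s:%d)\n", q.symbol, q.offset,
                    q.file ? q.file : "?", q.line);
        }
        debuginfo_unlock();
    }
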
diff --git a/include/tcg/helper-info.h b/include/tcg/helper-info.h
new file mode 100644
index 0000000000..7c27d6164a
--- /dev/null
+++ b/include/tcg/helper-info.h
@@ -0,0 +1,64 @@
+/*
+ * TCG Helper Information Structure
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TCG_HELPER_INFO_H
+#define TCG_HELPER_INFO_H
+
+#ifdef CONFIG_TCG_INTERPRETER
+#include <ffi.h>
+#endif
+
+/*
+ * Describe the calling convention of a given argument type.
+ */
+typedef enum {
+ TCG_CALL_RET_NORMAL, /* by registers */
+ TCG_CALL_RET_BY_REF, /* for i128, by reference */
+ TCG_CALL_RET_BY_VEC, /* for i128, by vector register */
+} TCGCallReturnKind;
+
+typedef enum {
+ TCG_CALL_ARG_NORMAL, /* by registers (continuing onto stack) */
+ TCG_CALL_ARG_EVEN, /* like normal, but skipping odd slots */
+ TCG_CALL_ARG_EXTEND, /* for i32, as a sign/zero-extended i64 */
+ TCG_CALL_ARG_EXTEND_U, /* ... as a zero-extended i64 */
+ TCG_CALL_ARG_EXTEND_S, /* ... as a sign-extended i64 */
+ TCG_CALL_ARG_BY_REF, /* for i128, by reference, first */
+ TCG_CALL_ARG_BY_REF_N, /* ... by reference, subsequent */
+} TCGCallArgumentKind;
+
+typedef struct TCGCallArgumentLoc {
+ TCGCallArgumentKind kind : 8;
+ unsigned arg_slot : 8;
+ unsigned ref_slot : 8;
+ unsigned arg_idx : 4;
+ unsigned tmp_subindex : 2;
+} TCGCallArgumentLoc;
+
+struct TCGHelperInfo {
+ void *func;
+ const char *name;
+
+ /* Used with g_once_init_enter. */
+#ifdef CONFIG_TCG_INTERPRETER
+ ffi_cif *cif;
+#else
+ uintptr_t init;
+#endif
+
+ unsigned typemask : 32;
+ unsigned flags : 8;
+ unsigned nr_in : 8;
+ unsigned nr_out : 8;
+ TCGCallReturnKind out_kind : 8;
+
+ /* Maximum physical arguments are constrained by TCG_TYPE_I128. */
+ TCGCallArgumentLoc in[MAX_CALL_IARGS * (128 / TCG_TARGET_REG_BITS)];
+};
+
+#endif /* TCG_HELPER_INFO_H */
diff --git a/include/tcg/insn-start-words.h b/include/tcg/insn-start-words.h
new file mode 100644
index 0000000000..50c18bd326
--- /dev/null
+++ b/include/tcg/insn-start-words.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define TARGET_INSN_START_WORDS
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TARGET_INSN_START_WORDS
+
+#include "cpu.h"
+
+#ifndef TARGET_INSN_START_EXTRA_WORDS
+# define TARGET_INSN_START_WORDS 1
+#else
+# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
+#endif
+
+#endif /* TARGET_INSN_START_WORDS */
diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h
new file mode 100644
index 0000000000..641b9749ff
--- /dev/null
+++ b/include/tcg/oversized-guest.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define TCG_OVERSIZED_GUEST
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef EXEC_TCG_OVERSIZED_GUEST_H
+#define EXEC_TCG_OVERSIZED_GUEST_H
+
+#include "tcg-target-reg-bits.h"
+#include "cpu-param.h"
+
+/*
+ * Oversized TCG guests make things like MTTCG hard
+ * as we can't use atomics for cputlb updates.
+ */
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+#define TCG_OVERSIZED_GUEST 1
+#else
+#define TCG_OVERSIZED_GUEST 0
+#endif
+
+#endif
diff --git a/include/tcg/perf.h b/include/tcg/perf.h
new file mode 100644
index 0000000000..c96b5920a3
--- /dev/null
+++ b/include/tcg/perf.h
@@ -0,0 +1,49 @@
+/*
+ * Linux perf perf-<pid>.map and jit-<pid>.dump integration.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TCG_PERF_H
+#define TCG_PERF_H
+
+#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
+/* Start writing perf-<pid>.map. */
+void perf_enable_perfmap(void);
+
+/* Start writing jit-<pid>.dump. */
+void perf_enable_jitdump(void);
+
+/* Add information about TCG prologue to profiler maps. */
+void perf_report_prologue(const void *start, size_t size);
+
+/* Add information about JITted guest code to profiler maps. */
+void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start);
+
+/* Stop writing perf-<pid>.map and/or jit-<pid>.dump. */
+void perf_exit(void);
+#else
+static inline void perf_enable_perfmap(void)
+{
+}
+
+static inline void perf_enable_jitdump(void)
+{
+}
+
+static inline void perf_report_prologue(const void *start, size_t size)
+{
+}
+
+static inline void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start)
+{
+}
+
+static inline void perf_exit(void)
+{
+}
+#endif
+
+#endif
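[Editor's note] A minimal usage sketch of this API, assuming the caller has already decided (e.g. from command-line options) which outputs to enable; everything except the perf_* calls is hypothetical:

    #include <stdbool.h>
    #include "tcg/perf.h"

    static void example_profiling_setup(bool want_perfmap, bool want_jitdump)
    {
        if (want_perfmap) {
            perf_enable_perfmap();      /* begin writing perf-<pid>.map */
        }
        if (want_jitdump) {
            perf_enable_jitdump();      /* begin writing jit-<pid>.dump */
        }
        /* Translation later calls perf_report_prologue() and
           perf_report_code(); perf_exit() closes the files at shutdown. */
    }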
diff --git a/include/tcg/startup.h b/include/tcg/startup.h
new file mode 100644
index 0000000000..f71305765c
--- /dev/null
+++ b/include/tcg/startup.h
@@ -0,0 +1,58 @@
+/*
+ * Tiny Code Generator for QEMU: definitions used by runtime startup
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_STARTUP_H
+#define TCG_STARTUP_H
+
+/**
+ * tcg_init: Initialize the TCG runtime
+ * @tb_size: translation buffer size
+ * @splitwx: use separate rw and rx mappings
+ * @max_cpus: number of vcpus in system mode
+ *
+ * Allocate and initialize TCG resources, especially the JIT buffer.
+ * In user-only mode, @max_cpus is unused.
+ */
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
+
+/**
+ * tcg_register_thread: Register this thread with the TCG runtime
+ *
+ * All TCG threads except the parent (i.e. the one that called the TCG
+ * accelerator's init_machine() method) must register with this
+ * function before initiating translation.
+ */
+void tcg_register_thread(void);
+
+/**
+ * tcg_prologue_init(): Generate the code for the TCG prologue
+ *
+ * In softmmu this is done automatically as part of the TCG
+ * accelerator's init_machine() method, but in user-only mode the
+ * caller must invoke this function itself, after the guest binary
+ * has been loaded and the value of guest_base is known.
+ */
+void tcg_prologue_init(void);
+
+#endif
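[Editor's note] A rough user-mode startup order implied by the comments above; the argument values are illustrative assumptions (0 and -1 standing in for "use a default"), not requirements stated by this header:

    #include "tcg/startup.h"

    static void example_user_mode_start(void)
    {
        tcg_init(0, -1, 1);      /* buffer size, split-wx, vcpus (unused here) */

        /* ... load the guest binary and establish guest_base ... */

        tcg_prologue_init();     /* only once guest_base is known */

        /* Any additional translation thread calls tcg_register_thread()
           before it starts translating; the parent thread does not. */
    }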
diff --git a/include/tcg/tcg-cond.h b/include/tcg/tcg-cond.h
new file mode 100644
index 0000000000..5cadbd6ff2
--- /dev/null
+++ b/include/tcg/tcg-cond.h
@@ -0,0 +1,133 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_COND_H
+#define TCG_COND_H
+
+/*
+ * Conditions. Note that these are laid out for easy manipulation by
+ * the functions below:
+ * bit 0 is used for inverting;
+ * bit 1 is used for conditions that need swapping (signed/unsigned).
+ * bit 2 is used with bit 1 for swapping.
+ * bit 3 is used for unsigned conditions.
+ */
+typedef enum {
+ /* non-signed */
+ TCG_COND_NEVER = 0 | 0 | 0 | 0,
+ TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
+
+ /* equality */
+ TCG_COND_EQ = 8 | 0 | 0 | 0,
+ TCG_COND_NE = 8 | 0 | 0 | 1,
+
+    /* "test", i.e. AND the operands, then compare vs 0 */
+ TCG_COND_TSTEQ = 8 | 4 | 0 | 0,
+ TCG_COND_TSTNE = 8 | 4 | 0 | 1,
+
+ /* signed */
+ TCG_COND_LT = 0 | 0 | 2 | 0,
+ TCG_COND_GE = 0 | 0 | 2 | 1,
+ TCG_COND_GT = 0 | 4 | 2 | 0,
+ TCG_COND_LE = 0 | 4 | 2 | 1,
+
+ /* unsigned */
+ TCG_COND_LTU = 8 | 0 | 2 | 0,
+ TCG_COND_GEU = 8 | 0 | 2 | 1,
+ TCG_COND_GTU = 8 | 4 | 2 | 0,
+ TCG_COND_LEU = 8 | 4 | 2 | 1,
+} TCGCond;
+
+/* Invert the sense of the comparison. */
+static inline TCGCond tcg_invert_cond(TCGCond c)
+{
+ return (TCGCond)(c ^ 1);
+}
+
+/* Swap the operands in a comparison. */
+static inline TCGCond tcg_swap_cond(TCGCond c)
+{
+ return (TCGCond)(c ^ ((c & 2) << 1));
+}
+
+/* Must a comparison be considered signed? */
+static inline bool is_signed_cond(TCGCond c)
+{
+ return (c & (8 | 2)) == 2;
+}
+
+/* Must a comparison be considered unsigned? */
+static inline bool is_unsigned_cond(TCGCond c)
+{
+ return (c & (8 | 2)) == (8 | 2);
+}
+
+/* Must a comparison be considered a test? */
+static inline bool is_tst_cond(TCGCond c)
+{
+ return (c | 1) == TCG_COND_TSTNE;
+}
+
+/* Create an "unsigned" version of a "signed" comparison. */
+static inline TCGCond tcg_unsigned_cond(TCGCond c)
+{
+ return is_signed_cond(c) ? (TCGCond)(c + 8) : c;
+}
+
+/* Create a "signed" version of an "unsigned" comparison. */
+static inline TCGCond tcg_signed_cond(TCGCond c)
+{
+ return is_unsigned_cond(c) ? (TCGCond)(c - 8) : c;
+}
+
+/* Create the eq/ne version of a tsteq/tstne comparison. */
+static inline TCGCond tcg_tst_eqne_cond(TCGCond c)
+{
+ return is_tst_cond(c) ? (TCGCond)(c - 4) : c;
+}
+
+/* Create the lt/ge version of a tstne/tsteq comparison of the sign. */
+static inline TCGCond tcg_tst_ltge_cond(TCGCond c)
+{
+ return is_tst_cond(c) ? (TCGCond)(c ^ 0xf) : c;
+}
+
+/*
+ * Create a "high" version of a double-word comparison.
+ * This removes equality from a LTE or GTE comparison.
+ */
+static inline TCGCond tcg_high_cond(TCGCond c)
+{
+ switch (c) {
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ return (TCGCond)(c ^ (4 | 1));
+ default:
+ return c;
+ }
+}
+
+#endif /* TCG_COND_H */
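[Editor's note] The bit layout makes the helpers above easy to spot-check. A small standalone test, assuming this header is included from a QEMU build (it only needs bool from stdbool.h):

    #include <assert.h>
    #include <stdbool.h>
    #include "tcg/tcg-cond.h"

    int main(void)
    {
        assert(tcg_invert_cond(TCG_COND_EQ)      == TCG_COND_NE);
        assert(tcg_swap_cond(TCG_COND_LT)        == TCG_COND_GT);  /* operands swapped */
        assert(tcg_unsigned_cond(TCG_COND_LT)    == TCG_COND_LTU);
        assert(tcg_tst_eqne_cond(TCG_COND_TSTNE) == TCG_COND_NE);
        assert(tcg_high_cond(TCG_COND_LE)        == TCG_COND_LT);  /* equality dropped */
        return 0;
    }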
diff --git a/include/tcg/tcg-gvec-desc.h b/include/tcg/tcg-gvec-desc.h
index 0224ac3e78..704bd86454 100644
--- a/include/tcg/tcg-gvec-desc.h
+++ b/include/tcg/tcg-gvec-desc.h
@@ -20,29 +20,41 @@
#ifndef TCG_TCG_GVEC_DESC_H
#define TCG_TCG_GVEC_DESC_H
-/* ??? These bit widths are set for ARM SVE, maxing out at 256 byte vectors. */
-#define SIMD_OPRSZ_SHIFT 0
-#define SIMD_OPRSZ_BITS 5
+/*
+ * This configuration allows MAXSZ to represent 2048 bytes, and
+ * OPRSZ to match MAXSZ, or represent the smaller values 8, 16, or 32.
+ *
+ * Encode this with:
+ * 0, 1, 3 -> 8, 16, 32
+ * 2 -> maxsz
+ *
+ * This steals the input that would otherwise map to 24 to match maxsz.
+ */
+#define SIMD_MAXSZ_SHIFT 0
+#define SIMD_MAXSZ_BITS 8
-#define SIMD_MAXSZ_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
-#define SIMD_MAXSZ_BITS 5
+#define SIMD_OPRSZ_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
+#define SIMD_OPRSZ_BITS 2
-#define SIMD_DATA_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
+#define SIMD_DATA_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
#define SIMD_DATA_BITS (32 - SIMD_DATA_SHIFT)
/* Create a descriptor from components. */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data);
-/* Extract the operation size from a descriptor. */
-static inline intptr_t simd_oprsz(uint32_t desc)
+/* Extract the max vector size from a descriptor. */
+static inline intptr_t simd_maxsz(uint32_t desc)
{
- return (extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS) + 1) * 8;
+ return extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) * 8 + 8;
}
-/* Extract the max vector size from a descriptor. */
-static inline intptr_t simd_maxsz(uint32_t desc)
+/* Extract the operation size from a descriptor. */
+static inline intptr_t simd_oprsz(uint32_t desc)
{
- return (extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) + 1) * 8;
+ uint32_t f = extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS);
+ intptr_t o = f * 8 + 8;
+ intptr_t m = simd_maxsz(desc);
+ return f == 2 ? m : o;
}
/* Extract the operation-specific data from a descriptor. */
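[Editor's note] To make the new layout concrete, here is a standalone model of the decode rules spelled out in the comment above; the encode side is my reading of that comment, not the real simd_desc() implementation (which is not part of this header):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t model_maxsz(uint32_t maxsz_field)
    {
        return maxsz_field * 8 + 8;             /* 8-bit field: 8..2048 bytes */
    }

    static uint32_t model_oprsz(uint32_t oprsz_field, uint32_t maxsz_field)
    {
        /* Field values 0, 1, 3 mean 8, 16, 32 bytes; 2 means "same as maxsz". */
        return oprsz_field == 2 ? model_maxsz(maxsz_field)
                                : oprsz_field * 8 + 8;
    }

    int main(void)
    {
        assert(model_maxsz(255)    == 2048);    /* maximum representable */
        assert(model_oprsz(3, 255) == 32);      /* small fixed operation size */
        assert(model_oprsz(2, 31)  == 256);     /* oprsz follows maxsz */
        return 0;
    }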
diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
new file mode 100644
index 0000000000..6ccfe9131d
--- /dev/null
+++ b/include/tcg/tcg-ldst.h
@@ -0,0 +1,63 @@
+/*
+ * Memory helpers that will be used by TCG generated code.
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_LDST_H
+#define TCG_LDST_H
+
+/* Value zero-extended to tcg register size. */
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+
+/* Value sign-extended to tcg register size. */
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr);
+
+/*
+ * Value extended to at least uint32_t, so that some ABIs do not require
+ * zero-extension from uint8_t or uint16_t.
+ */
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr);
+
+#endif /* TCG_LDST_H */
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
new file mode 100644
index 0000000000..2d932a515e
--- /dev/null
+++ b/include/tcg/tcg-op-common.h
@@ -0,0 +1,561 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Target independent opcode generation functions.
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ */
+
+#ifndef TCG_TCG_OP_COMMON_H
+#define TCG_TCG_OP_COMMON_H
+
+#include "tcg/tcg.h"
+#include "exec/helper-proto-common.h"
+#include "exec/helper-gen-common.h"
+
+TCGv_i32 tcg_constant_i32(int32_t val);
+TCGv_i64 tcg_constant_i64(int64_t val);
+TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
+TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
+
+TCGv_i32 tcg_temp_new_i32(void);
+TCGv_i64 tcg_temp_new_i64(void);
+TCGv_ptr tcg_temp_new_ptr(void);
+TCGv_i128 tcg_temp_new_i128(void);
+TCGv_vec tcg_temp_new_vec(TCGType type);
+TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
+
+TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t off, const char *name);
+TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t off, const char *name);
+TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name);
+
+/* Generic ops. */
+
+void gen_set_label(TCGLabel *l);
+void tcg_gen_br(TCGLabel *l);
+void tcg_gen_mb(TCGBar);
+
+/**
+ * tcg_gen_exit_tb() - output exit_tb TCG operation
+ * @tb: The TranslationBlock from which we are exiting
+ * @idx: Direct jump slot index, or exit request
+ *
+ * See tcg/README for more info about this TCG operation.
+ * See also tcg.h and the block comment above TB_EXIT_MASK.
+ *
+ * For a normal exit from the TB, back to the main loop, @tb should
+ * be NULL and @idx should be 0. Otherwise, @tb should be valid and
+ * @idx should be one of the TB_EXIT_ values.
+ */
+void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx);
+
+/**
+ * tcg_gen_goto_tb() - output goto_tb TCG operation
+ * @idx: Direct jump slot index (0 or 1)
+ *
+ * See tcg/README for more info about this TCG operation.
+ *
+ * NOTE: In system emulation, direct jumps with goto_tb are only safe within
+ * the pages this TB resides in because we don't take care of direct jumps when
+ * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
+ * static address translation, so the destination address is always valid, TBs
+ * are always invalidated properly, and direct jumps are reset when mapping
+ * changes.
+ */
+void tcg_gen_goto_tb(unsigned idx);
+
+/**
+ * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid
+ *
+ * If the TB is not valid, jump to the epilogue.
+ *
+ * This operation is optional. If the TCG backend does not implement goto_ptr,
+ * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
+ */
+void tcg_gen_lookup_and_goto_ptr(void);
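    /*
     * Editorial sketch (not part of the patch): the usual "chain to the
     * next TB" pattern that the exit_tb/goto_tb comments above describe.
     * The function name is made up; the PC-update step is whatever a
     * given front end normally emits.
     */
    static void example_goto_tb(const TranslationBlock *tb, unsigned slot)
    {
        tcg_gen_goto_tb(slot);       /* direct-jump slot 0 or 1 */
        /* ... front end stores the destination PC into its PC register ... */
        tcg_gen_exit_tb(tb, slot);   /* report which slot was used */
    }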
+
+void tcg_gen_plugin_cb_start(unsigned from, unsigned type, unsigned wr);
+void tcg_gen_plugin_cb_end(void);
+
+/* 32 bit ops */
+
+void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg);
+void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
+void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
+void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
+ unsigned int ofs);
+void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
+void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
+void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2);
+void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
+ TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
+void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
+void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc);
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags);
+void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
+
+/* Replicate a value of size @vece from @in to all the lanes in @out */
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in);
+
+void tcg_gen_discard_i32(TCGv_i32 arg);
+void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg);
+
+void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset);
+
+void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset);
+
+void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg);
+
+/* 64 bit ops */
+
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
+void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
+void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
+void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
+ unsigned int ofs);
+void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
+void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
+void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_negsetcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
+ TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
+void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc);
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
+void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
+
+/* Replicate a value of size @vece from @in to all the lanes in @out */
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in);
+
+void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+
+void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+
+void tcg_gen_discard_i64(TCGv_i64 arg);
+void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
+void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg);
+
+
+/* Size changing operations. */
+
+void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
+void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
+void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
+void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
+void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
+void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
+void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
+void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi);
+
+void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg);
+void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi);
+
+/* 128 bit ops */
+
+void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src);
+void tcg_gen_ld_i128(TCGv_i128 ret, TCGv_ptr base, tcg_target_long offset);
+void tcg_gen_st_i128(TCGv_i128 val, TCGv_ptr base, tcg_target_long offset);
+
+/* Local load/store bit ops */
+
+void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_st_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_ld_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_st_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_ld_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
+void tcg_gen_qemu_st_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
+
+/* Atomic ops */
+
+void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
+ TCGv_i128, TCGArg, MemOp, TCGType);
+
+void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
+ TCGv_i128, TCGArg, MemOp, TCGType);
+
+void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+
+void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_add_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_and_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_and_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_or_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_or_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_xor_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_xor_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_smax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_fetch_umax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+
+void tcg_gen_atomic_add_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_add_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_and_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_and_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_or_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_or_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_xor_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_xor_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_smax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
+ TCGArg, MemOp, TCGType);
+void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
+ TCGArg, MemOp, TCGType);
+
+/* Vector ops */
+
+void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
+void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
+void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
+void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
+void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t);
+void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
+void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+
+void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+
+void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+
+void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
+ TCGv_vec a, TCGv_vec b);
+
+void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
+ TCGv_vec b, TCGv_vec c);
+void tcg_gen_cmpsel_vec(TCGCond cond, unsigned vece, TCGv_vec r,
+ TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d);
+
+void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
+void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
+void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
+
+/* Host pointer ops */
+
+#if UINTPTR_MAX == UINT32_MAX
+# define PTR i32
+# define NAT TCGv_i32
+#else
+# define PTR i64
+# define NAT TCGv_i64
+#endif
+
+TCGv_ptr tcg_constant_ptr_int(intptr_t x);
+#define tcg_constant_ptr(X) tcg_constant_ptr_int((intptr_t)(X))
+
+static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
+{
+ glue(tcg_gen_ld_,PTR)((NAT)r, a, o);
+}
+
+static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
+{
+ glue(tcg_gen_st_, PTR)((NAT)r, a, o);
+}
+
+static inline void tcg_gen_discard_ptr(TCGv_ptr a)
+{
+ glue(tcg_gen_discard_,PTR)((NAT)a);
+}
+
+static inline void tcg_gen_add_ptr(TCGv_ptr r, TCGv_ptr a, TCGv_ptr b)
+{
+ glue(tcg_gen_add_,PTR)((NAT)r, (NAT)a, (NAT)b);
+}
+
+static inline void tcg_gen_addi_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t b)
+{
+ glue(tcg_gen_addi_,PTR)((NAT)r, (NAT)a, b);
+}
+
+static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s)
+{
+ glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s);
+}
+
+static inline void tcg_gen_movi_ptr(TCGv_ptr d, intptr_t s)
+{
+ glue(tcg_gen_movi_,PTR)((NAT)d, s);
+}
+
+static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a,
+ intptr_t b, TCGLabel *label)
+{
+ glue(tcg_gen_brcondi_,PTR)(cond, (NAT)a, b, label);
+}
+
+static inline void tcg_gen_ext_i32_ptr(TCGv_ptr r, TCGv_i32 a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_mov_i32((NAT)r, a);
+#else
+ tcg_gen_ext_i32_i64((NAT)r, a);
+#endif
+}
+
+static inline void tcg_gen_trunc_i64_ptr(TCGv_ptr r, TCGv_i64 a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_extrl_i64_i32((NAT)r, a);
+#else
+ tcg_gen_mov_i64((NAT)r, a);
+#endif
+}
+
+static inline void tcg_gen_extu_ptr_i64(TCGv_i64 r, TCGv_ptr a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_extu_i32_i64(r, (NAT)a);
+#else
+ tcg_gen_mov_i64(r, (NAT)a);
+#endif
+}
+
+static inline void tcg_gen_trunc_ptr_i32(TCGv_i32 r, TCGv_ptr a)
+{
+#if UINTPTR_MAX == UINT32_MAX
+ tcg_gen_mov_i32(r, (NAT)a);
+#else
+ tcg_gen_extrl_i64_i32(r, (NAT)a);
+#endif
+}
+
+#undef PTR
+#undef NAT
+
+#endif /* TCG_TCG_OP_COMMON_H */
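[Editor's note] As a usage illustration of the scalar generators declared above, a tiny hypothetical front-end fragment (the function name and constants are made up; temporaries are assumed to be released by the translator loop as usual):

    static void example_gen_add_mask(TCGv_i32 ret, TCGv_i32 a)
    {
        TCGv_i32 t = tcg_temp_new_i32();

        tcg_gen_addi_i32(t, a, 4);        /* t = a + 4 */
        tcg_gen_andi_i32(ret, t, 0xff);   /* ret = t & 0xff */
    }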
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
new file mode 100644
index 0000000000..4db8a58c14
--- /dev/null
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Target independent generic vector operation expansion
+ *
+ * Copyright (c) 2018 Linaro
+ */
+
+#ifndef TCG_TCG_OP_GVEC_COMMON_H
+#define TCG_TCG_OP_GVEC_COMMON_H
+
+/*
+ * "Generic" vectors. All operands are given as offsets from ENV,
+ * and therefore cannot also be allocated via tcg_global_mem_new_*.
+ * OPRSZ is the byte size of the vector upon which the operation is performed.
+ * MAXSZ is the byte size of the full vector; bytes beyond OPRSZ are cleared.
+ *
+ * All sizes must be 8 or any multiple of 16.
+ * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
+ * Operands may completely, but not partially, overlap.
+ */
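    /*
     * Editorial sketch (not part of the patch): the offset-based convention
     * above in use, via tcg_gen_gvec_add() declared further down in this
     * header.  The 32-byte register size and the MO_32 element size are
     * assumptions for the example; dofs/aofs/bofs would be offsetof()
     * values into env.
     */
    static void example_gen_vadd(uint32_t dofs, uint32_t aofs, uint32_t bofs)
    {
        /* Full-width operation on 32-byte registers: oprsz == maxsz == 32. */
        tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 32, 32);
    }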
+
+/* Expand a call to a gvec-style helper, with pointers to two vector
+ operands, and a descriptor (see tcg-gvec-desc.h). */
+typedef void gen_helper_gvec_2(TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_2 *fn);
+
+/* Similarly, passing an extra data value. */
+typedef void gen_helper_gvec_2i(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
+void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_2i *fn);
+
+/* Similarly, passing an extra pointer (e.g. env or float_status). */
+typedef void gen_helper_gvec_2_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
+ TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_2_ptr *fn);
+
+/* Similarly, with three vector operands. */
+typedef void gen_helper_gvec_3(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_3 *fn);
+
+/* Similarly, with four vector operands. */
+typedef void gen_helper_gvec_4(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_4 *fn);
+
+/* Similarly, with five vector operands. */
+typedef void gen_helper_gvec_5(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, uint32_t xofs, uint32_t oprsz,
+ uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn);
+
+typedef void gen_helper_gvec_3_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
+ int32_t data, gen_helper_gvec_3_ptr *fn);
+
+typedef void gen_helper_gvec_4_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
+ uint32_t maxsz, int32_t data,
+ gen_helper_gvec_4_ptr *fn);
+
+typedef void gen_helper_gvec_5_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
+ gen_helper_gvec_5_ptr *fn);
+
+/* Expand a gvec operation. Either inline or out-of-line depending on
+ the actual vector size and the operations supported by the host. */
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_2 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ int32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 2nd source operand. */
+ bool load_dest;
+} GVecGen2;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_2 *fno;
+ /* Expand out-of-line helper w/descriptor, data as argument. */
+ gen_helper_gvec_2i *fnoi;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen2i;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_2i *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ uint32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load scalar as 1st source operand. */
+ bool scalar_first;
+} GVecGen2s;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_3 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ int32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen3;
+
+typedef struct {
+ /*
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
+ * non-NULL.
+ */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_3 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Load dest as a 3rd source operand. */
+ bool load_dest;
+} GVecGen3i;
+
+typedef struct {
+ /* Expand inline as a 64-bit or 32-bit integer.
+ Only one of these will be non-NULL. */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
+ /* Expand out-of-line helper w/descriptor. */
+ gen_helper_gvec_4 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The data argument to the out-of-line helper. */
+ int32_t data;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+ /* Write aofs as a 2nd dest operand. */
+ bool write_aofs;
+} GVecGen4;
+
+typedef struct {
+ /*
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
+ * non-NULL.
+ */
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
+ /* Expand inline with a host vector type. */
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
+ gen_helper_gvec_4 *fno;
+ /* The optional opcodes, if any, utilized by .fniv. */
+ const TCGOpcode *opt_opc;
+ /* The vector element size, if applicable. */
+ uint8_t vece;
+ /* Prefer i64 to v64. */
+ bool prefer_i64;
+} GVecGen4i;
+
+void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
+void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+ uint32_t maxsz, int64_t c, const GVecGen2i *);
+void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+ uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
+void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
+void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
+ const GVecGen3i *);
+void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
+ uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
+void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
+ const GVecGen4i *);
+
+/* Expand a specific vector operation. */
+
+void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+
+/* Saturated arithmetic. */
+void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+/* Min/max. */
+void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t s, uint32_t m);
+void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, uint64_t imm);
+void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, TCGv_i32);
+void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, TCGv_i64);
+
+void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * Perform vector shift by vector element, modulo the element size.
+ * E.g. D[i] = A[i] << (B[i] % (8 << vece)).
+ */
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, uint32_t bofs,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, int64_t c,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * Perform vector bit select: d = (b & a) | (c & ~a).
+ */
+void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t cofs,
+ uint32_t oprsz, uint32_t maxsz);
+
+/*
+ * 64-bit vector operations. Use these when the register has been allocated
+ * with tcg_global_mem_new_i64, and so we cannot also address it via pointer.
+ * OPRSZ = MAXSZ = 8.
+ */
+
+void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 a);
+void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 a);
+void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 a);
+
+void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+
+void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+
+void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
+void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
+
+/* 32-bit vector operations. */
+void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+
+void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+
+void tcg_gen_vec_shl8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_shr8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
+
+#endif
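
[Usage sketch, not part of the patch] The declarations above (shift-by-vector, compare, bitsel, and the "vector in an integer" helpers) are the target-independent gvec interface. As a minimal illustration of how a target translator might call the shift-by-vector expander, the snippet below expands a guest vector shift over a 16-byte register of 32-bit lanes; CPUFooState, its vreg array, DisasContext and gen_vshl32 are hypothetical names used only for this sketch, and 16/16 follow the OPRSZ/MAXSZ (operation size / full vector size in bytes) convention:

static void gen_vshl32(DisasContext *s, int rd, int rn, int rm)
{
    /* D[i] = A[i] << (B[i] % 32) over a 16-byte vector of 32-bit lanes. */
    tcg_gen_gvec_shlv(MO_32,
                      offsetof(CPUFooState, vreg[rd]),   /* dofs */
                      offsetof(CPUFooState, vreg[rn]),   /* aofs */
                      offsetof(CPUFooState, vreg[rm]),   /* bofs */
                      16 /* oprsz */, 16 /* maxsz */);
}
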
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
index c69a7de984..b0a81ad4bf 100644
--- a/include/tcg/tcg-op-gvec.h
+++ b/include/tcg/tcg-op-gvec.h
@@ -1,404 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic vector operation expansion
+ * Target dependent generic vector operation expansion
*
* Copyright (c) 2018 Linaro
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TCG_TCG_OP_GVEC_H
#define TCG_TCG_OP_GVEC_H
-/*
- * "Generic" vectors. All operands are given as offsets from ENV,
- * and therefore cannot also be allocated via tcg_global_mem_new_*.
- * OPRSZ is the byte size of the vector upon which the operation is performed.
- * MAXSZ is the byte size of the full vector; bytes beyond OPSZ are cleared.
- *
- * All sizes must be 8 or any multiple of 16.
- * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
- * Operands may completely, but not partially, overlap.
- */
-
-/* Expand a call to a gvec-style helper, with pointers to two vector
- operands, and a descriptor (see tcg-gvec-desc.h). */
-typedef void gen_helper_gvec_2(TCGv_ptr, TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_2 *fn);
-
-/* Similarly, passing an extra data value. */
-typedef void gen_helper_gvec_2i(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
-void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_2i *fn);
-
-/* Similarly, passing an extra pointer (e.g. env or float_status). */
-typedef void gen_helper_gvec_2_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
- TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
- int32_t data, gen_helper_gvec_2_ptr *fn);
-
-/* Similarly, with three vector operands. */
-typedef void gen_helper_gvec_3(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_3 *fn);
-
-/* Similarly, with four vector operands. */
-typedef void gen_helper_gvec_4(TCGv_ptr, TCGv_ptr, TCGv_ptr,
- TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
- int32_t data, gen_helper_gvec_4 *fn);
-
-/* Similarly, with five vector operands. */
-typedef void gen_helper_gvec_5(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
- TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t xofs, uint32_t oprsz,
- uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn);
-
-typedef void gen_helper_gvec_3_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
- TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
- int32_t data, gen_helper_gvec_3_ptr *fn);
-
-typedef void gen_helper_gvec_4_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
- TCGv_ptr, TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
- uint32_t maxsz, int32_t data,
- gen_helper_gvec_4_ptr *fn);
-
-typedef void gen_helper_gvec_5_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
- TCGv_ptr, TCGv_ptr, TCGv_i32);
-void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
- uint32_t oprsz, uint32_t maxsz, int32_t data,
- gen_helper_gvec_5_ptr *fn);
-
-/* Expand a gvec operation. Either inline or out-of-line depending on
- the actual vector size and the operations supported by the host. */
-typedef struct {
- /* Expand inline as a 64-bit or 32-bit integer.
- Only one of these will be non-NULL. */
- void (*fni8)(TCGv_i64, TCGv_i64);
- void (*fni4)(TCGv_i32, TCGv_i32);
- /* Expand inline with a host vector type. */
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec);
- /* Expand out-of-line helper w/descriptor. */
- gen_helper_gvec_2 *fno;
- /* The optional opcodes, if any, utilized by .fniv. */
- const TCGOpcode *opt_opc;
- /* The data argument to the out-of-line helper. */
- int32_t data;
- /* The vector element size, if applicable. */
- uint8_t vece;
- /* Prefer i64 to v64. */
- bool prefer_i64;
- /* Load dest as a 2nd source operand. */
- bool load_dest;
-} GVecGen2;
-
-typedef struct {
- /* Expand inline as a 64-bit or 32-bit integer.
- Only one of these will be non-NULL. */
- void (*fni8)(TCGv_i64, TCGv_i64, int64_t);
- void (*fni4)(TCGv_i32, TCGv_i32, int32_t);
- /* Expand inline with a host vector type. */
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, int64_t);
- /* Expand out-of-line helper w/descriptor, data in descriptor. */
- gen_helper_gvec_2 *fno;
- /* Expand out-of-line helper w/descriptor, data as argument. */
- gen_helper_gvec_2i *fnoi;
- /* The optional opcodes, if any, utilized by .fniv. */
- const TCGOpcode *opt_opc;
- /* The vector element size, if applicable. */
- uint8_t vece;
- /* Prefer i64 to v64. */
- bool prefer_i64;
- /* Load dest as a 3rd source operand. */
- bool load_dest;
-} GVecGen2i;
-
-typedef struct {
- /* Expand inline as a 64-bit or 32-bit integer.
- Only one of these will be non-NULL. */
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
- /* Expand inline with a host vector type. */
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
- /* Expand out-of-line helper w/descriptor. */
- gen_helper_gvec_2i *fno;
- /* The optional opcodes, if any, utilized by .fniv. */
- const TCGOpcode *opt_opc;
- /* The data argument to the out-of-line helper. */
- uint32_t data;
- /* The vector element size, if applicable. */
- uint8_t vece;
- /* Prefer i64 to v64. */
- bool prefer_i64;
- /* Load scalar as 1st source operand. */
- bool scalar_first;
-} GVecGen2s;
-
-typedef struct {
- /* Expand inline as a 64-bit or 32-bit integer.
- Only one of these will be non-NULL. */
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
- /* Expand inline with a host vector type. */
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
- /* Expand out-of-line helper w/descriptor. */
- gen_helper_gvec_3 *fno;
- /* The optional opcodes, if any, utilized by .fniv. */
- const TCGOpcode *opt_opc;
- /* The data argument to the out-of-line helper. */
- int32_t data;
- /* The vector element size, if applicable. */
- uint8_t vece;
- /* Prefer i64 to v64. */
- bool prefer_i64;
- /* Load dest as a 3rd source operand. */
- bool load_dest;
-} GVecGen3;
-
-typedef struct {
- /*
- * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
- * non-NULL.
- */
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
- /* Expand inline with a host vector type. */
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
- /* Expand out-of-line helper w/descriptor, data in descriptor. */
- gen_helper_gvec_3 *fno;
- /* The optional opcodes, if any, utilized by .fniv. */
- const TCGOpcode *opt_opc;
- /* The vector element size, if applicable. */
- uint8_t vece;
- /* Prefer i64 to v64. */
- bool prefer_i64;
- /* Load dest as a 3rd source operand. */
- bool load_dest;
-} GVecGen3i;
-
-typedef struct {
- /* Expand inline as a 64-bit or 32-bit integer.
- Only one of these will be non-NULL. */
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64);
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
- /* Expand inline with a host vector type. */
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
- /* Expand out-of-line helper w/descriptor. */
- gen_helper_gvec_4 *fno;
- /* The optional opcodes, if any, utilized by .fniv. */
- const TCGOpcode *opt_opc;
- /* The data argument to the out-of-line helper. */
- int32_t data;
- /* The vector element size, if applicable. */
- uint8_t vece;
- /* Prefer i64 to v64. */
- bool prefer_i64;
- /* Write aofs as a 2nd dest operand. */
- bool write_aofs;
-} GVecGen4;
-
-void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
-void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, int64_t c, const GVecGen2i *);
-void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
-void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
-void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz, int64_t c,
- const GVecGen3i *);
-void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
- uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
-
-/* Expand a specific vector operation. */
-
-void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+#include "tcg/tcg-op-gvec-common.h"
-void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
-
-/* Saturated arithmetic. */
-void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-
-/* Min/max. */
-void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t c, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t s, uint32_t m);
-void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
- uint32_t m, uint64_t imm);
-void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
- uint32_t m, TCGv_i32);
-void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
- uint32_t m, TCGv_i64);
+#ifndef TARGET_LONG_BITS
+#error must include QEMU headers
+#endif
#if TARGET_LONG_BITS == 64
-# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
+#define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
+#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i64
+#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i64
+#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
+#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
+#define tcg_gen_vec_add32_tl tcg_gen_vec_add32_i64
+#define tcg_gen_vec_sub32_tl tcg_gen_vec_sub32_i64
+#define tcg_gen_vec_shl8i_tl tcg_gen_vec_shl8i_i64
+#define tcg_gen_vec_shr8i_tl tcg_gen_vec_shr8i_i64
+#define tcg_gen_vec_sar8i_tl tcg_gen_vec_sar8i_i64
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i64
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i64
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i64
+#elif TARGET_LONG_BITS == 32
+#define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
+#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i32
+#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i32
+#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
+#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
+#define tcg_gen_vec_add32_tl tcg_gen_add_i32
+#define tcg_gen_vec_sub32_tl tcg_gen_sub_i32
+#define tcg_gen_vec_shl8i_tl tcg_gen_vec_shl8i_i32
+#define tcg_gen_vec_shr8i_tl tcg_gen_vec_shr8i_i32
+#define tcg_gen_vec_sar8i_tl tcg_gen_vec_sar8i_i32
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i32
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i32
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i32
#else
-# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
+# error
#endif
-void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
-
-/*
- * Perform vector shift by vector element, modulo the element size.
- * E.g. D[i] = A[i] << (B[i] % (8 << vece)).
- */
-void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
-
-void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
- uint32_t aofs, uint32_t bofs,
- uint32_t oprsz, uint32_t maxsz);
-
-/*
- * Perform vector bit select: d = (b & a) | (c & ~a).
- */
-void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
- uint32_t bofs, uint32_t cofs,
- uint32_t oprsz, uint32_t maxsz);
-
-/*
- * 64-bit vector operations. Use these when the register has been allocated
- * with tcg_global_mem_new_i64, and so we cannot also address it via pointer.
- * OPRSZ = MAXSZ = 8.
- */
-
-void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 a);
-void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 a);
-void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 a);
-
-void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
-void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
-void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
-
-void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
-void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
-void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
-
-void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
-void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
-void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
-void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
-void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
-void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
-void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
-void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
-
#endif
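
[Usage sketch, not part of the patch] After this split, the target-dependent header above only adds the *_tl aliases: they resolve to the _i64 "vector in an integer" helpers when TARGET_LONG_BITS is 64 and to the _i32 variants (plain add/sub for 32-bit lanes) when it is 32. A hypothetical helper, here called gen_paddh, could therefore add packed 16-bit lanes held in target_ulong-sized temporaries without caring about the target word size:

static void gen_paddh(TCGv dest, TCGv a, TCGv b)
{
    /* dest.h[i] = a.h[i] + b.h[i] for each 16-bit lane of the target word. */
    tcg_gen_vec_add16_tl(dest, a, b);
}
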
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 5abf17fecc..a02850583b 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -1,1029 +1,164 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Tiny Code Generator for QEMU
+ * Target dependent opcode generation functions.
*
* Copyright (c) 2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
*/
#ifndef TCG_TCG_OP_H
#define TCG_TCG_OP_H
-#include "tcg/tcg.h"
-#include "exec/helper-proto.h"
-#include "exec/helper-gen.h"
-
-/* Basic output routines. Not for general consumption. */
-
-void tcg_gen_op1(TCGOpcode, TCGArg);
-void tcg_gen_op2(TCGOpcode, TCGArg, TCGArg);
-void tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg);
-void tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
-void tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
-void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
-
-void vec_gen_2(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg);
-void vec_gen_3(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg);
-void vec_gen_4(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg);
-
-static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
-{
- tcg_gen_op1(opc, tcgv_i32_arg(a1));
-}
-
-static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
-{
- tcg_gen_op1(opc, tcgv_i64_arg(a1));
-}
-
-static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
-{
- tcg_gen_op1(opc, a1);
-}
-
-static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
-{
- tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
-}
-
-static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
-{
- tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
-}
-
-static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
-{
- tcg_gen_op2(opc, tcgv_i32_arg(a1), a2);
-}
-
-static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
-{
- tcg_gen_op2(opc, tcgv_i64_arg(a1), a2);
-}
-
-static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
-{
- tcg_gen_op2(opc, a1, a2);
-}
-
-static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
- TCGv_i32 a2, TCGv_i32 a3)
-{
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
-}
-
-static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
- TCGv_i64 a2, TCGv_i64 a3)
-{
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
-}
-
-static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
- TCGv_i32 a2, TCGArg a3)
-{
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
-}
-
-static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
- TCGv_i64 a2, TCGArg a3)
-{
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
-}
-
-static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
- TCGv_ptr base, TCGArg offset)
-{
- tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
-}
-
-static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
- TCGv_ptr base, TCGArg offset)
-{
- tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
-}
-
-static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4)
-{
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4));
-}
-
-static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4)
-{
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4));
-}
-
-static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGArg a4)
-{
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), a4);
-}
-
-static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGArg a4)
-{
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), a4);
-}
-
-static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGArg a3, TCGArg a4)
-{
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
-}
-
-static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGArg a3, TCGArg a4)
-{
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
-}
-
-static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
-{
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
-}
-
-static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
-{
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
-}
-
-static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
-{
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5);
-}
-
-static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
-{
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5);
-}
-
-static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGArg a4, TCGArg a5)
-{
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), a4, a5);
-}
-
-static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGArg a4, TCGArg a5)
-{
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), a4, a5);
-}
-
-static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4,
- TCGv_i32 a5, TCGv_i32 a6)
-{
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
- tcgv_i32_arg(a6));
-}
-
-static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4,
- TCGv_i64 a5, TCGv_i64 a6)
-{
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
- tcgv_i64_arg(a6));
-}
-
-static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4,
- TCGv_i32 a5, TCGArg a6)
-{
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
-}
-
-static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4,
- TCGv_i64 a5, TCGArg a6)
-{
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
-}
-
-static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
- TCGv_i32 a3, TCGv_i32 a4,
- TCGArg a5, TCGArg a6)
-{
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
-}
-
-static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
- TCGv_i64 a3, TCGv_i64 a4,
- TCGArg a5, TCGArg a6)
-{
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5, a6);
-}
-
-
-/* Generic ops. */
-
-static inline void gen_set_label(TCGLabel *l)
-{
- l->present = 1;
- tcg_gen_op1(INDEX_op_set_label, label_arg(l));
-}
-
-static inline void tcg_gen_br(TCGLabel *l)
-{
- l->refs++;
- tcg_gen_op1(INDEX_op_br, label_arg(l));
-}
-
-void tcg_gen_mb(TCGBar);
-
-/* Helper calls. */
-
-/* 32 bit ops */
-
-void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
-void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
-void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
-void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
-void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
- unsigned int ofs, unsigned int len);
-void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
- unsigned int ofs, unsigned int len);
-void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
- unsigned int ofs, unsigned int len);
-void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
- unsigned int ofs, unsigned int len);
-void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
- unsigned int ofs);
-void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
-void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
-void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
- TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
- TCGv_i32 arg1, int32_t arg2);
-void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
- TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
-void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
- TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
-void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
- TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
-void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
-void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
-
-static inline void tcg_gen_discard_i32(TCGv_i32 arg)
-{
- tcg_gen_op1_i32(INDEX_op_discard, arg);
-}
-
-static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (ret != arg) {
- tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
- }
-}
-
-static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
-{
- tcg_gen_op2i_i32(INDEX_op_movi_i32, ret, arg);
-}
-
-static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
-{
- tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_neg_i32) {
- tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
- } else {
- tcg_gen_subfi_i32(ret, 0, arg);
- }
-}
-
-static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
-{
- if (TCG_TARGET_HAS_not_i32) {
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
- } else {
- tcg_gen_xori_i32(ret, arg, -1);
- }
-}
-
-/* 64 bit ops */
-
-void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
-void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
-void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
-void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
-void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
- unsigned int ofs, unsigned int len);
-void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
- unsigned int ofs, unsigned int len);
-void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
- unsigned int ofs, unsigned int len);
-void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
- unsigned int ofs, unsigned int len);
-void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
- unsigned int ofs);
-void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
-void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
-void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
- TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
- TCGv_i64 arg1, int64_t arg2);
-void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
- TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
-void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
-void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
-void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
-
-#if TCG_TARGET_REG_BITS == 64
-static inline void tcg_gen_discard_i64(TCGv_i64 arg)
-{
- tcg_gen_op1_i64(INDEX_op_discard, arg);
-}
-
-static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (ret != arg) {
- tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
- }
-}
-
-static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
-{
- tcg_gen_op2i_i64(INDEX_op_movi_i64, ret, arg);
-}
-
-static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
-}
-
-static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
-}
-#else /* TCG_TARGET_REG_BITS == 32 */
-static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset);
-}
-
-static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset);
-}
-
-static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
- tcg_target_long offset)
-{
- tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
-}
-
-static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
- TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
-}
-
-static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
- TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
-}
-
-void tcg_gen_discard_i64(TCGv_i64 arg);
-void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
-void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
-void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
-void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-#endif /* TCG_TARGET_REG_BITS */
-
-static inline void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
-{
- if (TCG_TARGET_HAS_neg_i64) {
- tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
- } else {
- tcg_gen_subfi_i64(ret, 0, arg);
- }
-}
-
-/* Size changing operations. */
-
-void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
-void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
-void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
-void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
-void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
-void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
-void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
-
-static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
-{
- tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
-}
-
-/* QEMU specific operations. */
+#include "tcg/tcg-op-common.h"
#ifndef TARGET_LONG_BITS
#error must include QEMU headers
#endif
-#if TARGET_INSN_START_WORDS == 1
-# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-static inline void tcg_gen_insn_start(target_ulong pc)
-{
- tcg_gen_op1(INDEX_op_insn_start, pc);
-}
-# else
+#if TARGET_LONG_BITS == 32
+# define TCG_TYPE_TL TCG_TYPE_I32
+#elif TARGET_LONG_BITS == 64
+# define TCG_TYPE_TL TCG_TYPE_I64
+#else
+# error
+#endif
+
+#ifndef TARGET_INSN_START_EXTRA_WORDS
static inline void tcg_gen_insn_start(target_ulong pc)
{
- tcg_gen_op2(INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32));
-}
-# endif
-#elif TARGET_INSN_START_WORDS == 2
-# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
-{
- tcg_gen_op2(INDEX_op_insn_start, pc, a1);
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS);
+ tcg_set_insn_start_param(op, 0, pc);
}
-# else
+#elif TARGET_INSN_START_EXTRA_WORDS == 1
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
{
- tcg_gen_op4(INDEX_op_insn_start,
- (uint32_t)pc, (uint32_t)(pc >> 32),
- (uint32_t)a1, (uint32_t)(a1 >> 32));
-}
-# endif
-#elif TARGET_INSN_START_WORDS == 3
-# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
- target_ulong a2)
-{
- tcg_gen_op3(INDEX_op_insn_start, pc, a1, a2);
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS);
+ tcg_set_insn_start_param(op, 0, pc);
+ tcg_set_insn_start_param(op, 1, a1);
}
-# else
+#elif TARGET_INSN_START_EXTRA_WORDS == 2
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
target_ulong a2)
{
- tcg_gen_op6(INDEX_op_insn_start,
- (uint32_t)pc, (uint32_t)(pc >> 32),
- (uint32_t)a1, (uint32_t)(a1 >> 32),
- (uint32_t)a2, (uint32_t)(a2 >> 32));
+ TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS);
+ tcg_set_insn_start_param(op, 0, pc);
+ tcg_set_insn_start_param(op, 1, a1);
+ tcg_set_insn_start_param(op, 2, a2);
}
-# endif
#else
-# error "Unhandled number of operands to insn_start"
+#error Unhandled TARGET_INSN_START_EXTRA_WORDS value
#endif
-/**
- * tcg_gen_exit_tb() - output exit_tb TCG operation
- * @tb: The TranslationBlock from which we are exiting
- * @idx: Direct jump slot index, or exit request
- *
- * See tcg/README for more info about this TCG operation.
- * See also tcg.h and the block comment above TB_EXIT_MASK.
- *
- * For a normal exit from the TB, back to the main loop, @tb should
- * be NULL and @idx should be 0. Otherwise, @tb should be valid and
- * @idx should be one of the TB_EXIT_ values.
- */
-void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx);
-
-/**
- * tcg_gen_goto_tb() - output goto_tb TCG operation
- * @idx: Direct jump slot index (0 or 1)
- *
- * See tcg/README for more info about this TCG operation.
- *
- * NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within
- * the pages this TB resides in because we don't take care of direct jumps when
- * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
- * static address translation, so the destination address is always valid, TBs
- * are always invalidated properly, and direct jumps are reset when mapping
- * changes.
- */
-void tcg_gen_goto_tb(unsigned idx);
-
-/**
- * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid
- * @addr: Guest address of the target TB
- *
- * If the TB is not valid, jump to the epilogue.
- *
- * This operation is optional. If the TCG backend does not implement goto_ptr,
- * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
- */
-void tcg_gen_lookup_and_goto_ptr(void);
-
-static inline void tcg_gen_plugin_cb_start(unsigned from, unsigned type,
- unsigned wr)
-{
- tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr);
-}
-
-static inline void tcg_gen_plugin_cb_end(void)
-{
- tcg_emit_op(INDEX_op_plugin_cb_end);
-}
-
#if TARGET_LONG_BITS == 32
+typedef TCGv_i32 TCGv;
#define tcg_temp_new() tcg_temp_new_i32()
-#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
-#define tcg_temp_local_new() tcg_temp_local_new_i32()
-#define tcg_temp_free tcg_temp_free_i32
+#define tcgv_tl_temp tcgv_i32_temp
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
-#else
+#elif TARGET_LONG_BITS == 64
+typedef TCGv_i64 TCGv;
#define tcg_temp_new() tcg_temp_new_i64()
-#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
-#define tcg_temp_local_new() tcg_temp_local_new_i64()
-#define tcg_temp_free tcg_temp_free_i64
+#define tcgv_tl_temp tcgv_i64_temp
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
+#else
+#error Unhandled TARGET_LONG_BITS value
#endif
-void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp);
-void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp);
-void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp);
-void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);
-
-static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
-}
-
-static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
-}
-
-static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
+static inline void
+tcg_gen_qemu_ld_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m)
{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
+ tcg_gen_qemu_ld_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
}
-static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
+static inline void
+tcg_gen_qemu_st_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m)
{
- tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_st_i32_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
}
-static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
+static inline void
+tcg_gen_qemu_ld_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m)
{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
+ tcg_gen_qemu_ld_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
}
-static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
+static inline void
+tcg_gen_qemu_st_i64(TCGv_i64 v, TCGv a, TCGArg i, MemOp m)
{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
+ tcg_gen_qemu_st_i64_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
}
-static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
+static inline void
+tcg_gen_qemu_ld_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m)
{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
+ tcg_gen_qemu_ld_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
}
-static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
+static inline void
+tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m)
{
- tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
}
-void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
- TCGArg, MemOp);
-void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
- TCGArg, MemOp);
-
-void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-
-void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+#define DEF_ATOMIC2(N, S) \
+ static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S v, \
+ TCGArg i, MemOp m) \
+ { N##_##S##_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL); }
-void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
-void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
-void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+#define DEF_ATOMIC3(N, S) \
+ static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S o, \
+ TCGv_##S n, TCGArg i, MemOp m) \
+ { N##_##S##_chk(r, tcgv_tl_temp(a), o, n, i, m, TCG_TYPE_TL); }
-void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
-void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
-void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
-void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
-void tcg_gen_dup8i_vec(TCGv_vec, uint32_t);
-void tcg_gen_dup16i_vec(TCGv_vec, uint32_t);
-void tcg_gen_dup32i_vec(TCGv_vec, uint32_t);
-void tcg_gen_dup64i_vec(TCGv_vec, uint64_t);
-void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t);
-void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
-void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
-void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
-void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
-void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i32)
+DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i64)
+DEF_ATOMIC3(tcg_gen_atomic_cmpxchg, i128)
-void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
-void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
-void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
-void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
-void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
+DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i32)
+DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i64)
+DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i128)
-void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
-void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
-void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
-void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
+DEF_ATOMIC2(tcg_gen_atomic_xchg, i32)
+DEF_ATOMIC2(tcg_gen_atomic_xchg, i64)
-void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
-void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
-void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
-void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
-void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umin, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_smax, i64)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i32)
+DEF_ATOMIC2(tcg_gen_atomic_fetch_umax, i64)
-void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
- TCGv_vec a, TCGv_vec b);
+DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_add_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_and_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_or_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_xor_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_smin_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_umin_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_smax_fetch, i64)
+DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i32)
+DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
-void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
- TCGv_vec b, TCGv_vec c);
-void tcg_gen_cmpsel_vec(TCGCond cond, unsigned vece, TCGv_vec r,
- TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d);
-
-void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
-void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
-void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
+#undef DEF_ATOMIC2
+#undef DEF_ATOMIC3
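The DEF_ATOMIC2/DEF_ATOMIC3 helpers above replace the long list of per-type prototypes with inline wrappers that forward to the corresponding *_chk entry points, passing the guest address as a TCGTemp plus an explicit TCG_TYPE_TL. For illustration only, DEF_ATOMIC2(tcg_gen_atomic_xchg, i32) expands to roughly:

    static inline void tcg_gen_atomic_xchg_i32(TCGv_i32 r, TCGv a, TCGv_i32 v,
                                               TCGArg i, MemOp m)
    {
        /* Forward to the checked variant with the address type made explicit. */
        tcg_gen_atomic_xchg_i32_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL);
    }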
#if TARGET_LONG_BITS == 64
#define tcg_gen_movi_tl tcg_gen_movi_i64
@@ -1063,6 +198,8 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64
#define tcg_gen_setcond_tl tcg_gen_setcond_i64
#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64
+#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i64
+#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i64
#define tcg_gen_mul_tl tcg_gen_mul_i64
#define tcg_gen_muli_tl tcg_gen_muli_i64
#define tcg_gen_div_tl tcg_gen_div_i64
@@ -1082,9 +219,13 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64
#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64
#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64
+#define tcg_gen_ext_tl tcg_gen_ext_i64
#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64
#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64
#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64
+#define tcg_gen_bswap_tl tcg_gen_bswap64_i64
+#define tcg_gen_hswap_tl tcg_gen_hswap_i64
+#define tcg_gen_wswap_tl tcg_gen_wswap_i64
#define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64
#define tcg_gen_extr_i64_tl tcg_gen_extr32_i64
#define tcg_gen_andc_tl tcg_gen_andc_i64
@@ -1107,8 +248,7 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_extract_tl tcg_gen_extract_i64
#define tcg_gen_sextract_tl tcg_gen_sextract_i64
#define tcg_gen_extract2_tl tcg_gen_extract2_i64
-#define tcg_const_tl tcg_const_i64
-#define tcg_const_local_tl tcg_const_local_i64
+#define tcg_constant_tl tcg_constant_i64
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
#define tcg_gen_add2_tl tcg_gen_add2_i64
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
@@ -1138,6 +278,8 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
+#define tcg_gen_dup_tl tcg_gen_dup_i64
+#define dup_const_tl dup_const
#else
#define tcg_gen_movi_tl tcg_gen_movi_i32
#define tcg_gen_mov_tl tcg_gen_mov_i32
@@ -1176,6 +318,8 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32
#define tcg_gen_setcond_tl tcg_gen_setcond_i32
#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32
+#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i32
+#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i32
#define tcg_gen_mul_tl tcg_gen_mul_i32
#define tcg_gen_muli_tl tcg_gen_muli_i32
#define tcg_gen_div_tl tcg_gen_div_i32
@@ -1195,8 +339,11 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32
#define tcg_gen_ext32u_tl tcg_gen_mov_i32
#define tcg_gen_ext32s_tl tcg_gen_mov_i32
+#define tcg_gen_ext_tl tcg_gen_ext_i32
#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32
-#define tcg_gen_bswap32_tl tcg_gen_bswap32_i32
+#define tcg_gen_bswap32_tl(D, S, F) tcg_gen_bswap32_i32(D, S)
+#define tcg_gen_bswap_tl tcg_gen_bswap32_i32
+#define tcg_gen_hswap_tl tcg_gen_hswap_i32
#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64
#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32
#define tcg_gen_andc_tl tcg_gen_andc_i32
@@ -1219,8 +366,7 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_extract_tl tcg_gen_extract_i32
#define tcg_gen_sextract_tl tcg_gen_sextract_i32
#define tcg_gen_extract2_tl tcg_gen_extract2_i32
-#define tcg_const_tl tcg_const_i32
-#define tcg_const_local_tl tcg_const_local_i32
+#define tcg_constant_tl tcg_constant_i32
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
#define tcg_gen_add2_tl tcg_gen_add2_i32
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
@@ -1250,84 +396,15 @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
-#endif
-
-#if UINTPTR_MAX == UINT32_MAX
-# define PTR i32
-# define NAT TCGv_i32
-#else
-# define PTR i64
-# define NAT TCGv_i64
-#endif
-
-static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
-{
- glue(tcg_gen_ld_,PTR)((NAT)r, a, o);
-}
-
-static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
-{
- glue(tcg_gen_st_, PTR)((NAT)r, a, o);
-}
-
-static inline void tcg_gen_discard_ptr(TCGv_ptr a)
-{
- glue(tcg_gen_discard_,PTR)((NAT)a);
-}
-
-static inline void tcg_gen_add_ptr(TCGv_ptr r, TCGv_ptr a, TCGv_ptr b)
-{
- glue(tcg_gen_add_,PTR)((NAT)r, (NAT)a, (NAT)b);
-}
-
-static inline void tcg_gen_addi_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t b)
-{
- glue(tcg_gen_addi_,PTR)((NAT)r, (NAT)a, b);
-}
-
-static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a,
- intptr_t b, TCGLabel *label)
-{
- glue(tcg_gen_brcondi_,PTR)(cond, (NAT)a, b, label);
-}
-
-static inline void tcg_gen_ext_i32_ptr(TCGv_ptr r, TCGv_i32 a)
-{
-#if UINTPTR_MAX == UINT32_MAX
- tcg_gen_mov_i32((NAT)r, a);
-#else
- tcg_gen_ext_i32_i64((NAT)r, a);
-#endif
-}
-
-static inline void tcg_gen_trunc_i64_ptr(TCGv_ptr r, TCGv_i64 a)
-{
-#if UINTPTR_MAX == UINT32_MAX
- tcg_gen_extrl_i64_i32((NAT)r, a);
-#else
- tcg_gen_mov_i64((NAT)r, a);
-#endif
-}
-
-static inline void tcg_gen_extu_ptr_i64(TCGv_i64 r, TCGv_ptr a)
-{
-#if UINTPTR_MAX == UINT32_MAX
- tcg_gen_extu_i32_i64(r, (NAT)a);
-#else
- tcg_gen_mov_i64(r, (NAT)a);
-#endif
-}
-
-static inline void tcg_gen_trunc_ptr_i32(TCGv_i32 r, TCGv_ptr a)
-{
-#if UINTPTR_MAX == UINT32_MAX
- tcg_gen_mov_i32(r, (NAT)a);
-#else
- tcg_gen_extrl_i64_i32(r, (NAT)a);
-#endif
-}
+#define tcg_gen_dup_tl tcg_gen_dup_i32
-#undef PTR
-#undef NAT
+#define dup_const_tl(VECE, C) \
+ (__builtin_constant_p(VECE) \
+ ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \
+ : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \
+ : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \
+ : (qemu_build_not_reached_always(), 0)) \
+ : (target_long)dup_const(VECE, C))
+#endif /* TARGET_LONG_BITS == 64 */
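In the 32-bit branch above, dup_const_tl() folds to a compile-time replication of the constant across a target_long whenever the element size is itself a constant. A minimal sketch of the expected results (the values follow directly from the multipliers in the macro):

    dup_const_tl(MO_8,  0xab)        /* == 0xabababab */
    dup_const_tl(MO_16, 0x1234)      /* == 0x12341234 */
    dup_const_tl(MO_32, 0xdeadbeef)  /* == 0xdeadbeef */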
#endif /* TCG_TCG_OP_H */
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index e3929b80d2..b80227fa1c 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -45,9 +45,9 @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END)
DEF(mb, 0, 0, 1, 0)
DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
-DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(setcond_i32, 1, 2, 1, 0)
-DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32))
+DEF(negsetcond_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_negsetcond_i32))
+DEF(movcond_i32, 1, 4, 1, 0)
/* load/store */
DEF(ld8u_i32, 1, 1, 1, 0)
DEF(ld8s_i32, 1, 1, 1, 0)
@@ -81,7 +81,7 @@ DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))
-DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END)
+DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
@@ -89,17 +89,18 @@ DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
-DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | IMPL(TCG_TARGET_REG_BITS == 32))
+DEF(brcond2_i32, 0, 4, 2,
+ TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32))
DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))
DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
-DEF(bswap16_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap16_i32))
-DEF(bswap32_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap32_i32))
+DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
+DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
-DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32))
+DEF(neg_i32, 1, 1, 0, 0)
DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
@@ -110,9 +111,9 @@ DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
-DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
DEF(setcond_i64, 1, 2, 1, IMPL64)
-DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64))
+DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64))
+DEF(movcond_i64, 1, 4, 1, IMPL64)
/* load/store */
DEF(ld8u_i64, 1, 1, 1, IMPL64)
DEF(ld8s_i64, 1, 1, 1, IMPL64)
@@ -153,24 +154,24 @@ DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
DEF(extrl_i64_i32, 1, 1, 0,
- IMPL(TCG_TARGET_HAS_extrl_i64_i32)
+ IMPL(TCG_TARGET_HAS_extr_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(extrh_i64_i32, 1, 1, 0,
- IMPL(TCG_TARGET_HAS_extrh_i64_i32)
+ IMPL(TCG_TARGET_HAS_extr_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
-DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
+DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
-DEF(bswap16_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
-DEF(bswap32_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
-DEF(bswap64_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
+DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
+DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
+DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
-DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64))
+DEF(neg_i64, 1, 1, 0, IMPL64)
DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
@@ -187,35 +188,64 @@ DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))
-#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2)
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
-/* QEMU specific */
-DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS,
- TCG_OPF_NOT_PRESENT)
+/* There are tcg_ctx->insn_start_words arguments here, not just one. */
+DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)
+
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
-DEF(goto_ptr, 0, 1, 0,
- TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr))
+DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)
-DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
+/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
+DEF(qemu_ld_a32_i32, 1, 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1,
+DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
+DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
+DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+
+/* Only used by i386 to cope with stupid register constraints. */
+DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
+ IMPL(TCG_TARGET_HAS_qemu_st8_i32))
+DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
+ IMPL(TCG_TARGET_HAS_qemu_st8_i32))
+
+/* Only for 64-bit hosts at the moment. */
+DEF(qemu_ld_a32_i128, 2, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_ld_a64_i128, 2, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_st_a32_i128, 0, 3, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_st_a64_i128, 0, 3, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+
/* Host vector support. */
#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
-DEF(dupi_vec, 1, 0, 1, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
DEF(dup_vec, 1, 1, 0, IMPLVEC)
DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))
@@ -243,6 +273,9 @@ DEF(or_vec, 1, 2, 0, IMPLVEC)
DEF(xor_vec, 1, 2, 0, IMPLVEC)
DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
+DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
+DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
+DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
@@ -272,7 +305,12 @@ DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
#include "tcg-target.opc.h"
#endif
-#undef TLADDR_ARGS
+#ifdef TCG_TARGET_INTERPRETER
+/* These opcodes are only for use between the tci generator and interpreter. */
+DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+#endif
+
#undef DATA64_ARGS
#undef IMPL
#undef IMPL64
diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h
new file mode 100644
index 0000000000..44192c55a9
--- /dev/null
+++ b/include/tcg/tcg-temp-internal.h
@@ -0,0 +1,45 @@
+/*
+ * TCG internals related to TCG temp allocation
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_TEMP_INTERNAL_H
+#define TCG_TEMP_INTERNAL_H
+
+/*
+ * Allocation and freeing of EBB temps is reserved to TCG internals
+ */
+
+void tcg_temp_free_internal(TCGTemp *);
+
+void tcg_temp_free_i32(TCGv_i32 arg);
+void tcg_temp_free_i64(TCGv_i64 arg);
+void tcg_temp_free_i128(TCGv_i128 arg);
+void tcg_temp_free_ptr(TCGv_ptr arg);
+void tcg_temp_free_vec(TCGv_vec arg);
+
+TCGv_i32 tcg_temp_ebb_new_i32(void);
+TCGv_i64 tcg_temp_ebb_new_i64(void);
+TCGv_ptr tcg_temp_ebb_new_ptr(void);
+TCGv_i128 tcg_temp_ebb_new_i128(void);
+
+#endif /* TCG_TEMP_INTERNAL_H */
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index e63450a893..05a1912f8a 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -25,45 +25,24 @@
#ifndef TCG_H
#define TCG_H
-#include "cpu.h"
#include "exec/memop.h"
-#include "exec/tb-context.h"
+#include "exec/memopidx.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
+#include "tcg-target-reg-bits.h"
#include "tcg-target.h"
-#include "qemu/int128.h"
+#include "tcg/tcg-cond.h"
+#include "tcg/debug-assert.h"
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266
-#if HOST_LONG_BITS == 32
-#define MAX_OPC_PARAM_PER_ARG 2
-#else
-#define MAX_OPC_PARAM_PER_ARG 1
-#endif
-#define MAX_OPC_PARAM_IARGS 6
-#define MAX_OPC_PARAM_OARGS 1
-#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
-
-/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
- * and up to 4 + N parameters on 64-bit archs
- * (N = number of input arguments + output arguments). */
-#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
+#define MAX_CALL_IARGS 7
#define CPU_TEMP_BUF_NLONGS 128
-
-/* Default target word size to pointer size. */
-#ifndef TCG_TARGET_REG_BITS
-# if UINTPTR_MAX == UINT32_MAX
-# define TCG_TARGET_REG_BITS 32
-# elif UINTPTR_MAX == UINT64_MAX
-# define TCG_TARGET_REG_BITS 64
-# else
-# error Unknown pointer size for tcg target
-# endif
-#endif
+#define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long))
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
@@ -79,15 +58,6 @@ typedef uint64_t tcg_target_ulong;
#error unsupported
#endif
-/* Oversized TCG guests make things like MTTCG hard
- * as we can't use atomics for cputlb updates.
- */
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-#define TCG_OVERSIZED_GUEST 1
-#else
-#define TCG_OVERSIZED_GUEST 0
-#endif
-
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
@@ -98,8 +68,7 @@ typedef uint64_t TCGRegSet;
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
-#define TCG_TARGET_HAS_extrl_i64_i32 0
-#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_div2_i64 0
@@ -113,7 +82,6 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_neg_i64 0
#define TCG_TARGET_HAS_not_i64 0
#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
@@ -127,7 +95,7 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_extract_i64 0
#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_movcond_i64 0
+#define TCG_TARGET_HAS_negsetcond_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
@@ -166,13 +134,6 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_rem_i64 0
#endif
-/* For 32-bit targets, some sort of unsigned widening multiply is required. */
-#if TCG_TARGET_REG_BITS == 32 \
- && !(defined(TCG_TARGET_HAS_mulu2_i32) \
- || defined(TCG_TARGET_HAS_muluh_i32))
-# error "Missing unsigned widening multiply"
-#endif
-
#if !defined(TCG_TARGET_HAS_v64) \
&& !defined(TCG_TARGET_HAS_v128) \
&& !defined(TCG_TARGET_HAS_v256)
@@ -182,6 +143,9 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_andc_vec 0
#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_roti_vec 0
#define TCG_TARGET_HAS_rots_vec 0
#define TCG_TARGET_HAS_rotv_vec 0
@@ -206,12 +170,6 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_v256 0
#endif
-#ifndef TARGET_INSN_START_EXTRA_WORDS
-# define TARGET_INSN_START_WORDS 1
-#else
-# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
-#endif
-
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
@@ -237,14 +195,6 @@ typedef uint64_t tcg_insn_unit;
/* The port better have done this. */
#endif
-
-#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
-# define tcg_debug_assert(X) do { assert(X); } while (0)
-#else
-# define tcg_debug_assert(X) \
- do { if (!(X)) { __builtin_unreachable(); } } while (0)
-#endif
-
typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
QSIMPLEQ_ENTRY(TCGRelocation) next;
@@ -253,16 +203,23 @@ struct TCGRelocation {
int type;
};
+typedef struct TCGOp TCGOp;
+typedef struct TCGLabelUse TCGLabelUse;
+struct TCGLabelUse {
+ QSIMPLEQ_ENTRY(TCGLabelUse) next;
+ TCGOp *op;
+};
+
typedef struct TCGLabel TCGLabel;
struct TCGLabel {
- unsigned present : 1;
- unsigned has_value : 1;
- unsigned id : 14;
- unsigned refs : 16;
+ bool present;
+ bool has_value;
+ uint16_t id;
union {
uintptr_t value;
- tcg_insn_unit *value_ptr;
+ const tcg_insn_unit *value_ptr;
} u;
+ QSIMPLEQ_HEAD(, TCGLabelUse) branches;
QSIMPLEQ_HEAD(, TCGRelocation) relocs;
QSIMPLEQ_ENTRY(TCGLabel) next;
};
@@ -285,12 +242,14 @@ typedef struct TCGPool {
typedef enum TCGType {
TCG_TYPE_I32,
TCG_TYPE_I64,
+ TCG_TYPE_I128,
TCG_TYPE_V64,
TCG_TYPE_V128,
TCG_TYPE_V256,
- TCG_TYPE_COUNT, /* number of different types */
+ /* Number of different types (integer not enum) */
+#define TCG_TYPE_COUNT (TCG_TYPE_V256 + 1)
/* An alias for the size of the host register. */
#if TCG_TARGET_REG_BITS == 32
@@ -305,16 +264,25 @@ typedef enum TCGType {
#else
TCG_TYPE_PTR = TCG_TYPE_I64,
#endif
-
- /* An alias for the size of the target "long", aka register. */
-#if TARGET_LONG_BITS == 64
- TCG_TYPE_TL = TCG_TYPE_I64,
-#else
- TCG_TYPE_TL = TCG_TYPE_I32,
-#endif
} TCGType;
/**
+ * tcg_type_size
+ * @t: type
+ *
+ * Return the size of the type in bytes.
+ */
+static inline int tcg_type_size(TCGType t)
+{
+ unsigned i = t;
+ if (i >= TCG_TYPE_V64) {
+ tcg_debug_assert(i < TCG_TYPE_COUNT);
+ i -= TCG_TYPE_V64 - 1;
+ }
+ return 4 << i;
+}
+
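Because the vector types follow the integer types in the TCGType enum, the small index adjustment above lets both ranges share the same 4 << i computation. As a sketch, the mapping this yields is:

    tcg_type_size(TCG_TYPE_I32)  == 4
    tcg_type_size(TCG_TYPE_I64)  == 8
    tcg_type_size(TCG_TYPE_I128) == 16
    tcg_type_size(TCG_TYPE_V64)  == 8
    tcg_type_size(TCG_TYPE_V128) == 16
    tcg_type_size(TCG_TYPE_V256) == 32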
+/**
* get_alignment_bits
* @memop: MemOp value
*
@@ -334,10 +302,6 @@ static inline unsigned get_alignment_bits(MemOp memop)
/* A specific alignment requirement. */
a = a >> MO_ASHIFT;
}
-#if defined(CONFIG_SOFTMMU)
- /* The requested alignment cannot overlap the TLB flags. */
- tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
-#endif
return a;
}
@@ -349,13 +313,14 @@ typedef tcg_target_ulong TCGArg;
in tcg/README. Target CPU front-end code uses these types to deal
with TCG variables as it emits TCG code via the tcg_gen_* functions.
They come in several flavours:
- * TCGv_i32 : 32 bit integer type
- * TCGv_i64 : 64 bit integer type
- * TCGv_ptr : a host pointer type
- * TCGv_vec : a host vector type; the exact size is not exposed
- to the CPU front-end code.
- * TCGv : an integer type the same size as target_ulong
- (an alias for either TCGv_i32 or TCGv_i64)
+ * TCGv_i32 : 32 bit integer type
+ * TCGv_i64 : 64 bit integer type
+ * TCGv_i128 : 128 bit integer type
+ * TCGv_ptr : a host pointer type
+ * TCGv_vec : a host vector type; the exact size is not exposed
+ to the CPU front-end code.
+ * TCGv : an integer type the same size as target_ulong
+ (an alias for either TCGv_i32 or TCGv_i64)
The compiler's type checking will complain if you mix them
up and pass the wrong sized TCGv to a function.
@@ -375,16 +340,10 @@ typedef tcg_target_ulong TCGArg;
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
+typedef struct TCGv_i128_d *TCGv_i128;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
-#if TARGET_LONG_BITS == 32
-#define TCGv TCGv_i32
-#elif TARGET_LONG_BITS == 64
-#define TCGv TCGv_i64
-#else
-#error Unhandled TARGET_LONG_BITS value
-#endif
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
@@ -394,8 +353,10 @@ typedef TCGv_ptr TCGv_env;
#define TCG_CALL_NO_WRITE_GLOBALS 0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS 0x0004
-/* Helper is QEMU_NORETURN. */
+/* Helper is G_NORETURN. */
#define TCG_CALL_NO_RETURN 0x0008
+/* Helper is part of Plugins. */
+#define TCG_CALL_PLUGIN 0x0010
/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS
@@ -404,77 +365,17 @@ typedef TCGv_ptr TCGv_env;
#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
-/* Used to align parameters. See the comment before tcgv_i32_temp. */
-#define TCG_CALL_DUMMY_ARG ((TCGArg)0)
-
-/* Conditions. Note that these are laid out for easy manipulation by
- the functions below:
- bit 0 is used for inverting;
- bit 1 is signed,
- bit 2 is unsigned,
- bit 3 is used with bit 0 for swapping signed/unsigned. */
-typedef enum {
- /* non-signed */
- TCG_COND_NEVER = 0 | 0 | 0 | 0,
- TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
- TCG_COND_EQ = 8 | 0 | 0 | 0,
- TCG_COND_NE = 8 | 0 | 0 | 1,
- /* signed */
- TCG_COND_LT = 0 | 0 | 2 | 0,
- TCG_COND_GE = 0 | 0 | 2 | 1,
- TCG_COND_LE = 8 | 0 | 2 | 0,
- TCG_COND_GT = 8 | 0 | 2 | 1,
- /* unsigned */
- TCG_COND_LTU = 0 | 4 | 0 | 0,
- TCG_COND_GEU = 0 | 4 | 0 | 1,
- TCG_COND_LEU = 8 | 4 | 0 | 0,
- TCG_COND_GTU = 8 | 4 | 0 | 1,
-} TCGCond;
-
-/* Invert the sense of the comparison. */
-static inline TCGCond tcg_invert_cond(TCGCond c)
-{
- return (TCGCond)(c ^ 1);
-}
-
-/* Swap the operands in a comparison. */
-static inline TCGCond tcg_swap_cond(TCGCond c)
-{
- return c & 6 ? (TCGCond)(c ^ 9) : c;
-}
-
-/* Create an "unsigned" version of a "signed" comparison. */
-static inline TCGCond tcg_unsigned_cond(TCGCond c)
-{
- return c & 2 ? (TCGCond)(c ^ 6) : c;
-}
-
-/* Create a "signed" version of an "unsigned" comparison. */
-static inline TCGCond tcg_signed_cond(TCGCond c)
-{
- return c & 4 ? (TCGCond)(c ^ 6) : c;
-}
-
-/* Must a comparison be considered unsigned? */
-static inline bool is_unsigned_cond(TCGCond c)
-{
- return (c & 4) != 0;
-}
-
-/* Create a "high" version of a double-word comparison.
- This removes equality from a LTE or GTE comparison. */
-static inline TCGCond tcg_high_cond(TCGCond c)
-{
- switch (c) {
- case TCG_COND_GE:
- case TCG_COND_LE:
- case TCG_COND_GEU:
- case TCG_COND_LEU:
- return (TCGCond)(c ^ 8);
- default:
- return c;
- }
-}
+/*
+ * Flags for the bswap opcodes.
+ * If IZ, the input is zero-extended, otherwise unknown.
+ * If OZ or OS, the output is zero- or sign-extended respectively,
+ * otherwise the high bits are undefined.
+ */
+enum {
+ TCG_BSWAP_IZ = 1,
+ TCG_BSWAP_OZ = 2,
+ TCG_BSWAP_OS = 4,
+};
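These flags let the front end tell the optimizer what it already knows about the input and what it needs from the output, so redundant zero/sign extensions can be dropped. A hedged usage sketch, assuming the flags-taking tcg_gen_bswap16_i32(ret, arg, flags) form introduced alongside this change:

    /* Input already zero-extended to 16 bits; keep the result zero-extended,
       so the high bits of dst are known to be zero afterwards. */
    tcg_gen_bswap16_i32(dst, src, TCG_BSWAP_IZ | TCG_BSWAP_OZ);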
typedef enum TCGTempVal {
TEMP_VAL_DEAD,
@@ -483,26 +384,37 @@ typedef enum TCGTempVal {
TEMP_VAL_CONST,
} TCGTempVal;
+typedef enum TCGTempKind {
+ /*
+ * Temp is dead at the end of the extended basic block (EBB),
+ * the single-entry multiple-exit region that falls through
+ * conditional branches.
+ */
+ TEMP_EBB,
+ /* Temp is live across the entire translation block, but dead at end. */
+ TEMP_TB,
+ /* Temp is live across the entire translation block, and between them. */
+ TEMP_GLOBAL,
+ /* Temp is in a fixed register. */
+ TEMP_FIXED,
+ /* Temp is a fixed constant. */
+ TEMP_CONST,
+} TCGTempKind;
+
typedef struct TCGTemp {
TCGReg reg:8;
TCGTempVal val_type:8;
TCGType base_type:8;
TCGType type:8;
- unsigned int fixed_reg:1;
+ TCGTempKind kind:3;
unsigned int indirect_reg:1;
unsigned int indirect_base:1;
unsigned int mem_coherent:1;
unsigned int mem_allocated:1;
- /* If true, the temp is saved across both basic blocks and
- translation blocks. */
- unsigned int temp_global:1;
- /* If true, the temp is saved across basic blocks but dead
- at the end of translation blocks. If false, the temp is
- dead at the end of basic blocks. */
- unsigned int temp_local:1;
unsigned int temp_allocated:1;
+ unsigned int temp_subindex:2;
- tcg_target_long val;
+ int64_t val;
struct TCGTemp *mem_base;
intptr_t mem_offset;
const char *name;
@@ -520,38 +432,35 @@ typedef struct TCGTempSet {
unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;
-/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
- this imples a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
- There are never more than 2 outputs, which means that we can store all
- dead + sync data within 16 bits. */
-#define DEAD_ARG 4
-#define SYNC_ARG 1
-typedef uint16_t TCGLifeData;
+/*
+ * With 1 128-bit output, a 32-bit host requires 4 output parameters,
+ * which leaves a maximum of 28 other slots. Which is enough for 7
+ * 128-bit operands.
+ */
+#define DEAD_ARG (1 << 4)
+#define SYNC_ARG (1 << 0)
+typedef uint32_t TCGLifeData;
-/* The layout here is designed to avoid a bitfield crossing of
- a 32-bit boundary, which would cause GCC to add extra padding. */
-typedef struct TCGOp {
- TCGOpcode opc : 8; /* 8 */
+struct TCGOp {
+ TCGOpcode opc : 8;
+ unsigned nargs : 8;
/* Parameters for this opcode. See below. */
- unsigned param1 : 4; /* 12 */
- unsigned param2 : 4; /* 16 */
+ unsigned param1 : 8;
+ unsigned param2 : 8;
/* Lifetime data of the operands. */
- unsigned life : 16; /* 32 */
+ TCGLifeData life;
/* Next and previous opcodes. */
QTAILQ_ENTRY(TCGOp) link;
-#ifdef CONFIG_PLUGIN
- QSIMPLEQ_ENTRY(TCGOp) plugin_link;
-#endif
-
- /* Arguments for the opcode. */
- TCGArg args[MAX_OPC_PARAM];
/* Register preferences for the output(s). */
TCGRegSet output_pref[2];
-} TCGOp;
+
+ /* Arguments for the opcode. */
+ TCGArg args[];
+};
#define TCGOP_CALLI(X) (X)->param1
#define TCGOP_CALLO(X) (X)->param2
@@ -562,26 +471,10 @@ typedef struct TCGOp {
/* Make sure operands fit in the bitfields above. */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
-typedef struct TCGProfile {
- int64_t cpu_exec_time;
- int64_t tb_count1;
- int64_t tb_count;
- int64_t op_count; /* total insn count */
- int op_count_max; /* max insn per TB */
- int temp_count_max;
- int64_t temp_count;
- int64_t del_op_count;
- int64_t code_in_len;
- int64_t code_out_len;
- int64_t search_out_len;
- int64_t interm_time;
- int64_t code_time;
- int64_t la_time;
- int64_t opt_time;
- int64_t restore_count;
- int64_t restore_time;
- int64_t table_op_count[NB_OPS];
-} TCGProfile;
+static inline TCGRegSet output_pref(const TCGOp *op, unsigned i)
+{
+ return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
+}
struct TCGContext {
uint8_t *pool_cur, *pool_end;
@@ -591,28 +484,25 @@ struct TCGContext {
int nb_temps;
int nb_indirects;
int nb_ops;
+ TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
- /* goto_tb support */
- tcg_insn_unit *code_buf;
- uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
- uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
- uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
+ int page_mask;
+ uint8_t page_bits;
+ uint8_t tlb_dyn_max_bits;
+ uint8_t insn_start_words;
+ TCGBar guest_mo;
TCGRegSet reserved_regs;
- uint32_t tb_cflags; /* cflags of the current TB */
intptr_t current_frame_offset;
intptr_t frame_start;
intptr_t frame_end;
TCGTemp *frame_temp;
- tcg_insn_unit *code_ptr;
-
-#ifdef CONFIG_PROFILER
- TCGProfile prof;
-#endif
+ TranslationBlock *gen_tb; /* tb for which code is being generated */
+ tcg_insn_unit *code_buf; /* pointer for start of tb */
+ tcg_insn_unit *code_ptr; /* pointer for running end of tb */
#ifdef CONFIG_DEBUG_TCG
- int temps_in_use;
int goto_tb_issue_mask;
const TCGOpcode *vecop_list;
#endif
@@ -621,8 +511,6 @@ struct TCGContext {
here, because there's too much arithmetic throughout that relies
on addition and subtraction working on bytes. Rely on the GCC
extension that allows arithmetic on void*. */
- void *code_gen_prologue;
- void *code_gen_epilogue;
void *code_gen_buffer;
size_t code_gen_buffer_size;
void *code_gen_ptr;
@@ -631,12 +519,10 @@ struct TCGContext {
/* Threshold to flush the translated code buffer. */
void *code_gen_highwater;
- size_t tb_phys_invalidate_count;
-
/* Track which vCPU triggers events */
CPUState *cpu; /* *_trans */
- /* These structures are private to tcg-target.inc.c. */
+ /* These structures are private to tcg-target.c.inc. */
#ifdef TCG_TARGET_NEED_LDST_LABELS
QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
@@ -658,36 +544,65 @@ struct TCGContext {
/* descriptor of the instruction being translated */
struct qemu_plugin_insn *plugin_insn;
-
- /* list to quickly access the injected ops */
- QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
#endif
- TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
+ GHashTable *const_table[TCG_TYPE_COUNT];
+ TCGTempSet free_temps[TCG_TYPE_COUNT];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
QTAILQ_HEAD(, TCGOp) ops, free_ops;
QSIMPLEQ_HEAD(, TCGLabel) labels;
+ /*
+ * When clear, new ops are added to the tail of @ops.
+ * When set, new ops are added in front of @emit_before_op.
+ */
+ TCGOp *emit_before_op;
+
/* Tells which temporary holds a given register.
It does not take into account fixed registers */
TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
uint16_t gen_insn_end_off[TCG_MAX_INSNS];
- target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
+ uint64_t *gen_insn_data;
+
+ /* Exit to translator on overflow. */
+ sigjmp_buf jmp_trans;
};
-extern TCGContext tcg_init_ctx;
+static inline bool temp_readonly(TCGTemp *ts)
+{
+ return ts->kind >= TEMP_FIXED;
+}
+
+#ifdef CONFIG_USER_ONLY
+extern bool tcg_use_softmmu;
+#else
+#define tcg_use_softmmu true
+#endif
+
extern __thread TCGContext *tcg_ctx;
-extern TCGv_env cpu_env;
+extern const void *tcg_code_gen_epilogue;
+extern uintptr_t tcg_splitwx_diff;
+extern TCGv_env tcg_env;
-static inline size_t temp_idx(TCGTemp *ts)
+bool in_code_gen_buffer(const void *p);
+
+#ifdef CONFIG_DEBUG_TCG
+const void *tcg_splitwx_to_rx(void *rw);
+void *tcg_splitwx_to_rw(const void *rx);
+#else
+static inline const void *tcg_splitwx_to_rx(void *rw)
{
- ptrdiff_t n = ts - tcg_ctx->temps;
- tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
- return n;
+ return rw ? rw + tcg_splitwx_diff : NULL;
}
+static inline void *tcg_splitwx_to_rw(const void *rx)
+{
+ return rx ? (void *)rx - tcg_splitwx_diff : NULL;
+}
+#endif
+
static inline TCGArg temp_arg(TCGTemp *ts)
{
return (uintptr_t)ts;
@@ -698,22 +613,36 @@ static inline TCGTemp *arg_temp(TCGArg a)
return (TCGTemp *)(uintptr_t)a;
}
-/* Using the offset of a temporary, relative to TCGContext, rather than
- its index means that we don't use 0. That leaves offset 0 free for
- a NULL representation without having to leave index 0 unused. */
+#ifdef CONFIG_DEBUG_TCG
+size_t temp_idx(TCGTemp *ts);
+TCGTemp *tcgv_i32_temp(TCGv_i32 v);
+#else
+static inline size_t temp_idx(TCGTemp *ts)
+{
+ return ts - tcg_ctx->temps;
+}
+
+/*
+ * Using the offset of a temporary, relative to TCGContext, rather than
+ * its index means that we don't use 0. That leaves offset 0 free for
+ * a NULL representation without having to leave index 0 unused.
+ */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
- uintptr_t o = (uintptr_t)v;
- TCGTemp *t = (void *)tcg_ctx + o;
- tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
- return t;
+ return (void *)tcg_ctx + (uintptr_t)v;
}
+#endif
static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
return tcgv_i32_temp((TCGv_i32)v);
}
+static inline TCGTemp *tcgv_i128_temp(TCGv_i128 v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
return tcgv_i32_temp((TCGv_i32)v);
@@ -734,6 +663,11 @@ static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
return temp_arg(tcgv_i64_temp(v));
}
+static inline TCGArg tcgv_i128_arg(TCGv_i128 v)
+{
+ return temp_arg(tcgv_i128_temp(v));
+}
+
static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
return temp_arg(tcgv_ptr_temp(v));
@@ -755,6 +689,11 @@ static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
return (TCGv_i64)temp_tcgv_i32(t);
}
+static inline TCGv_i128 temp_tcgv_i128(TCGTemp *t)
+{
+ return (TCGv_i128)temp_tcgv_i32(t);
+}
+
static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
return (TCGv_ptr)temp_tcgv_i32(t);
@@ -765,31 +704,34 @@ static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
return (TCGv_vec)temp_tcgv_i32(t);
}
-#if TCG_TARGET_REG_BITS == 32
-static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
+static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
{
- return temp_tcgv_i32(tcgv_i64_temp(t));
+ return op->args[arg];
}
-static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
+static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
- return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
+ op->args[arg] = v;
}
-#endif
-static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
+static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg)
{
- op->args[arg] = v;
+ if (TCG_TARGET_REG_BITS == 64) {
+ return tcg_get_insn_param(op, arg);
+ } else {
+ return deposit64(tcg_get_insn_param(op, arg * 2), 32, 32,
+ tcg_get_insn_param(op, arg * 2 + 1));
+ }
}
-static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
+static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v)
{
-#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
- tcg_set_insn_param(op, arg, v);
-#else
- tcg_set_insn_param(op, arg * 2, v);
- tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
-#endif
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_set_insn_param(op, arg, v);
+ } else {
+ tcg_set_insn_param(op, arg * 2, v);
+ tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
+ }
}
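On a 32-bit host each 64-bit insn_start value occupies two TCGArg slots, low half first; the accessors above hide that split. A small round-trip sketch with a hypothetical value, assuming a 32-bit host:

    tcg_set_insn_start_param(op, 0, 0x1122334455667788ull);
    /* op->args[0] == 0x55667788, op->args[1] == 0x11223344 */
    uint64_t v = tcg_get_insn_start_param(op, 0);  /* 0x1122334455667788 again */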
/* The last op that was emitted. */
@@ -818,8 +760,6 @@ void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);
-void tcg_region_init(void);
-void tb_destroy(TranslationBlock *tb);
void tcg_region_reset_all(void);
size_t tcg_code_size(void);
@@ -827,7 +767,6 @@ size_t tcg_code_capacity(void);
void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
-size_t tcg_tb_phys_invalidate_count(void);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);
@@ -851,133 +790,32 @@ static inline void *tcg_malloc(int size)
}
}
-void tcg_context_init(TCGContext *s);
-void tcg_register_thread(void);
-void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
-
-void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
-
-TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
- intptr_t, const char *);
-TCGTemp *tcg_temp_new_internal(TCGType, bool);
-void tcg_temp_free_internal(TCGTemp *);
-TCGv_vec tcg_temp_new_vec(TCGType type);
-TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
-
-static inline void tcg_temp_free_i32(TCGv_i32 arg)
-{
- tcg_temp_free_internal(tcgv_i32_temp(arg));
-}
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start);
-static inline void tcg_temp_free_i64(TCGv_i64 arg)
-{
- tcg_temp_free_internal(tcgv_i64_temp(arg));
-}
-
-static inline void tcg_temp_free_ptr(TCGv_ptr arg)
-{
- tcg_temp_free_internal(tcgv_ptr_temp(arg));
-}
-
-static inline void tcg_temp_free_vec(TCGv_vec arg)
-{
- tcg_temp_free_internal(tcgv_vec_temp(arg));
-}
-
-static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
- const char *name)
-{
- TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
- return temp_tcgv_i32(t);
-}
-
-static inline TCGv_i32 tcg_temp_new_i32(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
- return temp_tcgv_i32(t);
-}
+void tb_target_set_jmp_target(const TranslationBlock *, int,
+ uintptr_t, uintptr_t);
-static inline TCGv_i32 tcg_temp_local_new_i32(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
- return temp_tcgv_i32(t);
-}
-
-static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
- const char *name)
-{
- TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
- return temp_tcgv_i64(t);
-}
-
-static inline TCGv_i64 tcg_temp_new_i64(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
- return temp_tcgv_i64(t);
-}
-
-static inline TCGv_i64 tcg_temp_local_new_i64(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
- return temp_tcgv_i64(t);
-}
-
-static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
- const char *name)
-{
- TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
- return temp_tcgv_ptr(t);
-}
-
-static inline TCGv_ptr tcg_temp_new_ptr(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
- return temp_tcgv_ptr(t);
-}
-
-static inline TCGv_ptr tcg_temp_local_new_ptr(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
- return temp_tcgv_ptr(t);
-}
-
-#if defined(CONFIG_DEBUG_TCG)
-/* If you call tcg_clear_temp_count() at the start of a section of
- * code which is not supposed to leak any TCG temporaries, then
- * calling tcg_check_temp_count() at the end of the section will
- * return 1 if the section did in fact leak a temporary.
- */
-void tcg_clear_temp_count(void);
-int tcg_check_temp_count(void);
-#else
-#define tcg_clear_temp_count() do { } while (0)
-#define tcg_check_temp_count() 0
-#endif
-
-int64_t tcg_cpu_exec_time(void);
-void tcg_dump_info(void);
-void tcg_dump_op_count(void);
+void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
-#define TCG_CT_ALIAS 0x80
-#define TCG_CT_IALIAS 0x40
-#define TCG_CT_NEWREG 0x20 /* output requires a new register */
-#define TCG_CT_REG 0x01
-#define TCG_CT_CONST 0x02 /* any constant of register size */
+#define TCG_CT_CONST 1 /* any constant of register size */
typedef struct TCGArgConstraint {
- uint16_t ct;
- uint8_t alias_index;
- union {
- TCGRegSet regs;
- } u;
+ unsigned ct : 16;
+ unsigned alias_index : 4;
+ unsigned sort_index : 4;
+ unsigned pair_index : 4;
+ unsigned pair : 2; /* 0: none, 1: first, 2: second, 3: second alias */
+ bool oalias : 1;
+ bool ialias : 1;
+ bool newreg : 1;
+ TCGRegSet regs;
} TCGArgConstraint;
#define TCG_MAX_OP_ARGS 16
-/* Bits for TCGOpDef->flags, 8 bits available. */
+/* Bits for TCGOpDef->flags, 8 bits available, all used. */
enum {
/* Instruction exits the translation block. */
TCG_OPF_BB_EXIT = 0x01,
@@ -991,10 +829,12 @@ enum {
/* Instruction operands are 64-bits (otherwise 32-bits). */
TCG_OPF_64BIT = 0x10,
/* Instruction is optional and not implemented by the host, or insn
- is generic and should not be implemened by the host. */
+ is generic and should not be implemented by the host. */
TCG_OPF_NOT_PRESENT = 0x20,
/* Instruction operands are vectors. */
TCG_OPF_VECTOR = 0x40,
+ /* Instruction is a conditional branch. */
+ TCG_OPF_COND_BRANCH = 0x80
};
typedef struct TCGOpDef {
@@ -1002,10 +842,6 @@ typedef struct TCGOpDef {
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
uint8_t flags;
TCGArgConstraint *args_ct;
- int *sorted_args;
-#if defined(CONFIG_DEBUG_TCG)
- int used;
-#endif
} TCGOpDef;
extern TCGOpDef tcg_op_defs[];
@@ -1016,39 +852,40 @@ typedef struct TCGTargetOpDef {
const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;
-#define tcg_abort() \
-do {\
- fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
- abort();\
-} while (0)
-
bool tcg_op_supported(TCGOpcode op);
-void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
-
-TCGOp *tcg_emit_op(TCGOpcode opc);
+void tcg_gen_call0(TCGHelperInfo *, TCGTemp *ret);
+void tcg_gen_call1(TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
+void tcg_gen_call2(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *);
+void tcg_gen_call3(TCGHelperInfo *, TCGTemp *ret, TCGTemp *,
+ TCGTemp *, TCGTemp *);
+void tcg_gen_call4(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *);
+void tcg_gen_call5(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *, TCGTemp *);
+void tcg_gen_call6(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
+void tcg_gen_call7(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
+
+TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
void tcg_op_remove(TCGContext *s, TCGOp *op);
-TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
-TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
+TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
+ TCGOpcode opc, unsigned nargs);
+TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
+ TCGOpcode opc, unsigned nargs);
-void tcg_optimize(TCGContext *s);
-
-TCGv_i32 tcg_const_i32(int32_t val);
-TCGv_i64 tcg_const_i64(int64_t val);
-TCGv_i32 tcg_const_local_i32(int32_t val);
-TCGv_i64 tcg_const_local_i64(int64_t val);
-TCGv_vec tcg_const_zeros_vec(TCGType);
-TCGv_vec tcg_const_ones_vec(TCGType);
-TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
-TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
+/**
+ * tcg_remove_ops_after:
+ * @op: target operation
+ *
+ * Discard any opcodes emitted since @op. Expected usage is to save
+ * a starting point with tcg_last_op(), speculatively emit opcodes,
+ * then decide whether or not to keep those opcodes after the fact.
+ */
+void tcg_remove_ops_after(TCGOp *op);
-#if UINTPTR_MAX == UINT32_MAX
-# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
-# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
-#else
-# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
-# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
-#endif
+void tcg_optimize(TCGContext *s);
TCGLabel *gen_new_label(void);
@@ -1090,7 +927,7 @@ static inline TCGLabel *arg_label(TCGArg i)
* correct result.
*/
-static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
+static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
{
return a - b;
}
@@ -1104,60 +941,35 @@ static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
* to the destination address.
*/
-static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
+static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
{
- return tcg_ptr_byte_diff(target, s->code_ptr);
+ return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
}
/**
- * tcg_current_code_size
+ * tcg_tbrel_diff
* @s: the tcg context
+ * @target: address of the target
*
- * Compute the current code size within the translation block.
- * This is used to fill in qemu's data structures for goto_tb.
- */
-
-static inline size_t tcg_current_code_size(TCGContext *s)
-{
- return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
-}
-
-/* Combine the MemOp and mmu_idx parameters into a single value. */
-typedef uint32_t TCGMemOpIdx;
-
-/**
- * make_memop_idx
- * @op: memory operation
- * @idx: mmu index
- *
- * Encode these values into a single parameter.
+ * Produce a difference, from the beginning of the current TB code
+ * to the destination address.
*/
-static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
+static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
{
- tcg_debug_assert(idx <= 15);
- return (op << 4) | idx;
+ return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
}
/**
- * get_memop
- * @oi: combined op/idx parameter
+ * tcg_current_code_size
+ * @s: the tcg context
*
- * Extract the memory operation from the combined value.
+ * Compute the current code size within the translation block.
+ * This is used to fill in qemu's data structures for goto_tb.
*/
-static inline MemOp get_memop(TCGMemOpIdx oi)
-{
- return oi >> 4;
-}
-/**
- * get_mmuidx
- * @oi: combined op/idx parameter
- *
- * Extract the mmu index from the combined value.
- */
-static inline unsigned get_mmuidx(TCGMemOpIdx oi)
+static inline size_t tcg_current_code_size(TCGContext *s)
{
- return oi & 15;
+ return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}
/**
@@ -1211,14 +1023,14 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
#define TB_EXIT_IDXMAX 1
#define TB_EXIT_REQUESTED 3
-#ifdef HAVE_TCG_QEMU_TB_EXEC
-uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
+#ifdef CONFIG_TCG_INTERPRETER
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
#else
-# define tcg_qemu_tb_exec(env, tb_ptr) \
- ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
+typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
+extern tcg_prologue_fn *tcg_qemu_tb_exec;
#endif
-void tcg_register_jit(void *buf, size_t buf_size);
+void tcg_register_jit(const void *buf, size_t buf_size);
#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
@@ -1235,7 +1047,7 @@ static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
/* Expand the tuple (opc, type, vece) on the given arguments. */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
-/* Replicate a constant C accoring to the log2 of the element size. */
+/* Replicate a constant C according to the log2 of the element size. */
uint64_t dup_const(unsigned vece, uint64_t c);
#define dup_const(VECE, C) \
@@ -1243,181 +1055,10 @@ uint64_t dup_const(unsigned vece, uint64_t c);
? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \
: (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \
: (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \
- : dup_const(VECE, C)) \
+ : (VECE) == MO_64 ? (uint64_t)(C) \
+ : (qemu_build_not_reached_always(), 0)) \
: dup_const(VECE, C))
-
-/*
- * Memory helpers that will be used by TCG generated code.
- */
-#ifdef CONFIG_SOFTMMU
-/* Value zero-extended to tcg register size. */
-tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-/* Value sign-extended to tcg register size. */
-tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-/* Temporary aliases until backends are converted. */
-#ifdef TARGET_WORDS_BIGENDIAN
-# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
-# define helper_ret_lduw_mmu helper_be_lduw_mmu
-# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
-# define helper_ret_ldul_mmu helper_be_ldul_mmu
-# define helper_ret_ldl_mmu helper_be_ldul_mmu
-# define helper_ret_ldq_mmu helper_be_ldq_mmu
-# define helper_ret_stw_mmu helper_be_stw_mmu
-# define helper_ret_stl_mmu helper_be_stl_mmu
-# define helper_ret_stq_mmu helper_be_stq_mmu
-#else
-# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
-# define helper_ret_lduw_mmu helper_le_lduw_mmu
-# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
-# define helper_ret_ldul_mmu helper_le_ldul_mmu
-# define helper_ret_ldl_mmu helper_le_ldul_mmu
-# define helper_ret_ldq_mmu helper_le_ldq_mmu
-# define helper_ret_stw_mmu helper_le_stw_mmu
-# define helper_ret_stl_mmu helper_le_stl_mmu
-# define helper_ret_stq_mmu helper_le_stq_mmu
-#endif
-
-uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
- uint64_t cmpv, uint64_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
- uint32_t cmpv, uint32_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
- uint64_t cmpv, uint64_t newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
-TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
- (CPUArchState *env, target_ulong addr, TYPE val, \
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-#ifdef CONFIG_ATOMIC64
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
-#else
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
-#endif
-
-GEN_ATOMIC_HELPER_ALL(fetch_add)
-GEN_ATOMIC_HELPER_ALL(fetch_sub)
-GEN_ATOMIC_HELPER_ALL(fetch_and)
-GEN_ATOMIC_HELPER_ALL(fetch_or)
-GEN_ATOMIC_HELPER_ALL(fetch_xor)
-GEN_ATOMIC_HELPER_ALL(fetch_smin)
-GEN_ATOMIC_HELPER_ALL(fetch_umin)
-GEN_ATOMIC_HELPER_ALL(fetch_smax)
-GEN_ATOMIC_HELPER_ALL(fetch_umax)
-
-GEN_ATOMIC_HELPER_ALL(add_fetch)
-GEN_ATOMIC_HELPER_ALL(sub_fetch)
-GEN_ATOMIC_HELPER_ALL(and_fetch)
-GEN_ATOMIC_HELPER_ALL(or_fetch)
-GEN_ATOMIC_HELPER_ALL(xor_fetch)
-GEN_ATOMIC_HELPER_ALL(smin_fetch)
-GEN_ATOMIC_HELPER_ALL(umin_fetch)
-GEN_ATOMIC_HELPER_ALL(smax_fetch)
-GEN_ATOMIC_HELPER_ALL(umax_fetch)
-
-GEN_ATOMIC_HELPER_ALL(xchg)
-
-#undef GEN_ATOMIC_HELPER_ALL
-#undef GEN_ATOMIC_HELPER
-#endif /* CONFIG_SOFTMMU */
-
-/*
- * These aren't really a "proper" helpers because TCG cannot manage Int128.
- * However, use the same format as the others, for use by the backends.
- *
- * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
- * the ld/st functions are only defined if HAVE_ATOMIC128,
- * as defined by <qemu/atomic128.h>.
- */
-Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
- Int128 cmpv, Int128 newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
- Int128 cmpv, Int128 newv,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- TCGMemOpIdx oi, uintptr_t retaddr);
-
-#ifdef CONFIG_DEBUG_TCG
-void tcg_assert_listed_vecop(TCGOpcode);
-#else
-static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
-#endif
-
static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
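
For context, the compile-time branch of the revised dup_const macro above replicates
a constant across all lanes of a 64-bit value according to the log2 element size. A
minimal illustration (plain C, not part of the patch) of the values it is expected
to produce:

    /* Illustration only: expected replication results of dup_const(). */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* MO_8: every byte holds the low 8 bits of the constant. */
        assert(0x0101010101010101ull * (uint8_t)0xab == 0xababababababababull);
        /* MO_16: every 16-bit lane holds the low 16 bits. */
        assert(0x0001000100010001ull * (uint16_t)0x1234 == 0x1234123412341234ull);
        /* MO_32: every 32-bit lane holds the low 32 bits. */
        assert(0x0000000100000001ull * (uint32_t)0xdeadbeef == 0xdeadbeefdeadbeefull);
        /* MO_64: the constant is used unchanged. */
        return 0;
    }
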
diff --git a/include/trace-tcg.h b/include/trace-tcg.h
deleted file mode 100644
index da68608c85..0000000000
--- a/include/trace-tcg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef TRACE_TCG_H
-#define TRACE_TCG_H
-
-#include "trace/generated-tcg-tracers.h"
-
-#endif /* TRACE_TCG_H */
diff --git a/include/ui/clipboard.h b/include/ui/clipboard.h
new file mode 100644
index 0000000000..ab6acdbd8a
--- /dev/null
+++ b/include/ui/clipboard.h
@@ -0,0 +1,277 @@
+#ifndef QEMU_CLIPBOARD_H
+#define QEMU_CLIPBOARD_H
+
+#include "qemu/notify.h"
+
+/**
+ * DOC: Introduction
+ *
+ * The header ``ui/clipboard.h`` declares the qemu clipboard interface.
+ *
+ * All qemu elements which want to use the clipboard can register as
+ * a clipboard peer. Subsequently they can set the clipboard content
+ * and get notifications for clipboard updates.
+ *
+ * Typical users are user interfaces (gtk), remote access protocols
+ * (vnc) and devices talking to the guest (vdagent).
+ *
+ * Even though the design allows different data types, only plain text
+ * is supported for now.
+ */
+
+typedef enum QemuClipboardType QemuClipboardType;
+typedef enum QemuClipboardNotifyType QemuClipboardNotifyType;
+typedef enum QemuClipboardSelection QemuClipboardSelection;
+typedef struct QemuClipboardPeer QemuClipboardPeer;
+typedef struct QemuClipboardNotify QemuClipboardNotify;
+typedef struct QemuClipboardInfo QemuClipboardInfo;
+
+/**
+ * enum QemuClipboardType
+ *
+ * @QEMU_CLIPBOARD_TYPE_TEXT: text/plain; charset=utf-8
+ * @QEMU_CLIPBOARD_TYPE__COUNT: type count.
+ */
+enum QemuClipboardType {
+ QEMU_CLIPBOARD_TYPE_TEXT,
+ QEMU_CLIPBOARD_TYPE__COUNT,
+};
+
+/* same as VD_AGENT_CLIPBOARD_SELECTION_* */
+/**
+ * enum QemuClipboardSelection
+ *
+ * @QEMU_CLIPBOARD_SELECTION_CLIPBOARD: clipboard (explicit cut+paste).
+ * @QEMU_CLIPBOARD_SELECTION_PRIMARY: primary selection (select + middle mouse button).
+ * @QEMU_CLIPBOARD_SELECTION_SECONDARY: secondary selection (rarely used).
+ * @QEMU_CLIPBOARD_SELECTION__COUNT: selection count.
+ */
+enum QemuClipboardSelection {
+ QEMU_CLIPBOARD_SELECTION_CLIPBOARD,
+ QEMU_CLIPBOARD_SELECTION_PRIMARY,
+ QEMU_CLIPBOARD_SELECTION_SECONDARY,
+ QEMU_CLIPBOARD_SELECTION__COUNT,
+};
+
+/**
+ * struct QemuClipboardPeer
+ *
+ * @name: peer name.
+ * @notifier: notifier for clipboard updates.
+ * @request: callback for clipboard data requests.
+ *
+ * Clipboard peer description.
+ */
+struct QemuClipboardPeer {
+ const char *name;
+ Notifier notifier;
+ void (*request)(QemuClipboardInfo *info,
+ QemuClipboardType type);
+};
+
+/**
+ * enum QemuClipboardNotifyType
+ *
+ * @QEMU_CLIPBOARD_UPDATE_INFO: clipboard info update
+ * @QEMU_CLIPBOARD_RESET_SERIAL: reset clipboard serial
+ *
+ * Clipboard notify type.
+ */
+enum QemuClipboardNotifyType {
+ QEMU_CLIPBOARD_UPDATE_INFO,
+ QEMU_CLIPBOARD_RESET_SERIAL,
+};
+
+/**
+ * struct QemuClipboardNotify
+ *
+ * @type: the type of event.
+ * @info: a QemuClipboardInfo event.
+ *
+ * Clipboard notify data.
+ */
+struct QemuClipboardNotify {
+ QemuClipboardNotifyType type;
+ union {
+ QemuClipboardInfo *info;
+ };
+};
+
+/**
+ * struct QemuClipboardInfo
+ *
+ * @refcount: reference counter.
+ * @owner: clipboard owner.
+ * @selection: clipboard selection.
+ * @types: clipboard data array (one entry per type).
+ * @has_serial: whether @serial is available.
+ * @serial: the grab serial counter.
+ *
+ * Clipboard content data and metadata.
+ */
+struct QemuClipboardInfo {
+ uint32_t refcount;
+ QemuClipboardPeer *owner;
+ QemuClipboardSelection selection;
+ bool has_serial;
+ uint32_t serial;
+ struct {
+ bool available;
+ bool requested;
+ size_t size;
+ void *data;
+ } types[QEMU_CLIPBOARD_TYPE__COUNT];
+};
+
+/**
+ * qemu_clipboard_peer_register
+ *
+ * @peer: peer information.
+ *
+ * Register clipboard peer. Registering is needed for both active
+ * (set+grab clipboard) and passive (watch clipboard for updates)
+ * interaction with the qemu clipboard.
+ */
+void qemu_clipboard_peer_register(QemuClipboardPeer *peer);
+
+/**
+ * qemu_clipboard_peer_unregister
+ *
+ * @peer: peer information.
+ *
+ * Unregister clipboard peer.
+ */
+void qemu_clipboard_peer_unregister(QemuClipboardPeer *peer);
+
+/**
+ * qemu_clipboard_peer_owns
+ *
+ * @peer: peer information.
+ * @selection: clipboard selection.
+ *
+ * Return TRUE if the peer owns the clipboard.
+ */
+bool qemu_clipboard_peer_owns(QemuClipboardPeer *peer,
+ QemuClipboardSelection selection);
+
+/**
+ * qemu_clipboard_peer_release
+ *
+ * @peer: peer information.
+ * @selection: clipboard selection.
+ *
+ * If the peer owns the clipboard, release it.
+ */
+void qemu_clipboard_peer_release(QemuClipboardPeer *peer,
+ QemuClipboardSelection selection);
+
+/**
+ * qemu_clipboard_info
+ *
+ * @selection: clipboard selection.
+ *
+ * Return the current clipboard data & owner information.
+ */
+QemuClipboardInfo *qemu_clipboard_info(QemuClipboardSelection selection);
+
+/**
+ * qemu_clipboard_check_serial
+ *
+ * @info: clipboard info.
+ * @client: whether to check from the client context and priority.
+ *
+ * Return TRUE if the @info has a higher serial than the current clipboard.
+ */
+bool qemu_clipboard_check_serial(QemuClipboardInfo *info, bool client);
+
+/**
+ * qemu_clipboard_info_new
+ *
+ * @owner: clipboard owner.
+ * @selection: clipboard selection.
+ *
+ * Allocate a new QemuClipboardInfo and initialize it with the given
+ * @owner and @selection.
+ *
+ * QemuClipboardInfo is a reference-counted struct. The new struct is
+ * returned with a reference already taken (i.e. reference count is
+ * one).
+ */
+QemuClipboardInfo *qemu_clipboard_info_new(QemuClipboardPeer *owner,
+ QemuClipboardSelection selection);
+/**
+ * qemu_clipboard_info_ref
+ *
+ * @info: clipboard info.
+ *
+ * Increase @info reference count.
+ */
+QemuClipboardInfo *qemu_clipboard_info_ref(QemuClipboardInfo *info);
+
+/**
+ * qemu_clipboard_info_unref
+ *
+ * @info: clipboard info.
+ *
+ * Decrease @info reference count. When the count goes down to zero,
+ * free the @info struct itself and all clipboard data.
+ */
+void qemu_clipboard_info_unref(QemuClipboardInfo *info);
+
+/**
+ * qemu_clipboard_update
+ *
+ * @info: clipboard info.
+ *
+ * Update the qemu clipboard. Notify all registered peers (including
+ * the clipboard owner) that the qemu clipboard has been updated.
+ *
+ * This is used both for completely new clipboard content and for
+ * clipboard data updates in response to qemu_clipboard_request()
+ * calls.
+ */
+void qemu_clipboard_update(QemuClipboardInfo *info);
+
+/**
+ * qemu_clipboard_reset_serial
+ *
+ * Reset the clipboard serial.
+ */
+void qemu_clipboard_reset_serial(void);
+
+/**
+ * qemu_clipboard_request
+ *
+ * @info: clipboard info.
+ * @type: clipboard data type.
+ *
+ * Request clipboard content. Typically the clipboard owner only
+ * advertises the available data types and provides the actual data
+ * only on request.
+ */
+void qemu_clipboard_request(QemuClipboardInfo *info,
+ QemuClipboardType type);
+
+/**
+ * qemu_clipboard_set_data
+ *
+ * @peer: clipboard peer.
+ * @info: clipboard info.
+ * @type: clipboard data type.
+ * @size: data size.
+ * @data: data blob.
+ * @update: notify peers about the update.
+ *
+ * Set clipboard content for the given @type. This function will make
+ * a copy of the content data and store that.
+ */
+void qemu_clipboard_set_data(QemuClipboardPeer *peer,
+ QemuClipboardInfo *info,
+ QemuClipboardType type,
+ uint32_t size,
+ const void *data,
+ bool update);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuClipboardInfo, qemu_clipboard_info_unref)
+
+#endif /* QEMU_CLIPBOARD_H */
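
A minimal sketch (not part of the patch) of how a frontend might use this
interface; the peer name, helper names and the text payload below are
illustrative.

    /* Sketch: register a peer and publish plain-text clipboard content. */
    static void example_clipboard_request(QemuClipboardInfo *info,
                                          QemuClipboardType type)
    {
        /* Provide the data previously advertised for @type on demand. */
    }

    static QemuClipboardPeer example_peer = {
        .name = "example-ui",                 /* illustrative name */
        .request = example_clipboard_request,
        /* a real peer would also set .notifier.notify to watch for updates */
    };

    static void example_publish_text(const char *text)
    {
        g_autoptr(QemuClipboardInfo) info =
            qemu_clipboard_info_new(&example_peer,
                                    QEMU_CLIPBOARD_SELECTION_CLIPBOARD);

        qemu_clipboard_set_data(&example_peer, info, QEMU_CLIPBOARD_TYPE_TEXT,
                                strlen(text), text, true /* notify peers */);
    }

    /* during frontend init: qemu_clipboard_peer_register(&example_peer); */
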
diff --git a/include/ui/console.h b/include/ui/console.h
index f35b4fc082..0bc7a00ac0 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -4,13 +4,30 @@
#include "ui/qemu-pixman.h"
#include "qom/object.h"
#include "qemu/notify.h"
-#include "qemu/error-report.h"
#include "qapi/qapi-types-ui.h"
+#include "ui/input.h"
+#include "ui/surface.h"
-#ifdef CONFIG_OPENGL
-# include <epoxy/gl.h>
-# include "ui/shader.h"
-#endif
+#define TYPE_QEMU_CONSOLE "qemu-console"
+OBJECT_DECLARE_TYPE(QemuConsole, QemuConsoleClass, QEMU_CONSOLE)
+
+#define TYPE_QEMU_GRAPHIC_CONSOLE "qemu-graphic-console"
+OBJECT_DECLARE_SIMPLE_TYPE(QemuGraphicConsole, QEMU_GRAPHIC_CONSOLE)
+
+#define TYPE_QEMU_TEXT_CONSOLE "qemu-text-console"
+OBJECT_DECLARE_SIMPLE_TYPE(QemuTextConsole, QEMU_TEXT_CONSOLE)
+
+#define TYPE_QEMU_FIXED_TEXT_CONSOLE "qemu-fixed-text-console"
+OBJECT_DECLARE_SIMPLE_TYPE(QemuFixedTextConsole, QEMU_FIXED_TEXT_CONSOLE)
+
+#define QEMU_IS_GRAPHIC_CONSOLE(c) \
+ object_dynamic_cast(OBJECT(c), TYPE_QEMU_GRAPHIC_CONSOLE)
+
+#define QEMU_IS_TEXT_CONSOLE(c) \
+ object_dynamic_cast(OBJECT(c), TYPE_QEMU_TEXT_CONSOLE)
+
+#define QEMU_IS_FIXED_TEXT_CONSOLE(c) \
+ object_dynamic_cast(OBJECT(c), TYPE_QEMU_FIXED_TEXT_CONSOLE)
/* keyboard/mouse support */
@@ -65,19 +82,12 @@ void qemu_remove_led_event_handler(QEMUPutLEDEntry *entry);
void kbd_put_ledstate(int ledstate);
-typedef struct MouseTransformInfo {
- /* Touchscreen resolution */
- int x;
- int y;
- /* Calibration values as used/generated by tslib */
- int a[7];
-} MouseTransformInfo;
-
-void hmp_mouse_set(Monitor *mon, const QDict *qdict);
+bool qemu_mouse_set(int index, Error **errp);
/* keysym is a unicode code except for special keys (see QEMU_KEY_xxx
constants) */
#define QEMU_KEY_ESC1(c) ((c) | 0xe100)
+#define QEMU_KEY_TAB 0x0009
#define QEMU_KEY_BACKSPACE 0x007f
#define QEMU_KEY_UP QEMU_KEY_ESC1('A')
#define QEMU_KEY_DOWN QEMU_KEY_ESC1('B')
@@ -98,59 +108,65 @@ void hmp_mouse_set(Monitor *mon, const QDict *qdict);
#define QEMU_KEY_CTRL_PAGEUP 0xe406
#define QEMU_KEY_CTRL_PAGEDOWN 0xe407
-void kbd_put_keysym_console(QemuConsole *s, int keysym);
-bool kbd_put_qcode_console(QemuConsole *s, int qcode, bool ctrl);
-void kbd_put_string_console(QemuConsole *s, const char *str, int len);
-void kbd_put_keysym(int keysym);
+void qemu_text_console_put_keysym(QemuTextConsole *s, int keysym);
+bool qemu_text_console_put_qcode(QemuTextConsole *s, int qcode, bool ctrl);
+void qemu_text_console_put_string(QemuTextConsole *s, const char *str, int len);
+/* Touch devices */
+typedef struct touch_slot {
+ int x;
+ int y;
+ int tracking_id;
+} touch_slot;
+
+void console_handle_touch_event(QemuConsole *con,
+ struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX],
+ uint64_t num_slot,
+ int width, int height,
+ double x, double y,
+ InputMultiTouchType type,
+ Error **errp);
/* consoles */
-#define TYPE_QEMU_CONSOLE "qemu-console"
-#define QEMU_CONSOLE(obj) \
- OBJECT_CHECK(QemuConsole, (obj), TYPE_QEMU_CONSOLE)
-#define QEMU_CONSOLE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(QemuConsoleClass, (obj), TYPE_QEMU_CONSOLE)
-#define QEMU_CONSOLE_CLASS(klass) \
- OBJECT_CLASS_CHECK(QemuConsoleClass, (klass), TYPE_QEMU_CONSOLE)
-
-typedef struct QemuConsoleClass QemuConsoleClass;
-
struct QemuConsoleClass {
ObjectClass parent_class;
};
-#define QEMU_ALLOCATED_FLAG 0x01
-
-typedef struct DisplaySurface {
- pixman_format_code_t format;
- pixman_image_t *image;
- uint8_t flags;
-#ifdef CONFIG_OPENGL
- GLenum glformat;
- GLenum gltype;
- GLuint texture;
-#endif
-} DisplaySurface;
+typedef struct ScanoutTexture {
+ uint32_t backing_id;
+ bool backing_y_0_top;
+ uint32_t backing_width;
+ uint32_t backing_height;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ void *d3d_tex2d;
+} ScanoutTexture;
typedef struct QemuUIInfo {
+ /* physical dimension */
+ uint16_t width_mm;
+ uint16_t height_mm;
/* geometry */
int xoff;
int yoff;
uint32_t width;
uint32_t height;
+ uint32_t refresh_rate;
} QemuUIInfo;
/* cursor data format is 32bit RGBA */
typedef struct QEMUCursor {
- int width, height;
+ uint16_t width, height;
int hot_x, hot_y;
int refcount;
uint32_t data[];
} QEMUCursor;
-QEMUCursor *cursor_alloc(int width, int height);
-void cursor_get(QEMUCursor *c);
-void cursor_put(QEMUCursor *c);
+QEMUCursor *cursor_alloc(uint16_t width, uint16_t height);
+QEMUCursor *cursor_ref(QEMUCursor *c);
+void cursor_unref(QEMUCursor *c);
QEMUCursor *cursor_builtin_hidden(void);
QEMUCursor *cursor_builtin_left_ptr(void);
void cursor_print_ascii_art(QEMUCursor *c, const char *prefix);
@@ -177,60 +193,96 @@ typedef struct QemuDmaBuf {
uint32_t fourcc;
uint64_t modifier;
uint32_t texture;
+ uint32_t x;
+ uint32_t y;
+ uint32_t backing_width;
+ uint32_t backing_height;
bool y0_top;
+ void *sync;
+ int fence_fd;
+ bool allow_fences;
+ bool draw_submitted;
} QemuDmaBuf;
+enum display_scanout {
+ SCANOUT_NONE,
+ SCANOUT_SURFACE,
+ SCANOUT_TEXTURE,
+ SCANOUT_DMABUF,
+};
+
+typedef struct DisplayScanout {
+ enum display_scanout kind;
+ union {
+ /* DisplaySurface *surface; is kept in QemuConsole */
+ ScanoutTexture texture;
+ QemuDmaBuf *dmabuf;
+ };
+} DisplayScanout;
+
typedef struct DisplayState DisplayState;
+typedef struct DisplayGLCtx DisplayGLCtx;
typedef struct DisplayChangeListenerOps {
const char *dpy_name;
+ /* optional */
void (*dpy_refresh)(DisplayChangeListener *dcl);
+ /* optional */
void (*dpy_gfx_update)(DisplayChangeListener *dcl,
int x, int y, int w, int h);
+ /* optional */
void (*dpy_gfx_switch)(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface);
+ /* optional */
bool (*dpy_gfx_check_format)(DisplayChangeListener *dcl,
pixman_format_code_t format);
+ /* optional */
void (*dpy_text_cursor)(DisplayChangeListener *dcl,
int x, int y);
+ /* optional */
void (*dpy_text_resize)(DisplayChangeListener *dcl,
int w, int h);
+ /* optional */
void (*dpy_text_update)(DisplayChangeListener *dcl,
int x, int y, int w, int h);
+ /* optional */
void (*dpy_mouse_set)(DisplayChangeListener *dcl,
int x, int y, int on);
+ /* optional */
void (*dpy_cursor_define)(DisplayChangeListener *dcl,
QEMUCursor *cursor);
- QEMUGLContext (*dpy_gl_ctx_create)(DisplayChangeListener *dcl,
- QEMUGLParams *params);
- void (*dpy_gl_ctx_destroy)(DisplayChangeListener *dcl,
- QEMUGLContext ctx);
- int (*dpy_gl_ctx_make_current)(DisplayChangeListener *dcl,
- QEMUGLContext ctx);
- QEMUGLContext (*dpy_gl_ctx_get_current)(DisplayChangeListener *dcl);
-
+ /* required if GL */
void (*dpy_gl_scanout_disable)(DisplayChangeListener *dcl);
+ /* required if GL */
void (*dpy_gl_scanout_texture)(DisplayChangeListener *dcl,
uint32_t backing_id,
bool backing_y_0_top,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
+ /* optional (defaults to true if dpy_gl_scanout_dmabuf is set) */
+ bool (*dpy_has_dmabuf)(DisplayChangeListener *dcl);
+ /* optional */
void (*dpy_gl_scanout_dmabuf)(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
+ /* optional */
void (*dpy_gl_cursor_dmabuf)(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf, bool have_hot,
uint32_t hot_x, uint32_t hot_y);
+ /* optional */
void (*dpy_gl_cursor_position)(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y);
+ /* optional */
void (*dpy_gl_release_dmabuf)(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
+ /* required if GL */
void (*dpy_gl_update)(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
@@ -245,40 +297,41 @@ struct DisplayChangeListener {
QLIST_ENTRY(DisplayChangeListener) next;
};
-DisplayState *init_displaystate(void);
-DisplaySurface *qemu_create_displaysurface_from(int width, int height,
- pixman_format_code_t format,
- int linesize, uint8_t *data);
-DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image);
-DisplaySurface *qemu_create_message_surface(int w, int h,
- const char *msg);
-PixelFormat qemu_default_pixelformat(int bpp);
-
-DisplaySurface *qemu_create_displaysurface(int width, int height);
-void qemu_free_displaysurface(DisplaySurface *surface);
-
-static inline int is_surface_bgr(DisplaySurface *surface)
-{
- if (PIXMAN_FORMAT_BPP(surface->format) == 32 &&
- PIXMAN_FORMAT_TYPE(surface->format) == PIXMAN_TYPE_ABGR) {
- return 1;
- } else {
- return 0;
- }
-}
+typedef struct DisplayGLCtxOps {
+ bool (*dpy_gl_ctx_is_compatible_dcl)(DisplayGLCtx *dgc,
+ DisplayChangeListener *dcl);
+ QEMUGLContext (*dpy_gl_ctx_create)(DisplayGLCtx *dgc,
+ QEMUGLParams *params);
+ void (*dpy_gl_ctx_destroy)(DisplayGLCtx *dgc,
+ QEMUGLContext ctx);
+ int (*dpy_gl_ctx_make_current)(DisplayGLCtx *dgc,
+ QEMUGLContext ctx);
+ void (*dpy_gl_ctx_create_texture)(DisplayGLCtx *dgc,
+ DisplaySurface *surface);
+ void (*dpy_gl_ctx_destroy_texture)(DisplayGLCtx *dgc,
+ DisplaySurface *surface);
+ void (*dpy_gl_ctx_update_texture)(DisplayGLCtx *dgc,
+ DisplaySurface *surface,
+ int x, int y, int w, int h);
+} DisplayGLCtxOps;
+
+struct DisplayGLCtx {
+ const DisplayGLCtxOps *ops;
+#ifdef CONFIG_OPENGL
+ QemuGLShader *gls; /* optional shared shader */
+#endif
+};
-static inline int is_buffer_shared(DisplaySurface *surface)
-{
- return !(surface->flags & QEMU_ALLOCATED_FLAG);
-}
+DisplayState *init_displaystate(void);
void register_displaychangelistener(DisplayChangeListener *dcl);
void update_displaychangelistener(DisplayChangeListener *dcl,
uint64_t interval);
void unregister_displaychangelistener(DisplayChangeListener *dcl);
-bool dpy_ui_info_supported(QemuConsole *con);
-int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info);
+bool dpy_ui_info_supported(const QemuConsole *con);
+const QemuUIInfo *dpy_get_ui_info(const QemuConsole *con);
+int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info, bool delay);
void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h);
void dpy_gfx_update_full(QemuConsole *con);
@@ -297,7 +350,8 @@ void dpy_gl_scanout_disable(QemuConsole *con);
void dpy_gl_scanout_texture(QemuConsole *con,
uint32_t backing_id, bool backing_y_0_top,
uint32_t backing_width, uint32_t backing_height,
- uint32_t x, uint32_t y, uint32_t w, uint32_t h);
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void dpy_gl_scanout_dmabuf(QemuConsole *con,
QemuDmaBuf *dmabuf);
void dpy_gl_cursor_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf,
@@ -313,47 +367,8 @@ QEMUGLContext dpy_gl_ctx_create(QemuConsole *con,
QEMUGLParams *params);
void dpy_gl_ctx_destroy(QemuConsole *con, QEMUGLContext ctx);
int dpy_gl_ctx_make_current(QemuConsole *con, QEMUGLContext ctx);
-QEMUGLContext dpy_gl_ctx_get_current(QemuConsole *con);
bool console_has_gl(QemuConsole *con);
-bool console_has_gl_dmabuf(QemuConsole *con);
-
-static inline int surface_stride(DisplaySurface *s)
-{
- return pixman_image_get_stride(s->image);
-}
-
-static inline void *surface_data(DisplaySurface *s)
-{
- return pixman_image_get_data(s->image);
-}
-
-static inline int surface_width(DisplaySurface *s)
-{
- return pixman_image_get_width(s->image);
-}
-
-static inline int surface_height(DisplaySurface *s)
-{
- return pixman_image_get_height(s->image);
-}
-
-static inline int surface_bits_per_pixel(DisplaySurface *s)
-{
- int bits = PIXMAN_FORMAT_BPP(s->format);
- return bits;
-}
-
-static inline int surface_bytes_per_pixel(DisplaySurface *s)
-{
- int bits = PIXMAN_FORMAT_BPP(s->format);
- return DIV_ROUND_UP(bits, 8);
-}
-
-static inline pixman_format_code_t surface_format(DisplaySurface *s)
-{
- return s->format;
-}
typedef uint32_t console_ch_t;
@@ -362,13 +377,21 @@ static inline void console_write_ch(console_ch_t *dest, uint32_t ch)
*dest = ch;
}
+enum {
+ GRAPHIC_FLAGS_NONE = 0,
+ /* require a console/display with GL callbacks */
+ GRAPHIC_FLAGS_GL = 1 << 0,
+ /* require a console/display with DMABUF import */
+ GRAPHIC_FLAGS_DMABUF = 1 << 1,
+};
+
typedef struct GraphicHwOps {
+ int (*get_flags)(void *opaque); /* optional, default 0 */
void (*invalidate)(void *opaque);
void (*gfx_update)(void *opaque);
bool gfx_update_async; /* if true, calls graphic_hw_update_done() */
void (*text_update)(void *opaque, console_ch_t *text);
- void (*update_interval)(void *opaque, uint64_t interval);
- int (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info);
+ void (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info);
void (*gl_block)(void *opaque, bool block);
} GraphicHwOps;
@@ -388,11 +411,14 @@ void graphic_hw_gl_block(QemuConsole *con, bool block);
void qemu_console_early_init(void);
+void qemu_console_set_display_gl_ctx(QemuConsole *con, DisplayGLCtx *ctx);
+
+QemuConsole *qemu_console_lookup_default(void);
QemuConsole *qemu_console_lookup_by_index(unsigned int index);
QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head);
QemuConsole *qemu_console_lookup_by_device_name(const char *device_id,
uint32_t head, Error **errp);
-QemuConsole *qemu_console_lookup_unused(void);
+QEMUCursor *qemu_console_get_cursor(QemuConsole *con);
bool qemu_console_is_visible(QemuConsole *con);
bool qemu_console_is_graphic(QemuConsole *con);
bool qemu_console_is_fixedsize(QemuConsole *con);
@@ -400,7 +426,6 @@ bool qemu_console_is_gl_blocked(QemuConsole *con);
char *qemu_console_get_label(QemuConsole *con);
int qemu_console_get_index(QemuConsole *con);
uint32_t qemu_console_get_head(QemuConsole *con);
-QemuUIInfo *qemu_console_get_ui_info(QemuConsole *con);
int qemu_console_get_width(QemuConsole *con, int fallback);
int qemu_console_get_height(QemuConsole *con, int fallback);
/* Return the low-level window id for the console */
@@ -408,9 +433,10 @@ int qemu_console_get_window_id(QemuConsole *con);
/* Set the low-level window id for the console */
void qemu_console_set_window_id(QemuConsole *con, int window_id);
-void console_select(unsigned int index);
void qemu_console_resize(QemuConsole *con, int width, int height);
DisplaySurface *qemu_console_surface(QemuConsole *con);
+void coroutine_fn qemu_console_co_wait_update(QemuConsole *con);
+int qemu_invalidate_text_consoles(void);
/* console-gl.c */
#ifdef CONFIG_OPENGL
@@ -436,12 +462,14 @@ struct QemuDisplay {
DisplayType type;
void (*early_init)(DisplayOptions *opts);
void (*init)(DisplayState *ds, DisplayOptions *opts);
+ const char *vc;
};
void qemu_display_register(QemuDisplay *ui);
bool qemu_display_find_default(DisplayOptions *opts);
void qemu_display_early_init(DisplayOptions *opts);
void qemu_display_init(DisplayState *ds, DisplayOptions *opts);
+const char *qemu_display_get_vc(DisplayOptions *opts);
void qemu_display_help(void);
/* vnc.c */
@@ -450,10 +478,23 @@ void vnc_display_open(const char *id, Error **errp);
void vnc_display_add_client(const char *id, int csock, bool skipauth);
int vnc_display_password(const char *id, const char *password);
int vnc_display_pw_expire(const char *id, time_t expires);
-QemuOpts *vnc_parse(const char *str, Error **errp);
+void vnc_parse(const char *str);
int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp);
+bool vnc_display_reload_certs(const char *id, Error **errp);
+bool vnc_display_update(DisplayUpdateOptionsVNC *arg, Error **errp);
/* input.c */
int index_from_key(const char *key, size_t key_length);
+#ifdef CONFIG_LINUX
+/* udmabuf.c */
+int udmabuf_fd(void);
+#endif
+
+/* util.c */
+bool qemu_console_fill_device_address(QemuConsole *con,
+ char *device_address,
+ size_t size,
+ Error **errp);
+
#endif
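
A hedged sketch (not part of the patch) of the touch entry point declared in
console.h above; the slot bookkeeping and error handling shown here are
illustrative.

    /* Sketch: forward a single touch-down contact from a UI frontend. */
    static struct touch_slot touch_slots[INPUT_EVENT_SLOTS_MAX];

    static void example_touch_begin(QemuConsole *con, int surf_w, int surf_h,
                                    double x, double y)
    {
        Error *err = NULL;

        touch_slots[0].tracking_id = 0;     /* first contact, slot 0 */
        console_handle_touch_event(con, touch_slots, 0 /* slot index */,
                                   surf_w, surf_h, x, y,
                                   INPUT_MULTI_TOUCH_TYPE_BEGIN, &err);
        if (err) {
            error_report_err(err);
        }
    }
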
diff --git a/include/ui/dbus-display.h b/include/ui/dbus-display.h
new file mode 100644
index 0000000000..7c9ec1a069
--- /dev/null
+++ b/include/ui/dbus-display.h
@@ -0,0 +1,17 @@
+#ifndef DBUS_DISPLAY_H
+#define DBUS_DISPLAY_H
+
+#include "qapi/error.h"
+#include "ui/dbus-module.h"
+
+static inline bool qemu_using_dbus_display(Error **errp)
+{
+ if (!using_dbus_display) {
+ error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
+ "D-Bus display is not in use");
+ return false;
+ }
+ return true;
+}
+
+#endif /* DBUS_DISPLAY_H */
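
A small sketch (not part of the patch) of the intended call pattern; the
handler name is hypothetical.

    /* Sketch: guard a code path that only makes sense with the D-Bus display. */
    void example_add_dbus_client(int csock, Error **errp)
    {
        if (!qemu_using_dbus_display(errp)) {
            return;
        }
        qemu_dbus_display.add_client(csock, errp);
    }
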
diff --git a/include/ui/dbus-module.h b/include/ui/dbus-module.h
new file mode 100644
index 0000000000..5442ee0bb6
--- /dev/null
+++ b/include/ui/dbus-module.h
@@ -0,0 +1,11 @@
+#ifndef DBUS_MODULE_H
+#define DBUS_MODULE_H
+
+struct QemuDBusDisplayOps {
+ bool (*add_client)(int csock, Error **errp);
+};
+
+extern int using_dbus_display;
+extern struct QemuDBusDisplayOps qemu_dbus_display;
+
+#endif /* DBUS_MODULE_H */
diff --git a/include/ui/egl-context.h b/include/ui/egl-context.h
index f004ce11a7..c2761d747a 100644
--- a/include/ui/egl-context.h
+++ b/include/ui/egl-context.h
@@ -4,11 +4,10 @@
#include "ui/console.h"
#include "ui/egl-helpers.h"
-QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl,
+QEMUGLContext qemu_egl_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
-void qemu_egl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx);
-int qemu_egl_make_context_current(DisplayChangeListener *dcl,
+void qemu_egl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx);
+int qemu_egl_make_context_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
-QEMUGLContext qemu_egl_get_current_context(DisplayChangeListener *dcl);
#endif /* EGL_CONTEXT_H */
diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h
index 94a4b3e6f3..4b8c0d2281 100644
--- a/include/ui/egl-helpers.h
+++ b/include/ui/egl-helpers.h
@@ -3,13 +3,16 @@
#include <epoxy/gl.h>
#include <epoxy/egl.h>
+#ifdef CONFIG_GBM
#include <gbm.h>
+#endif
#include "ui/console.h"
#include "ui/shader.h"
extern EGLDisplay *qemu_egl_display;
extern EGLConfig qemu_egl_config;
extern DisplayGLMode qemu_egl_mode;
+extern bool qemu_egl_angle_d3d;
typedef struct egl_fb {
int width;
@@ -17,8 +20,11 @@ typedef struct egl_fb {
GLuint texture;
GLuint framebuffer;
bool delete_texture;
+ QemuDmaBuf *dmabuf;
} egl_fb;
+#define EGL_FB_INIT { 0, }
+
void egl_fb_destroy(egl_fb *fb);
void egl_fb_setup_default(egl_fb *fb, int width, int height);
void egl_fb_setup_for_tex(egl_fb *fb, int width, int height,
@@ -26,16 +32,18 @@ void egl_fb_setup_for_tex(egl_fb *fb, int width, int height,
void egl_fb_setup_new_tex(egl_fb *fb, int width, int height);
void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip);
void egl_fb_read(DisplaySurface *dst, egl_fb *src);
+void egl_fb_read_rect(DisplaySurface *dst, egl_fb *src, int x, int y, int w, int h);
void egl_texture_blit(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip);
void egl_texture_blend(QemuGLShader *gls, egl_fb *dst, egl_fb *src, bool flip,
int x, int y, double scale_x, double scale_y);
-#ifdef CONFIG_OPENGL_DMABUF
+extern EGLContext qemu_egl_rn_ctx;
+
+#ifdef CONFIG_GBM
extern int qemu_egl_rn_fd;
extern struct gbm_device *qemu_egl_rn_gbm_dev;
-extern EGLContext qemu_egl_rn_ctx;
int egl_rendernode_init(const char *rendernode, DisplayGLMode mode);
int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
@@ -43,13 +51,29 @@ int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc,
void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf);
void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf);
+void egl_dmabuf_create_sync(QemuDmaBuf *dmabuf);
+void egl_dmabuf_create_fence(QemuDmaBuf *dmabuf);
#endif
EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, EGLNativeWindowType win);
+#if defined(CONFIG_X11) || defined(CONFIG_GBM)
+
int qemu_egl_init_dpy_x11(EGLNativeDisplayType dpy, DisplayGLMode mode);
int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy, DisplayGLMode mode);
+
+#endif
+
+#ifdef WIN32
+int qemu_egl_init_dpy_win32(EGLNativeDisplayType dpy, DisplayGLMode mode);
+#endif
+
EGLContext qemu_egl_init_ctx(void);
+bool qemu_egl_has_dmabuf(void);
+
+bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp);
+
+const char *qemu_egl_get_error_string(void);
#endif /* EGL_HELPERS_H */
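
A hedged sketch (not part of the patch) of the new egl_init() entry point; the
DisplayGLMode value and the NULL rendernode (meaning "pick a default") are
assumptions about the implementation.

    /* Sketch: bring up EGL for a display backend and report failure. */
    Error *err = NULL;

    if (!egl_init(NULL /* default rendernode */, DISPLAYGL_MODE_ON, &err)) {
        error_report_err(err);
    }
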
diff --git a/include/ui/gtk.h b/include/ui/gtk.h
index eaeb450f91..aa3d637029 100644
--- a/include/ui/gtk.h
+++ b/include/ui/gtk.h
@@ -18,18 +18,24 @@
#include <gdk/gdkwayland.h>
#endif
+#include "ui/clipboard.h"
+#include "ui/console.h"
#include "ui/kbd-state.h"
#if defined(CONFIG_OPENGL)
#include "ui/egl-helpers.h"
#include "ui/egl-context.h"
#endif
+#ifdef CONFIG_VTE
+#include "qemu/fifo8.h"
+#endif
-#define MILLISEC_PER_SEC 1000000
+#define MAX_VCS 10
typedef struct GtkDisplayState GtkDisplayState;
typedef struct VirtualGfxConsole {
GtkWidget *drawing_area;
+ DisplayGLCtx dgc;
DisplayChangeListener dcl;
QKbdState *kbd;
DisplaySurface *ds;
@@ -50,6 +56,7 @@ typedef struct VirtualGfxConsole {
int cursor_y;
bool y0_top;
bool scanout_mode;
+ bool has_dmabuf;
#endif
} VirtualGfxConsole;
@@ -59,6 +66,7 @@ typedef struct VirtualVteConsole {
GtkWidget *scrollbar;
GtkWidget *terminal;
Chardev *chr;
+ Fifo8 out_fifo;
bool echo;
} VirtualVteConsole;
#endif
@@ -84,10 +92,71 @@ typedef struct VirtualConsole {
};
} VirtualConsole;
+struct GtkDisplayState {
+ GtkWidget *window;
+
+ GtkWidget *menu_bar;
+
+ GtkAccelGroup *accel_group;
+
+ GtkWidget *machine_menu_item;
+ GtkWidget *machine_menu;
+ GtkWidget *pause_item;
+ GtkWidget *reset_item;
+ GtkWidget *powerdown_item;
+ GtkWidget *quit_item;
+
+ GtkWidget *view_menu_item;
+ GtkWidget *view_menu;
+ GtkWidget *full_screen_item;
+ GtkWidget *copy_item;
+ GtkWidget *zoom_in_item;
+ GtkWidget *zoom_out_item;
+ GtkWidget *zoom_fixed_item;
+ GtkWidget *zoom_fit_item;
+ GtkWidget *grab_item;
+ GtkWidget *grab_on_hover_item;
+
+ int nb_vcs;
+ VirtualConsole vc[MAX_VCS];
+
+ GtkWidget *show_tabs_item;
+ GtkWidget *untabify_item;
+ GtkWidget *show_menubar_item;
+
+ GtkWidget *vbox;
+ GtkWidget *notebook;
+ int button_mask;
+ gboolean last_set;
+ int last_x;
+ int last_y;
+ int grab_x_root;
+ int grab_y_root;
+ VirtualConsole *kbd_owner;
+ VirtualConsole *ptr_owner;
+
+ gboolean full_screen;
+
+ GdkCursor *null_cursor;
+ Notifier mouse_mode_notifier;
+ gboolean free_scale;
+
+ bool external_pause_update;
+
+ QemuClipboardPeer cbpeer;
+ uint32_t cbpending[QEMU_CLIPBOARD_SELECTION__COUNT];
+ GtkClipboard *gtkcb[QEMU_CLIPBOARD_SELECTION__COUNT];
+ bool cbowner[QEMU_CLIPBOARD_SELECTION__COUNT];
+
+ DisplayOptions *opts;
+};
+
extern bool gtk_use_gl_area;
/* ui/gtk.c */
void gd_update_windowsize(VirtualConsole *vc);
+void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget);
+void gd_hw_gl_flushed(void *vc);
/* ui/gtk-egl.c */
void gd_egl_init(VirtualConsole *vc);
@@ -97,7 +166,7 @@ void gd_egl_update(DisplayChangeListener *dcl,
void gd_egl_refresh(DisplayChangeListener *dcl);
void gd_egl_switch(DisplayChangeListener *dcl,
DisplaySurface *surface);
-QEMUGLContext gd_egl_create_context(DisplayChangeListener *dcl,
+QEMUGLContext gd_egl_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
void gd_egl_scanout_disable(DisplayChangeListener *dcl);
void gd_egl_scanout_texture(DisplayChangeListener *dcl,
@@ -106,7 +175,8 @@ void gd_egl_scanout_texture(DisplayChangeListener *dcl,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl,
QemuDmaBuf *dmabuf);
void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl,
@@ -114,12 +184,12 @@ void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl,
uint32_t hot_x, uint32_t hot_y);
void gd_egl_cursor_position(DisplayChangeListener *dcl,
uint32_t pos_x, uint32_t pos_y);
-void gd_egl_release_dmabuf(DisplayChangeListener *dcl,
- QemuDmaBuf *dmabuf);
+void gd_egl_flush(DisplayChangeListener *dcl,
+ uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gd_egl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gtk_egl_init(DisplayGLMode mode);
-int gd_egl_make_current(DisplayChangeListener *dcl,
+int gd_egl_make_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
/* ui/gtk-gl-area.c */
@@ -130,22 +200,28 @@ void gd_gl_area_update(DisplayChangeListener *dcl,
void gd_gl_area_refresh(DisplayChangeListener *dcl);
void gd_gl_area_switch(DisplayChangeListener *dcl,
DisplaySurface *surface);
-QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl,
+QEMUGLContext gd_gl_area_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
-void gd_gl_area_destroy_context(DisplayChangeListener *dcl,
+void gd_gl_area_destroy_context(DisplayGLCtx *dgc,
QEMUGLContext ctx);
+void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl,
+ QemuDmaBuf *dmabuf);
void gd_gl_area_scanout_texture(DisplayChangeListener *dcl,
uint32_t backing_id,
bool backing_y_0_top,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
+void gd_gl_area_scanout_disable(DisplayChangeListener *dcl);
void gd_gl_area_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
void gtk_gl_area_init(void);
-QEMUGLContext gd_gl_area_get_current_context(DisplayChangeListener *dcl);
-int gd_gl_area_make_current(DisplayChangeListener *dcl,
+int gd_gl_area_make_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
+/* gtk-clipboard.c */
+void gd_clipboard_init(GtkDisplayState *gd);
+
#endif /* UI_GTK_H */
diff --git a/include/ui/input.h b/include/ui/input.h
index c86219a1c1..8f9aac562e 100644
--- a/include/ui/input.h
+++ b/include/ui/input.h
@@ -8,9 +8,12 @@
#define INPUT_EVENT_MASK_BTN (1<<INPUT_EVENT_KIND_BTN)
#define INPUT_EVENT_MASK_REL (1<<INPUT_EVENT_KIND_REL)
#define INPUT_EVENT_MASK_ABS (1<<INPUT_EVENT_KIND_ABS)
+#define INPUT_EVENT_MASK_MTT (1<<INPUT_EVENT_KIND_MTT)
#define INPUT_EVENT_ABS_MIN 0x0000
#define INPUT_EVENT_ABS_MAX 0x7FFF
+#define INPUT_EVENT_SLOTS_MIN 0x0
+#define INPUT_EVENT_SLOTS_MAX 0xa
typedef struct QemuInputHandler QemuInputHandler;
typedef struct QemuInputHandlerState QemuInputHandlerState;
@@ -27,7 +30,7 @@ struct QemuInputHandler {
};
QemuInputHandlerState *qemu_input_handler_register(DeviceState *dev,
- QemuInputHandler *handler);
+ const QemuInputHandler *handler);
void qemu_input_handler_activate(QemuInputHandlerState *s);
void qemu_input_handler_deactivate(QemuInputHandlerState *s);
void qemu_input_handler_unregister(QemuInputHandlerState *s);
@@ -54,13 +57,18 @@ void qemu_input_queue_btn(QemuConsole *src, InputButton btn, bool down);
void qemu_input_update_buttons(QemuConsole *src, uint32_t *button_map,
uint32_t button_old, uint32_t button_new);
-bool qemu_input_is_absolute(void);
+bool qemu_input_is_absolute(QemuConsole *con);
int qemu_input_scale_axis(int value,
int min_in, int max_in,
int min_out, int max_out);
void qemu_input_queue_rel(QemuConsole *src, InputAxis axis, int value);
void qemu_input_queue_abs(QemuConsole *src, InputAxis axis, int value,
int min_in, int max_in);
+void qemu_input_queue_mtt(QemuConsole *src, InputMultiTouchType type, int slot,
+ int tracking_id);
+void qemu_input_queue_mtt_abs(QemuConsole *src, InputAxis axis, int value,
+ int min_in, int max_in,
+ int slot, int tracking_id);
void qemu_input_check_mode_change(void);
void qemu_add_mouse_mode_change_notifier(Notifier *notify);
diff --git a/include/ui/kbd-state.h b/include/ui/kbd-state.h
index eb9067dd53..1f37b932eb 100644
--- a/include/ui/kbd-state.h
+++ b/include/ui/kbd-state.h
@@ -65,7 +65,7 @@ void qkbd_state_key_event(QKbdState *kbd, QKeyCode qcode, bool down);
* using qemu_input_event_send_key_delay().
*
* @kbd: state tracker state.
- * @delay_ms: the delay in miliseconds.
+ * @delay_ms: the delay in milliseconds.
*/
void qkbd_state_set_delay(QKbdState *kbd, int delay_ms);
@@ -99,4 +99,15 @@ bool qkbd_state_modifier_get(QKbdState *kbd, QKbdModifier mod);
*/
void qkbd_state_lift_all_keys(QKbdState *kbd);
+/**
+ * qkbd_state_switch_console: Switch console.
+ *
+ * This sends key up events to the previous console for all keys which are in
+ * down state to prevent keys being stuck, and remembers the new console.
+ *
+ * @kbd: state tracker state.
+ * @con: new QemuConsole for this state tracker.
+ */
+void qkbd_state_switch_console(QKbdState *kbd, QemuConsole *con);
+
#endif /* QEMU_UI_KBD_STATE_H */
diff --git a/include/ui/pixman-minimal.h b/include/ui/pixman-minimal.h
new file mode 100644
index 0000000000..6dd7de1c7e
--- /dev/null
+++ b/include/ui/pixman-minimal.h
@@ -0,0 +1,239 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Tiny subset of PIXMAN API commonly used by QEMU.
+ *
+ * Copyright 1987, 1988, 1989, 1998 The Open Group
+ * Copyright 1987, 1988, 1989 Digital Equipment Corporation
+ * Copyright 1999, 2004, 2008 Keith Packard
+ * Copyright 2000 SuSE, Inc.
+ * Copyright 2000 Keith Packard, member of The XFree86 Project, Inc.
+ * Copyright 2004, 2005, 2007, 2008, 2009, 2010 Red Hat, Inc.
+ * Copyright 2004 Nicholas Miell
+ * Copyright 2005 Lars Knoll & Zack Rusin, Trolltech
+ * Copyright 2005 Trolltech AS
+ * Copyright 2007 Luca Barbato
+ * Copyright 2008 Aaron Plattner, NVIDIA Corporation
+ * Copyright 2008 Rodrigo Kumpera
+ * Copyright 2008 André Tupinambá
+ * Copyright 2008 Mozilla Corporation
+ * Copyright 2008 Frederic Plourde
+ * Copyright 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009, 2010 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PIXMAN_MINIMAL_H
+#define PIXMAN_MINIMAL_H
+
+#define PIXMAN_TYPE_OTHER 0
+#define PIXMAN_TYPE_ARGB 2
+#define PIXMAN_TYPE_ABGR 3
+#define PIXMAN_TYPE_BGRA 8
+#define PIXMAN_TYPE_RGBA 9
+
+#define PIXMAN_FORMAT(bpp, type, a, r, g, b) (((bpp) << 24) | \
+ ((type) << 16) | \
+ ((a) << 12) | \
+ ((r) << 8) | \
+ ((g) << 4) | \
+ ((b)))
+
+#define PIXMAN_FORMAT_RESHIFT(val, ofs, num) \
+ (((val >> (ofs)) & ((1 << (num)) - 1)) << ((val >> 22) & 3))
+
+#define PIXMAN_FORMAT_BPP(f) PIXMAN_FORMAT_RESHIFT(f, 24, 8)
+#define PIXMAN_FORMAT_TYPE(f) (((f) >> 16) & 0x3f)
+#define PIXMAN_FORMAT_A(f) PIXMAN_FORMAT_RESHIFT(f, 12, 4)
+#define PIXMAN_FORMAT_R(f) PIXMAN_FORMAT_RESHIFT(f, 8, 4)
+#define PIXMAN_FORMAT_G(f) PIXMAN_FORMAT_RESHIFT(f, 4, 4)
+#define PIXMAN_FORMAT_B(f) PIXMAN_FORMAT_RESHIFT(f, 0, 4)
+#define PIXMAN_FORMAT_DEPTH(f) (PIXMAN_FORMAT_A(f) + \
+ PIXMAN_FORMAT_R(f) + \
+ PIXMAN_FORMAT_G(f) + \
+ PIXMAN_FORMAT_B(f))
+
+typedef enum {
+ /* 32bpp formats */
+ PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ARGB, 8, 8, 8, 8),
+ PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ARGB, 0, 8, 8, 8),
+ PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ABGR, 8, 8, 8, 8),
+ PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_ABGR, 0, 8, 8, 8),
+ PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_BGRA, 8, 8, 8, 8),
+ PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_BGRA, 0, 8, 8, 8),
+ PIXMAN_r8g8b8a8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_RGBA, 8, 8, 8, 8),
+ PIXMAN_r8g8b8x8 = PIXMAN_FORMAT(32, PIXMAN_TYPE_RGBA, 0, 8, 8, 8),
+ /* 24bpp formats */
+ PIXMAN_r8g8b8 = PIXMAN_FORMAT(24, PIXMAN_TYPE_ARGB, 0, 8, 8, 8),
+ PIXMAN_b8g8r8 = PIXMAN_FORMAT(24, PIXMAN_TYPE_ABGR, 0, 8, 8, 8),
+ /* 16bpp formats */
+ PIXMAN_r5g6b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 0, 5, 6, 5),
+ PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 1, 5, 5, 5),
+ PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16, PIXMAN_TYPE_ARGB, 0, 5, 5, 5),
+} pixman_format_code_t;
+
+typedef struct pixman_image pixman_image_t;
+
+typedef void (*pixman_image_destroy_func_t)(pixman_image_t *image, void *data);
+
+struct pixman_image {
+ int ref_count;
+ pixman_format_code_t format;
+ int width;
+ int height;
+ int stride;
+ uint32_t *data;
+ uint32_t *free_me;
+ pixman_image_destroy_func_t destroy_func;
+ void *destroy_data;
+};
+
+typedef struct pixman_color {
+ uint16_t red;
+ uint16_t green;
+ uint16_t blue;
+ uint16_t alpha;
+} pixman_color_t;
+
+static inline uint32_t *create_bits(pixman_format_code_t format,
+ int width,
+ int height,
+ int *rowstride_bytes)
+{
+ int stride = 0;
+ size_t buf_size = 0;
+ int bpp = PIXMAN_FORMAT_BPP(format);
+
+ /*
+ * Calculate the following while checking for overflow truncation:
+ * stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
+ */
+
+ if (unlikely(__builtin_mul_overflow(width, bpp, &stride))) {
+ return NULL;
+ }
+
+ if (unlikely(__builtin_add_overflow(stride, 0x1f, &stride))) {
+ return NULL;
+ }
+
+ stride >>= 5;
+
+ stride *= sizeof(uint32_t);
+
+ if (unlikely(__builtin_mul_overflow((size_t) height,
+ (size_t) stride,
+ &buf_size))) {
+ return NULL;
+ }
+
+ if (rowstride_bytes) {
+ *rowstride_bytes = stride;
+ }
+
+ return g_malloc0(buf_size);
+}
+
+static inline pixman_image_t *pixman_image_create_bits(pixman_format_code_t format,
+ int width,
+ int height,
+ uint32_t *bits,
+ int rowstride_bytes)
+{
+ pixman_image_t *i = g_new0(pixman_image_t, 1);
+
+ i->width = width;
+ i->height = height;
+ i->format = format;
+ if (bits) {
+ i->data = bits;
+ } else {
+ i->free_me = i->data =
+ create_bits(format, width, height, &rowstride_bytes);
+ if (width && height) {
+ assert(i->data);
+ }
+ }
+ i->stride = rowstride_bytes ? rowstride_bytes :
+ width * DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
+ i->ref_count = 1;
+
+ return i;
+}
+
+static inline pixman_image_t *pixman_image_ref(pixman_image_t *i)
+{
+ i->ref_count++;
+ return i;
+}
+
+static inline bool pixman_image_unref(pixman_image_t *i)
+{
+ i->ref_count--;
+
+ if (i->ref_count == 0) {
+ if (i->destroy_func) {
+ i->destroy_func(i, i->destroy_data);
+ }
+ g_free(i->free_me);
+ g_free(i);
+
+ return true;
+ }
+
+ return false;
+}
+
+static inline void pixman_image_set_destroy_function(pixman_image_t *i,
+ pixman_image_destroy_func_t func,
+ void *data)
+
+{
+ i->destroy_func = func;
+ i->destroy_data = data;
+}
+
+static inline uint32_t *pixman_image_get_data(pixman_image_t *i)
+{
+ return i->data;
+}
+
+static inline int pixman_image_get_height(pixman_image_t *i)
+{
+ return i->height;
+}
+
+static inline int pixman_image_get_width(pixman_image_t *i)
+{
+ return i->width;
+}
+
+static inline int pixman_image_get_stride(pixman_image_t *i)
+{
+ return i->stride;
+}
+
+static inline pixman_format_code_t pixman_image_get_format(pixman_image_t *i)
+{
+ return i->format;
+}
+
+#endif /* PIXMAN_MINIMAL_H */
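
A short usage sketch (not part of the patch) built only from the minimal subset
declared above.

    /* Sketch: allocate a 640x480 xRGB image owning its storage, touch a row,
     * then drop the last reference (which frees both data and struct). */
    pixman_image_t *img = pixman_image_create_bits(PIXMAN_x8r8g8b8, 640, 480,
                                                   NULL /* allocate */, 0);
    uint32_t *row = pixman_image_get_data(img);

    for (int x = 0; x < pixman_image_get_width(img); x++) {
        row[x] = 0x00ffffff;                /* opaque white, first row */
    }

    pixman_image_unref(img);
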
diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h
index 87737a6f16..ef13a8210c 100644
--- a/include/ui/qemu-pixman.h
+++ b/include/ui/qemu-pixman.h
@@ -6,11 +6,11 @@
#ifndef QEMU_PIXMAN_H
#define QEMU_PIXMAN_H
-/* pixman-0.16.0 headers have a redundant declaration */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wredundant-decls"
+#ifdef CONFIG_PIXMAN
#include <pixman.h>
-#pragma GCC diagnostic pop
+#else
+#include "pixman-minimal.h"
+#endif
/*
* pixman image formats are defined to be native endian,
@@ -19,7 +19,7 @@
* feeding libjpeg / libpng and writing screenshots.
*/
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
# define PIXMAN_BE_r8g8b8 PIXMAN_r8g8b8
# define PIXMAN_BE_x8r8g8b8 PIXMAN_x8r8g8b8
# define PIXMAN_BE_a8r8g8b8 PIXMAN_a8r8g8b8
@@ -32,6 +32,8 @@
# define PIXMAN_LE_r8g8b8 PIXMAN_b8g8r8
# define PIXMAN_LE_a8r8g8b8 PIXMAN_b8g8r8a8
# define PIXMAN_LE_x8r8g8b8 PIXMAN_b8g8r8x8
+# define PIXMAN_LE_a8b8g8r8 PIXMAN_r8g8b8a8
+# define PIXMAN_LE_x8b8g8r8 PIXMAN_r8g8b8x8
#else
# define PIXMAN_BE_r8g8b8 PIXMAN_b8g8r8
# define PIXMAN_BE_x8r8g8b8 PIXMAN_b8g8r8x8
@@ -45,8 +47,16 @@
# define PIXMAN_LE_r8g8b8 PIXMAN_r8g8b8
# define PIXMAN_LE_a8r8g8b8 PIXMAN_a8r8g8b8
# define PIXMAN_LE_x8r8g8b8 PIXMAN_x8r8g8b8
+# define PIXMAN_LE_a8b8g8r8 PIXMAN_a8b8g8r8
+# define PIXMAN_LE_x8b8g8r8 PIXMAN_x8b8g8r8
#endif
+#define QEMU_PIXMAN_COLOR(r, g, b) \
+ { .red = r << 8, .green = g << 8, .blue = b << 8, .alpha = 0xffff }
+
+#define QEMU_PIXMAN_COLOR_BLACK QEMU_PIXMAN_COLOR(0x00, 0x00, 0x00)
+#define QEMU_PIXMAN_COLOR_GRAY QEMU_PIXMAN_COLOR(0xaa, 0xaa, 0xaa)
+
/* -------------------------------------------------------------------- */
typedef struct PixelFormat {
@@ -62,22 +72,20 @@ typedef struct PixelFormat {
PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format);
pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian);
pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format);
+uint32_t qemu_pixman_to_drm_format(pixman_format_code_t pixman);
int qemu_pixman_get_type(int rshift, int gshift, int bshift);
-pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
bool qemu_pixman_check_format(DisplayChangeListener *dcl,
pixman_format_code_t format);
+#ifdef CONFIG_PIXMAN
+pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format,
int width);
void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
int width, int x, int y);
-void qemu_pixman_linebuf_copy(pixman_image_t *fb, int width, int x, int y,
- pixman_image_t *linebuf);
pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format,
pixman_image_t *image);
-void qemu_pixman_image_unref(pixman_image_t *image);
-pixman_color_t qemu_pixman_color(PixelFormat *pf, uint32_t color);
pixman_image_t *qemu_pixman_glyph_from_vgafont(int height, const uint8_t *font,
unsigned int ch);
void qemu_pixman_glyph_render(pixman_image_t *glyph,
@@ -85,6 +93,9 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph,
pixman_color_t *fgcol,
pixman_color_t *bgcol,
int x, int y, int cw, int ch);
+#endif
+
+void qemu_pixman_image_unref(pixman_image_t *image);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(pixman_image_t, qemu_pixman_image_unref)
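
A brief sketch (not part of the patch) of the line-buffer helpers guarded by
CONFIG_PIXMAN above, using the autoptr cleanup registered here; the function
name is illustrative.

    #ifdef CONFIG_PIXMAN
    /* Sketch: copy one framebuffer row into a temporary line buffer. */
    static void example_copy_row(pixman_image_t *fb, int width, int y)
    {
        g_autoptr(pixman_image_t) linebuf =
            qemu_pixman_linebuf_create(PIXMAN_BE_x8r8g8b8, width);

        qemu_pixman_linebuf_fill(linebuf, fb, width, 0 /* x */, y);
        /* linebuf is unreferenced automatically when it goes out of scope */
    }
    #endif
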
diff --git a/include/ui/qemu-spice-module.h b/include/ui/qemu-spice-module.h
new file mode 100644
index 0000000000..1f22d557ea
--- /dev/null
+++ b/include/ui/qemu-spice-module.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_SPICE_MODULE_H
+#define QEMU_SPICE_MODULE_H
+
+#ifdef CONFIG_SPICE
+#include <spice.h>
+#endif
+
+typedef struct SpiceInfo SpiceInfo;
+
+struct QemuSpiceOps {
+ void (*init)(void);
+ void (*display_init)(void);
+ int (*migrate_info)(const char *h, int p, int t, const char *s);
+ int (*set_passwd)(const char *passwd,
+ bool fail_if_connected, bool disconnect_if_connected);
+ int (*set_pw_expire)(time_t expires);
+ int (*display_add_client)(int csock, int skipauth, int tls);
+#ifdef CONFIG_SPICE
+ int (*add_interface)(SpiceBaseInstance *sin);
+ SpiceInfo* (*qmp_query)(Error **errp);
+#endif
+};
+
+extern int using_spice;
+extern struct QemuSpiceOps qemu_spice;
+
+#endif
diff --git a/include/ui/qemu-spice.h b/include/ui/qemu-spice.h
index 8c23dfe717..b7d493742c 100644
--- a/include/ui/qemu-spice.h
+++ b/include/ui/qemu-spice.h
@@ -19,73 +19,32 @@
#define QEMU_SPICE_H
#include "qapi/error.h"
+#include "ui/qemu-spice-module.h"
#ifdef CONFIG_SPICE
#include <spice.h>
#include "qemu/config-file.h"
-extern int using_spice;
-
-void qemu_spice_init(void);
void qemu_spice_input_init(void);
-void qemu_spice_audio_init(void);
void qemu_spice_display_init(void);
-int qemu_spice_display_add_client(int csock, int skipauth, int tls);
-int qemu_spice_add_interface(SpiceBaseInstance *sin);
+void qemu_spice_display_init_done(void);
bool qemu_spice_have_display_interface(QemuConsole *con);
int qemu_spice_add_display_interface(QXLInstance *qxlin, QemuConsole *con);
-int qemu_spice_set_passwd(const char *passwd,
- bool fail_if_connected, bool disconnect_if_connected);
-int qemu_spice_set_pw_expire(time_t expires);
int qemu_spice_migrate_info(const char *hostname, int port, int tls_port,
const char *subject);
-#if !defined(SPICE_SERVER_VERSION) || (SPICE_SERVER_VERSION < 0xc06)
-#define SPICE_NEEDS_SET_MM_TIME 1
+#if SPICE_SERVER_VERSION >= 0x000f00 /* release 0.15.0 */
+#define SPICE_HAS_ATTACHED_WORKER 1
#else
-#define SPICE_NEEDS_SET_MM_TIME 0
+#define SPICE_HAS_ATTACHED_WORKER 0
#endif
-void qemu_spice_register_ports(void);
#else /* CONFIG_SPICE */
#include "qemu/error-report.h"
-#define using_spice 0
#define spice_displays 0
-static inline int qemu_spice_set_passwd(const char *passwd,
- bool fail_if_connected,
- bool disconnect_if_connected)
-{
- return -1;
-}
-static inline int qemu_spice_set_pw_expire(time_t expires)
-{
- return -1;
-}
-static inline int qemu_spice_migrate_info(const char *h, int p, int t,
- const char *s)
-{
- return -1;
-}
-
-static inline int qemu_spice_display_add_client(int csock, int skipauth,
- int tls)
-{
- return -1;
-}
-
-static inline void qemu_spice_display_init(void)
-{
- /* This must never be called if CONFIG_SPICE is disabled */
- error_report("spice support is disabled");
- abort();
-}
-
-static inline void qemu_spice_init(void)
-{
-}
#endif /* CONFIG_SPICE */
diff --git a/include/ui/rect.h b/include/ui/rect.h
new file mode 100644
index 0000000000..7ebf47ebcd
--- /dev/null
+++ b/include/ui/rect.h
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef QEMU_RECT_H
+#define QEMU_RECT_H
+
+
+typedef struct QemuRect {
+ int16_t x;
+ int16_t y;
+ uint16_t width;
+ uint16_t height;
+} QemuRect;
+
+static inline void qemu_rect_init(QemuRect *rect,
+ int16_t x, int16_t y,
+ uint16_t width, uint16_t height)
+{
+ rect->x = x;
+ rect->y = y;
+ rect->width = width;
+ rect->height = height;
+}
+
+static inline void qemu_rect_translate(QemuRect *rect,
+ int16_t dx, int16_t dy)
+{
+ rect->x += dx;
+ rect->y += dy;
+}
+
+static inline bool qemu_rect_intersect(const QemuRect *a, const QemuRect *b,
+ QemuRect *res)
+{
+ int16_t x1, x2, y1, y2;
+
+ x1 = MAX(a->x, b->x);
+ y1 = MAX(a->y, b->y);
+ x2 = MIN(a->x + a->width, b->x + b->width);
+ y2 = MIN(a->y + a->height, b->y + b->height);
+
+ if (x1 >= x2 || y1 >= y2) {
+ if (res) {
+ qemu_rect_init(res, 0, 0, 0, 0);
+ }
+
+ return false;
+ }
+
+ if (res) {
+ qemu_rect_init(res, x1, y1, x2 - x1, y2 - y1);
+ }
+
+ return true;
+}
+
+#endif
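
A worked example (not part of the patch) of the intersection helper above.

    /* Sketch: clip a damage rectangle against the visible area. */
    QemuRect damage, visible, clipped;

    qemu_rect_init(&damage, 100, 80, 640, 480);
    qemu_rect_init(&visible, 0, 0, 320, 240);

    if (qemu_rect_intersect(&damage, &visible, &clipped)) {
        /* clipped == { .x = 100, .y = 80, .width = 220, .height = 160 } */
    }
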
diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h
index 0875b8d56b..e3acc7c82a 100644
--- a/include/ui/sdl2.h
+++ b/include/ui/sdl2.h
@@ -5,7 +5,18 @@
#undef WIN32_LEAN_AND_MEAN
#include <SDL.h>
+
+/* With Alpine / musl libc, the SDL headers pull in directfb headers,
+ * which in turn trigger a warning about redundant declarations for
+ * direct_waitqueue_deinit.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
+
#include <SDL_syswm.h>
+
+#pragma GCC diagnostic pop
+
#ifdef CONFIG_SDL_IMAGE
# include <SDL_image.h>
#endif
@@ -16,6 +27,7 @@
#endif
struct sdl2_console {
+ DisplayGLCtx dgc;
DisplayChangeListener dcl;
DisplaySurface *surface;
DisplayOptions *opts;
@@ -65,12 +77,11 @@ void sdl2_gl_switch(DisplayChangeListener *dcl,
void sdl2_gl_refresh(DisplayChangeListener *dcl);
void sdl2_gl_redraw(struct sdl2_console *scon);
-QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl,
+QEMUGLContext sdl2_gl_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params);
-void sdl2_gl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx);
-int sdl2_gl_make_context_current(DisplayChangeListener *dcl,
+void sdl2_gl_destroy_context(DisplayGLCtx *dgc, QEMUGLContext ctx);
+int sdl2_gl_make_context_current(DisplayGLCtx *dgc,
QEMUGLContext ctx);
-QEMUGLContext sdl2_gl_get_current_context(DisplayChangeListener *dcl);
void sdl2_gl_scanout_disable(DisplayChangeListener *dcl);
void sdl2_gl_scanout_texture(DisplayChangeListener *dcl,
@@ -79,7 +90,8 @@ void sdl2_gl_scanout_texture(DisplayChangeListener *dcl,
uint32_t backing_width,
uint32_t backing_height,
uint32_t x, uint32_t y,
- uint32_t w, uint32_t h);
+ uint32_t w, uint32_t h,
+ void *d3d_tex2d);
void sdl2_gl_scanout_flush(DisplayChangeListener *dcl,
uint32_t x, uint32_t y, uint32_t w, uint32_t h);
diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h
index 4a47ffdd4c..e1a9b36185 100644
--- a/include/ui/spice-display.h
+++ b/include/ui/spice-display.h
@@ -27,12 +27,10 @@
#include "ui/qemu-pixman.h"
#include "ui/console.h"
-#if defined(CONFIG_OPENGL_DMABUF)
-# if SPICE_SERVER_VERSION >= 0x000d01 /* release 0.13.1 */
+#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM)
# define HAVE_SPICE_GL 1
# include "ui/egl-helpers.h"
# include "ui/egl-context.h"
-# endif
#endif
#define NUM_MEMSLOTS 8
@@ -44,7 +42,7 @@
#define NUM_MEMSLOTS_GROUPS 2
/*
- * Internal enum to differenciate between options for
+ * Internal enum to differentiate between options for
* io calls that have a sync (old) version and an _async (new)
* version:
* QXL_SYNC: use the old version
@@ -86,6 +84,7 @@ typedef struct SimpleSpiceCursor SimpleSpiceCursor;
struct SimpleSpiceDisplay {
DisplaySurface *ds;
+ DisplayGLCtx dgc;
DisplayChangeListener dcl;
void *buf;
int bufsize;
@@ -183,8 +182,4 @@ void qemu_spice_display_start(void);
void qemu_spice_display_stop(void);
int qemu_spice_display_is_running(SimpleSpiceDisplay *ssd);
-bool qemu_spice_fill_device_address(QemuConsole *con,
- char *device_address,
- size_t size);
-
#endif
diff --git a/include/ui/surface.h b/include/ui/surface.h
new file mode 100644
index 0000000000..4244e0ca4a
--- /dev/null
+++ b/include/ui/surface.h
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * QEMU UI Console
+ */
+#ifndef SURFACE_H
+#define SURFACE_H
+
+#include "ui/qemu-pixman.h"
+
+#ifdef CONFIG_OPENGL
+# include <epoxy/gl.h>
+# include "ui/shader.h"
+#endif
+
+#define QEMU_ALLOCATED_FLAG 0x01
+#define QEMU_PLACEHOLDER_FLAG 0x02
+
+typedef struct DisplaySurface {
+ pixman_image_t *image;
+ uint8_t flags;
+#ifdef CONFIG_OPENGL
+ GLenum glformat;
+ GLenum gltype;
+ GLuint texture;
+#endif
+#ifdef WIN32
+ HANDLE handle;
+ uint32_t handle_offset;
+#endif
+} DisplaySurface;
+
+PixelFormat qemu_default_pixelformat(int bpp);
+
+DisplaySurface *qemu_create_displaysurface_from(int width, int height,
+ pixman_format_code_t format,
+ int linesize, uint8_t *data);
+DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image);
+DisplaySurface *qemu_create_placeholder_surface(int w, int h,
+ const char *msg);
+#ifdef WIN32
+void qemu_displaysurface_win32_set_handle(DisplaySurface *surface,
+ HANDLE h, uint32_t offset);
+#endif
+
+DisplaySurface *qemu_create_displaysurface(int width, int height);
+void qemu_free_displaysurface(DisplaySurface *surface);
+
+static inline int is_buffer_shared(DisplaySurface *surface)
+{
+ return !(surface->flags & QEMU_ALLOCATED_FLAG);
+}
+
+static inline int is_placeholder(DisplaySurface *surface)
+{
+ return surface->flags & QEMU_PLACEHOLDER_FLAG;
+}
+
+static inline int surface_stride(DisplaySurface *s)
+{
+ return pixman_image_get_stride(s->image);
+}
+
+static inline void *surface_data(DisplaySurface *s)
+{
+ return pixman_image_get_data(s->image);
+}
+
+static inline int surface_width(DisplaySurface *s)
+{
+ return pixman_image_get_width(s->image);
+}
+
+static inline int surface_height(DisplaySurface *s)
+{
+ return pixman_image_get_height(s->image);
+}
+
+static inline pixman_format_code_t surface_format(DisplaySurface *s)
+{
+ return pixman_image_get_format(s->image);
+}
+
+static inline int surface_bits_per_pixel(DisplaySurface *s)
+{
+ int bits = PIXMAN_FORMAT_BPP(surface_format(s));
+ return bits;
+}
+
+static inline int surface_bytes_per_pixel(DisplaySurface *s)
+{
+ int bits = PIXMAN_FORMAT_BPP(surface_format(s));
+ return DIV_ROUND_UP(bits, 8);
+}
+
+#endif
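The inline accessors above are the intended way to reach pixel data without poking at the pixman image directly. A rough sketch of a consumer, assuming the normal QEMU build environment (qemu/osdep.h for memset and DIV_ROUND_UP, pixman available):

    /* Illustrative only: zero the visible pixels of an existing surface,
     * honouring the pixman stride, which may be larger than width * bpp. */
    #include "qemu/osdep.h"
    #include "ui/surface.h"

    void clear_surface(DisplaySurface *s)
    {
        uint8_t *line = surface_data(s);
        int stride = surface_stride(s);
        int visible = surface_width(s) * surface_bytes_per_pixel(s);

        for (int y = 0; y < surface_height(s); y++) {
            memset(line, 0, visible);
            line += stride;
        }
    }

A caller would obtain the surface from qemu_create_displaysurface() (or one of the _from/_pixman variants) and release it with qemu_free_displaysurface() when done.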
diff --git a/include/user/safe-syscall.h b/include/user/safe-syscall.h
new file mode 100644
index 0000000000..aa075f4d5c
--- /dev/null
+++ b/include/user/safe-syscall.h
@@ -0,0 +1,140 @@
+/*
+ * safe-syscall.h: prototypes for linux-user signal-race-safe syscalls
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LINUX_USER_SAFE_SYSCALL_H
+#define LINUX_USER_SAFE_SYSCALL_H
+
+/**
+ * safe_syscall:
+ * @number: number of the system call to make
+ * ...: arguments to the system call
+ *
+ * Call a system call if no guest signal is pending.
+ * This has the same API as the libc syscall() function, except that it
+ * may return -1 with errno == QEMU_ERESTARTSYS if a signal was pending.
+ *
+ * Returns: the system call result, or -1 with an error code in errno
+ * (Errnos are host errnos; we rely on QEMU_ERESTARTSYS not clashing
+ * with any of the host errno values.)
+ */
+
+/*
+ * A guide to using safe_syscall() to handle interactions between guest
+ * syscalls and guest signals:
+ *
+ * Guest syscalls come in two flavours:
+ *
+ * (1) Non-interruptible syscalls
+ *
+ * These are guest syscalls that never get interrupted by signals and
+ * so never return EINTR. They can be implemented straightforwardly in
+ * QEMU: just make sure that if the implementation code has to make any
+ * blocking calls that those calls are retried if they return EINTR.
+ * It's also OK to implement these with safe_syscall, though it will be
+ * a little less efficient if a signal is delivered at the 'wrong' moment.
+ *
+ * Some non-interruptible syscalls need to be handled using block_signals()
+ * to block signals for the duration of the syscall. This mainly applies
+ * to code which needs to modify the data structures used by the
+ * host_signal_handler() function and the functions it calls, including
+ * all syscalls which change the thread's signal mask.
+ *
+ * (2) Interruptible syscalls
+ *
+ * These are guest syscalls that can be interrupted by signals and
+ * for which we need to either return EINTR or arrange for the guest
+ * syscall to be restarted. This category includes both syscalls which
+ * always restart (and in the kernel return -ERESTARTNOINTR), ones
+ * which only restart if there is no handler (kernel returns -ERESTARTNOHAND
+ * or -ERESTART_RESTARTBLOCK), and the most common kind which restart
+ * if the handler was registered with SA_RESTART (kernel returns
+ * -ERESTARTSYS). System calls which are only interruptible in some
+ * situations (like 'open') also need to be handled this way.
+ *
+ * Here it is important that the host syscall is made
+ * via this safe_syscall() function, and *not* via the host libc.
+ * If the host libc is used then the implementation will appear to work
+ * most of the time, but there will be a race condition where a
+ * signal could arrive just before we make the host syscall inside libc,
+ * and then the guest syscall will not correctly be interrupted.
+ * Instead the implementation of the guest syscall can use the safe_syscall
+ * function but otherwise just return the result or errno in the usual
+ * way; the main loop code will take care of restarting the syscall
+ * if appropriate.
+ *
+ * (If the implementation needs to make multiple host syscalls this is
+ * OK; any which might really block must be via safe_syscall(); for those
+ * which are only technically blocking (i.e. which we know in practice won't
+ * stay in the host kernel indefinitely) it's OK to use libc if necessary.
+ * You must be able to cope with backing out correctly if some safe_syscall
+ * you make in the implementation returns either -QEMU_ERESTARTSYS or
+ * EINTR though.)
+ *
+ * block_signals() cannot be used for interruptible syscalls.
+ *
+ *
+ * How and why the safe_syscall implementation works:
+ *
+ * The basic setup is that we make the host syscall via a known
+ * section of host native assembly. If a signal occurs, our signal
+ * handler checks the interrupted host PC against the address of that
+ * known section. If the PC is before or at the address of the syscall
+ * instruction then we change the PC to point at a "return
+ * -QEMU_ERESTARTSYS" code path instead, and then exit the signal handler
+ * (causing the safe_syscall() call to immediately return that value).
+ * Then in the main.c loop if we see this magic return value we adjust
+ * the guest PC to wind it back to before the system call, and invoke
+ * the guest signal handler as usual.
+ *
+ * This winding-back will happen in two cases:
+ * (1) signal came in just before we took the host syscall (a race);
+ * in this case we'll take the guest signal and have another go
+ * at the syscall afterwards, and this is indistinguishable for the
+ * guest from the timing having been different such that the guest
+ * signal really did win the race
+ * (2) signal came in while the host syscall was blocking, and the
+ * host kernel decided the syscall should be restarted;
+ * in this case we want to restart the guest syscall also, and so
+ * rewinding is the right thing. (Note that "restart" semantics mean
+ * "first call the signal handler, then reattempt the syscall".)
+ * The other situation to consider is when a signal came in while the
+ * host syscall was blocking, and the host kernel decided that the syscall
+ * should not be restarted; in this case QEMU's host signal handler will
+ * be invoked with the PC pointing just after the syscall instruction,
+ * with registers indicating an EINTR return; the special code in the
+ * handler will not kick in, and we will return EINTR to the guest as
+ * we should.
+ *
+ * Notice that we can leave the host kernel to make the decision for
+ * us about whether to do a restart of the syscall or not; we do not
+ * need to check SA_RESTART flags in QEMU or distinguish the various
+ * kinds of restartability.
+ */
+
+/* The core part of this function is implemented in assembly */
+long safe_syscall_base(int *pending, long number, ...);
+long safe_syscall_set_errno_tail(int value);
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+#define safe_syscall(...) \
+ safe_syscall_base(&get_task_state(thread_cpu)->signal_pending, \
+ __VA_ARGS__)
+
+#endif
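As a concrete illustration of the interruptible case (2) described above, a linux-user syscall implementation issues the blocking host call through safe_syscall() and lets the caller deal with -QEMU_ERESTARTSYS. This is only a sketch: get_errno() and abi_long come from the linux-user code, and the wrapper name is hypothetical.

    /* Hypothetical handler for a blocking read(2). If a guest signal
     * arrives around the host syscall, safe_syscall() returns -1 with
     * errno == QEMU_ERESTARTSYS and the main loop rewinds the guest PC
     * so the syscall is retried after the guest signal handler runs. */
    static abi_long do_guest_read(int fd, void *host_buf, size_t count)
    {
        long ret = safe_syscall(__NR_read, fd, host_buf, count);

        /* get_errno() folds the -1/errno convention into the negative
         * errno value the guest ABI expects. */
        return get_errno(ret);
    }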
diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h
index 79fd3e5aa9..b48b2b2d0a 100644
--- a/include/user/syscall-trace.h
+++ b/include/user/syscall-trace.h
@@ -7,10 +7,13 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef _SYSCALL_TRACE_H_
-#define _SYSCALL_TRACE_H_
+#ifndef SYSCALL_TRACE_H
+#define SYSCALL_TRACE_H
-#include "trace-root.h"
+#include "exec/user/abitypes.h"
+#include "gdbstub/user.h"
+#include "qemu/plugin.h"
+#include "trace/trace-root.h"
/*
* These helpers just provide a common place for the various
@@ -18,25 +21,23 @@
* could potentially unify the -strace code here as well.
*/
-static inline void record_syscall_start(void *cpu, int num,
+static inline void record_syscall_start(CPUState *cpu, int num,
abi_long arg1, abi_long arg2,
abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6,
abi_long arg7, abi_long arg8)
{
- trace_guest_user_syscall(cpu, num,
- arg1, arg2, arg3, arg4,
- arg5, arg6, arg7, arg8);
qemu_plugin_vcpu_syscall(cpu, num,
arg1, arg2, arg3, arg4,
arg5, arg6, arg7, arg8);
+ gdb_syscall_entry(cpu, num);
}
-static inline void record_syscall_return(void *cpu, int num, abi_long ret)
+static inline void record_syscall_return(CPUState *cpu, int num, abi_long ret)
{
- trace_guest_user_syscall_ret(cpu, num, ret);
qemu_plugin_vcpu_syscall_ret(cpu, num, ret);
+ gdb_syscall_return(cpu, num);
}
-#endif /* _SYSCALL_TRACE_H_ */
+#endif /* SYSCALL_TRACE_H */
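A minimal sketch of how the linux-user dispatcher brackets the real work with these two helpers, so that plugins and the gdbstub observe both entry and return. The signature is simplified and do_syscall_internal is a placeholder name; the actual dispatcher lives in linux-user/syscall.c:

    abi_long do_syscall(CPUState *cpu, int num,
                        abi_long a1, abi_long a2, abi_long a3, abi_long a4,
                        abi_long a5, abi_long a6, abi_long a7, abi_long a8)
    {
        abi_long ret;

        record_syscall_start(cpu, num, a1, a2, a3, a4, a5, a6, a7, a8);
        ret = do_syscall_internal(cpu, num, a1, a2, a3, a4, a5, a6, a7, a8);
        record_syscall_return(cpu, num, ret);

        return ret;
    }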