author    Jon Derrick <jonathan.derrick@intel.com>  2020-06-11 11:36:39 -0600
committer Alex Williamson <alex.williamson@redhat.com>  2020-06-11 11:36:39 -0600
commit    ee7932b0bba2062a0fad1e70d576ef13c4906c83 (patch)
tree      167f52300ea37d622de3342769f4df674265053d /hw/vfio/pci-quirks.c
parent    3666f684761a3cccd86d13c752273be207ecade4 (diff)
hw/vfio: Add VMD Passthrough Quirk
The VMD endpoint provides a real PCIe domain to the guest, including bridges and endpoints. Because the VMD domain is enumerated by the guest kernel, the guest kernel will assign Guest Physical Addresses to the downstream endpoint BARs and bridge windows.

When the guest kernel performs MMIO to VMD sub-devices, the MMU will translate from the guest address space to the physical address space. Because the bridges have been programmed with guest addresses, the bridges will reject the transaction containing physical addresses.

VMD device 28C0 natively assists passthrough by providing the Host Physical Address in shadow registers accessible to the guest for bridge window assignment. The shadow registers are valid if bit 1 is set in VMD VMLOCK config register 0x70.

In order to support existing VMDs, this quirk provides the shadow registers in a vendor-specific PCI capability to the vfio-passthrough device for all VMD device IDs which don't natively assist with passthrough. The Linux VMD driver is updated to check for this new vendor-specific capability.

Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
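For reference, the 24-byte vendor-specific capability that the quirk builds at config offset 0xE8 can be pictured with the layout below. The struct and field names are illustrative only (they appear in neither QEMU nor the Linux driver); the offsets and sizes follow the pci_add_capability()/pci_set_byte()/pci_set_long()/memcpy() sequence in the diff.

#include <stdint.h>

/* Illustrative only: byte layout of the "SHDW" vendor-specific capability. */
struct vmd_shadow_cap {
    uint8_t  cap_id;       /* 0x00: PCI_CAP_ID_VNDR (0x09), written by pci_add_capability() */
    uint8_t  cap_next;     /* 0x01: next-capability pointer, maintained by QEMU */
    uint8_t  length;       /* 0x02: VMD_SHADOW_CAP_LEN (24) */
    uint8_t  version;      /* 0x03: VMD_SHADOW_CAP_VER (1) */
    uint32_t signature;    /* 0x04: 0x53484457 ("SHDW") */
    uint64_t membar1_hpa;  /* 0x08: host config-space BAR2/BAR3 (MEMBAR1), raw 64-bit value */
    uint64_t membar2_hpa;  /* 0x10: host config-space BAR4/BAR5 (MEMBAR2), raw 64-bit value */
};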
Diffstat (limited to 'hw/vfio/pci-quirks.c')
-rw-r--r--  hw/vfio/pci-quirks.c | 84
1 file changed, 72 insertions(+), 12 deletions(-)
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index f2155ddb1d..a1e615c54e 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1567,18 +1567,6 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
return 0;
}
-int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
-{
- int ret;
-
- ret = vfio_add_nv_gpudirect_cap(vdev, errp);
- if (ret) {
- return ret;
- }
-
- return 0;
-}
-
static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v,
const char *name,
void *opaque, Error **errp)
@@ -1709,3 +1697,75 @@ free_exit:
return ret;
}
+
+/*
+ * The VMD endpoint provides a real PCIe domain to the guest and the guest
+ * kernel performs enumeration of the VMD sub-device domain. Guest transactions
+ * to VMD sub-devices go through MMU translation from guest addresses to
+ * physical addresses. When MMIO goes to an endpoint after being translated to
+ * physical addresses, the bridge rejects the transaction because the window
+ * has been programmed with guest addresses.
+ *
+ * VMD can use the Host Physical Address in order to correctly program the
+ * bridge windows in its PCIe domain. VMD device 28C0 has HPA shadow registers
+ * located at offset 0x2000 in MEMBAR2 (BAR 4). This quirk provides the HPA
+ * shadow registers in a vendor-specific capability register for devices
+ * without native support. The position of 0xE8-0xFF is in the reserved range
+ * of the VMD device capability space following the Power Management
+ * Capability.
+ */
+#define VMD_SHADOW_CAP_VER 1
+#define VMD_SHADOW_CAP_LEN 24
+static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
+{
+ uint8_t membar_phys[16];
+ int ret, pos = 0xE8;
+
+ if (!(vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x201D) ||
+ vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x467F) ||
+ vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x4C3D) ||
+ vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x9A0B))) {
+ return 0;
+ }
+
+ ret = pread(vdev->vbasedev.fd, membar_phys, 16,
+ vdev->config_offset + PCI_BASE_ADDRESS_2);
+ if (ret != 16) {
+ error_report("VMD %s cannot read MEMBARs (%d)",
+ vdev->vbasedev.name, ret);
+ return -EFAULT;
+ }
+
+ ret = pci_add_capability(&vdev->pdev, PCI_CAP_ID_VNDR, pos,
+ VMD_SHADOW_CAP_LEN, errp);
+ if (ret < 0) {
+ error_prepend(errp, "Failed to add VMD MEMBAR Shadow cap: ");
+ return ret;
+ }
+
+ memset(vdev->emulated_config_bits + pos, 0xFF, VMD_SHADOW_CAP_LEN);
+ pos += PCI_CAP_FLAGS;
+ pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_LEN);
+ pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_VER);
+ pci_set_long(vdev->pdev.config + pos, 0x53484457); /* SHDW */
+ memcpy(vdev->pdev.config + pos + 4, membar_phys, 16);
+
+ return 0;
+}
+
+int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
+{
+ int ret;
+
+ ret = vfio_add_nv_gpudirect_cap(vdev, errp);
+ if (ret) {
+ return ret;
+ }
+
+ ret = vfio_add_vmd_shadow_cap(vdev, errp);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
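
As a rough sketch of the consuming side, a Linux-style guest driver could locate this capability and recover the host-physical MEMBAR values along the lines below. This is only an illustration built on generic kernel PCI config accessors; vmd_read_shadow_membars() is a made-up helper, and the actual Linux VMD driver change referenced in the commit message may differ. The raw values still carry the low BAR attribute bits from host config space, so a real consumer would mask them (e.g. with PCI_BASE_ADDRESS_MEM_MASK) before computing guest-to-host offsets.

#include <linux/errno.h>
#include <linux/pci.h>

#define VMD_SHADOW_SIG 0x53484457 /* "SHDW" */

/*
 * Illustrative helper: read the two host-physical MEMBAR values from the
 * vendor-specific capability exposed by the quirk above.
 */
static int vmd_read_shadow_membars(struct pci_dev *dev, u64 *membar1, u64 *membar2)
{
    u32 lo, hi;
    int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);

    if (!pos)
        return -ENODEV;

    /* Check the "SHDW" signature at capability offset 4. */
    pci_read_config_dword(dev, pos + 4, &lo);
    if (lo != VMD_SHADOW_SIG)
        return -ENODEV;

    /* MEMBAR1 shadow value at capability offsets 8..15. */
    pci_read_config_dword(dev, pos + 8, &lo);
    pci_read_config_dword(dev, pos + 12, &hi);
    *membar1 = ((u64)hi << 32) | lo;

    /* MEMBAR2 shadow value at capability offsets 16..23. */
    pci_read_config_dword(dev, pos + 16, &lo);
    pci_read_config_dword(dev, pos + 20, &hi);
    *membar2 = ((u64)hi << 32) | lo;

    return 0;
}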