author     Linux Build Service Account <lnxbuild@localhost>  2018-09-06 08:11:11 -0700
committer  Linux Build Service Account <lnxbuild@localhost>  2018-09-06 08:11:11 -0700
commit     b7adfe950d79afef108b054d0d7a63c994c7842f (patch)
tree       4c98a17c793bb893a4c10635d6409a702dee3a73
parent     ff073868acf66a396d1708aac65eda6b72e0a03f (diff)
parent     e90f13d3e1a54e5a4c1adc5f271a65ad13650b26 (diff)

Merge e90f13d3e1a54e5a4c1adc5f271a65ad13650b26 on remote branch LA.UM.6.8.r1-07200-SDM710.0

Change-Id: I7940f174c635d9db490441557f81d21640985a11
-rw-r--r--  AndroidKernel.mk  57
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/msm_ion.txt  30
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/msm_ipc_router_fifo_xprt.txt  25
-rw-r--r--  arch/arm64/Kconfig  8
-rw-r--r--  arch/arm64/Kconfig.platforms  6
-rw-r--r--  arch/arm64/boot/dts/qcom/Makefile  2
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs605-lc.dtsi  10
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm670-vm.dts  29
-rw-r--r--  arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig  113
-rw-r--r--  arch/arm64/configs/sdm670-perf_defconfig  4
-rw-r--r--  arch/arm64/configs/sdm670_defconfig  6
-rw-r--r--  block/blk-core.c  8
-rw-r--r--  block/blk-merge.c  2
-rw-r--r--  drivers/Kconfig  2
-rw-r--r--  drivers/Makefile  2
-rw-r--r--  drivers/block/Kconfig  20
-rw-r--r--  drivers/block/Makefile  5
-rw-r--r--  drivers/block/vs_block_client.c  959
-rw-r--r--  drivers/block/vs_block_server.c  1185
-rw-r--r--  drivers/char/Kconfig  46
-rw-r--r--  drivers/char/Makefile  8
-rw-r--r--  drivers/char/diag/diag_memorydevice.c  2
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c  20
-rw-r--r--  drivers/char/okl4_pipe.c  677
-rw-r--r--  drivers/char/vs_serial_client.c  132
-rw-r--r--  drivers/char/vs_serial_common.h  91
-rw-r--r--  drivers/char/vs_serial_server.c  152
-rw-r--r--  drivers/char/vservices_serial.c  634
-rw-r--r--  drivers/cpuidle/lpm-levels.c  27
-rw-r--r--  drivers/irqchip/irq-gic-v3.c  6
-rw-r--r--  drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c  4
-rw-r--r--  drivers/media/platform/msm/camera/cam_core/cam_context.c  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_core/cam_context_utils.c  3
-rw-r--r--  drivers/media/platform/msm/camera/cam_core/cam_node.c  7
-rw-r--r--  drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c  7
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c  26
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h  4
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c  274
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h  3
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c  231
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h  20
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c  49
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h  4
-rw-r--r--  drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c  10
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c  101
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c  52
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h  9
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h  4
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c  3
-rw-r--r--  drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c  8
-rw-r--r--  drivers/media/platform/msm/camera/cam_sync/cam_sync.c  183
-rw-r--r--  drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h  5
-rw-r--r--  drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c  78
-rw-r--r--  drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h  24
-rw-r--r--  drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c  2
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_platform.c  12
-rw-r--r--  drivers/media/platform/msm/vidc/venus_hfi.c  20
-rw-r--r--  drivers/misc/Kconfig  10
-rw-r--r--  drivers/misc/Makefile  2
-rw-r--r--  drivers/misc/okl4-link-shbuf.c  667
-rw-r--r--  drivers/mmc/host/sdhci-msm-ice.c  42
-rw-r--r--  drivers/net/ppp/pppolac.c  7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c  10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_nat.c  2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c  16
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c  19
-rw-r--r--  drivers/soc/qcom/Kconfig  16
-rw-r--r--  drivers/soc/qcom/Makefile  1
-rw-r--r--  drivers/soc/qcom/hyp_core_ctl.c  354
-rw-r--r--  drivers/soc/qcom/icnss.c  6
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c  233
-rw-r--r--  drivers/staging/android/ion/msm/msm_ion.c  18
-rw-r--r--  drivers/staging/android/ion/msm_ion_priv.h  6
-rw-r--r--  drivers/staging/android/uapi/msm_ion.h  5
-rw-r--r--  drivers/tty/Kconfig  20
-rw-r--r--  drivers/tty/Makefile  1
-rw-r--r--  drivers/tty/n_tty.c  28
-rw-r--r--  drivers/tty/okl4_vtty.c  882
-rw-r--r--  drivers/vservices/Kconfig  81
-rw-r--r--  drivers/vservices/Kconfig.stacks  7
-rw-r--r--  drivers/vservices/Makefile  16
-rw-r--r--  drivers/vservices/compat.h  59
-rw-r--r--  drivers/vservices/core_client.c  733
-rw-r--r--  drivers/vservices/core_server.c  1651
-rw-r--r--  drivers/vservices/debug.h  74
-rw-r--r--  drivers/vservices/devio.c  1059
-rw-r--r--  drivers/vservices/protocol/Kconfig  44
-rw-r--r--  drivers/vservices/protocol/Makefile  5
-rw-r--r--  drivers/vservices/protocol/block/Makefile  7
-rw-r--r--  drivers/vservices/protocol/block/client.c  1186
-rw-r--r--  drivers/vservices/protocol/block/server.c  1371
-rw-r--r--  drivers/vservices/protocol/core/Makefile  7
-rw-r--r--  drivers/vservices/protocol/core/client.c  1069
-rw-r--r--  drivers/vservices/protocol/core/server.c  1226
-rw-r--r--  drivers/vservices/protocol/serial/Makefile  7
-rw-r--r--  drivers/vservices/protocol/serial/client.c  925
-rw-r--r--  drivers/vservices/protocol/serial/server.c  1086
-rw-r--r--  drivers/vservices/session.c  2913
-rw-r--r--  drivers/vservices/session.h  173
-rw-r--r--  drivers/vservices/skeleton_driver.c  133
-rw-r--r--  drivers/vservices/transport.h  40
-rw-r--r--  drivers/vservices/transport/Kconfig  20
-rw-r--r--  drivers/vservices/transport/Makefile  5
-rw-r--r--  drivers/vservices/transport/axon.c  3573
-rw-r--r--  fs/f2fs/data.c  4
-rw-r--r--  include/Kbuild  4
-rw-r--r--  include/asm-generic/okl4_virq.h  27
-rw-r--r--  include/linux/Kbuild.vservices  3
-rw-r--r--  include/linux/blkdev.h  6
-rw-r--r--  include/linux/mm_inline.h  7
-rw-r--r--  include/linux/mmzone.h  2
-rw-r--r--  include/linux/tty.h  4
-rw-r--r--  include/linux/writeback.h  2
-rw-r--r--  include/microvisor/kernel/microvisor.h  59
-rw-r--r--  include/microvisor/kernel/offsets.h  1534
-rw-r--r--  include/microvisor/kernel/syscalls.h  6114
-rw-r--r--  include/microvisor/kernel/types.h  16064
-rw-r--r--  include/microvisor/microvisor.h  64
-rw-r--r--  include/soc/qcom/secure_buffer.h  3
-rw-r--r--  include/trace/events/hyp_core_ctl.h  75
-rw-r--r--  include/trace/events/sched.h  5
-rw-r--r--  include/trace/events/writeback.h  2
-rw-r--r--  include/uapi/linux/Kbuild  5
-rw-r--r--  include/uapi/linux/okl4-link-shbuf.h  40
-rw-r--r--  include/vservices/Kbuild  2
-rw-r--r--  include/vservices/buffer.h  239
-rw-r--r--  include/vservices/ioctl.h  48
-rw-r--r--  include/vservices/protocol/Kbuild  12
-rw-r--r--  include/vservices/protocol/block/Kbuild  1
-rw-r--r--  include/vservices/protocol/block/client.h  175
-rw-r--r--  include/vservices/protocol/block/common.h  42
-rw-r--r--  include/vservices/protocol/block/server.h  177
-rw-r--r--  include/vservices/protocol/block/types.h  106
-rw-r--r--  include/vservices/protocol/core.h  145
-rw-r--r--  include/vservices/protocol/core/Kbuild  1
-rw-r--r--  include/vservices/protocol/core/client.h  155
-rw-r--r--  include/vservices/protocol/core/common.h  38
-rw-r--r--  include/vservices/protocol/core/server.h  171
-rw-r--r--  include/vservices/protocol/core/types.h  87
-rw-r--r--  include/vservices/protocol/serial/Kbuild  1
-rw-r--r--  include/vservices/protocol/serial/client.h  114
-rw-r--r--  include/vservices/protocol/serial/common.h  37
-rw-r--r--  include/vservices/protocol/serial/server.h  134
-rw-r--r--  include/vservices/protocol/serial/types.h  88
-rw-r--r--  include/vservices/service.h  674
-rw-r--r--  include/vservices/session.h  161
-rw-r--r--  include/vservices/transport.h  150
-rw-r--r--  include/vservices/types.h  41
-rw-r--r--  include/vservices/wait.h  455
-rw-r--r--  kernel/events/core.c  2
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  7
-rwxr-xr-x  kernel/sched/fair.c  71
-rw-r--r--  kernel/sched/sched.h  28
-rw-r--r--  kernel/sched/tune.c  4
-rw-r--r--  kernel/sched/walt.h  4
-rw-r--r--  mm/swap.c  9
-rw-r--r--  mm/vmscan.c  83
-rw-r--r--  net/core/sockev_nlmcast.c  20
-rw-r--r--  net/ipc_router/Kconfig  19
-rw-r--r--  net/ipc_router/Makefile  1
-rw-r--r--  net/ipc_router/ipc_router_core.c  44
-rw-r--r--  net/ipc_router/ipc_router_fifo_xprt.c  500
-rw-r--r--  net/ipc_router/ipc_router_private.h  4
-rw-r--r--  security/pfe/pfk_ice.c  5
-rw-r--r--  security/selinux/ss/services.c  2
171 files changed, 52681 insertions, 640 deletions
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index 2a8d50659784..26e5cb8b7554 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -6,6 +6,8 @@ ifeq ($(KERNEL_TARGET),)
INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
endif
+INSTALLED_KERNEL_VM_TARGET := $(PRODUCT_OUT)/kernel_vm
+
TARGET_KERNEL_MAKE_ENV := $(strip $(TARGET_KERNEL_MAKE_ENV))
ifeq ($(TARGET_KERNEL_MAKE_ENV),)
KERNEL_MAKE_ENV :=
@@ -64,12 +66,16 @@ ifeq ($(TARGET_KERNEL),$(current_dir))
KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/kernel/$(TARGET_KERNEL)
KERNEL_SYMLINK := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ
KERNEL_USR := $(KERNEL_SYMLINK)/usr
+ KERNEL_VM_OUT := $(TARGET_OUT_INTERMEDIATES)/kernel_vm/$(TARGET_KERNEL)
+ KERNEL_VM_SYMLINK := $(TARGET_OUT_INTERMEDIATES)/KERNEL_VM_OBJ
+ KERNEL_VM_USR := $(KERNEL_VM_SYMLINK)/usr
else
# Legacy style, kernel source directly under kernel
KERNEL_LEGACY_DIR := true
BUILD_ROOT_LOC := ../
TARGET_KERNEL_SOURCE := kernel
KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ
+ KERNEL_VM_OUT := $(TARGET_OUT_INTERMEDIATES)/KERNEL_VM_OBJ
endif
KERNEL_CONFIG := $(KERNEL_OUT)/.config
@@ -80,26 +86,32 @@ else
ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true)
$(info Using uncompressed kernel)
-TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/Image
+TARGET_PREBUILT_INT_KERNEL_ := arch/$(KERNEL_ARCH)/boot/Image
else
ifeq ($(KERNEL_ARCH),arm64)
-TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/Image.gz
+TARGET_PREBUILT_INT_KERNEL_ := arch/$(KERNEL_ARCH)/boot/Image.gz
else
-TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/zImage
+TARGET_PREBUILT_INT_KERNEL_ := arch/$(KERNEL_ARCH)/boot/zImage
endif
endif
ifeq ($(TARGET_KERNEL_APPEND_DTB), true)
$(info Using appended DTB)
-TARGET_PREBUILT_INT_KERNEL := $(TARGET_PREBUILT_INT_KERNEL)-dtb
+TARGET_PREBUILT_INT_KERNEL_ := $(TARGET_PREBUILT_INT_KERNEL_)-dtb
endif
KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr
KERNEL_MODULES_INSTALL ?= system
KERNEL_MODULES_OUT ?= $(PRODUCT_OUT)/$(KERNEL_MODULES_INSTALL)/lib/modules
+TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/$(TARGET_PREBUILT_INT_KERNEL_)
TARGET_PREBUILT_KERNEL := $(TARGET_PREBUILT_INT_KERNEL)
+KERNEL_VM_CONFIG := $(KERNEL_VM_OUT)/.config
+KERNEL_VM_HEADERS_INSTALL := $(KERNEL_VM_OUT)/usr
+TARGET_PREBUILT_INT_KERNEL_VM := $(KERNEL_VM_OUT)/$(TARGET_PREBUILT_INT_KERNEL_)
+TARGET_PREBUILT_KERNEL_VM := $(TARGET_PREBUILT_INT_KERNEL_VM)
+
define mv-modules
mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.dep`;\
if [ "$$mdpath" != "" ];then\
@@ -116,12 +128,49 @@ mpath=`dirname $$mdpath`; rm -rf $$mpath;\
fi
endef
+$(TARGET_PREBUILT_INT_KERNEL_VM): ;
+
ifneq ($(KERNEL_LEGACY_DIR),true)
$(KERNEL_USR): $(KERNEL_HEADERS_INSTALL)
rm -rf $(KERNEL_SYMLINK)
ln -s kernel/$(TARGET_KERNEL) $(KERNEL_SYMLINK)
$(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_USR)
+
+ifneq ($(KERNEL_VM_DEFCONFIG),)
+$(KERNEL_VM_USR): $(KERNEL_VM_HEADERS_INSTALL)
+ rm -rf $(KERNEL_VM_SYMLINK);
+ ln -s kernel_vm/$(TARGET_KERNEL) $(KERNEL_VM_SYMLINK);
+
+$(TARGET_PREBUILT_INT_KERNEL_VM): $(KERNEL_VM_USR)
+endif
+endif
+
+ifneq ($(KERNEL_VM_DEFCONFIG),)
+$(KERNEL_VM_OUT):
+ mkdir -p $(KERNEL_VM_OUT);
+
+$(KERNEL_VM_CONFIG): $(KERNEL_VM_OUT)
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_VM_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_VM_DEFCONFIG);
+ if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
+ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
+ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_VM_OUT)/.config; \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_VM_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi;
+
+$(TARGET_PREBUILT_INT_KERNEL_VM): $(KERNEL_VM_OUT) $(KERNEL_VM_HEADERS_INSTALL)
+ echo "Building vm kernel...";
+ rm -rf $(KERNEL_VM_OUT)/arch/$(KERNEL_ARCH)/boot/dts;
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_VM_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS);
+
+$(KERNEL_VM_HEADERS_INSTALL): $(KERNEL_VM_OUT)
+ rm -f $(BUILD_ROOT_LOC)$(KERNEL_VM_CONFIG);
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_VM_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_VM_DEFCONFIG);
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_VM_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) headers_install;
+ if [ -d "$(KERNEL_VM_HEADERS_INSTALL)/include/bringup_headers" ]; then \
+ cp -Rf $(KERNEL_VM_HEADERS_INSTALL)/include/bringup_headers/* $(KERNEL_VM_HEADERS_INSTALL)/include/ ; fi ;
+ if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
+ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_VM_OUT)/.config; \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_VM_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi;
endif
$(KERNEL_OUT):
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
index 6527675e258d..c377930f881f 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -22,6 +22,7 @@ Required properties for Ion heaps
- "SYSTEM"
- "SYSTEM_CONTIG"
- "CARVEOUT"
+ - "SECURE_CARVEOUT"
- "CHUNK"
- "CP"
- "DMA"
@@ -70,3 +71,32 @@ Example:
qcom,ion-heap-type = "CARVEOUT";
};
};
+
+"SECURE_CARVEOUT"
+
+This heap type is expected to contain multiple child nodes. Each child node
+shall contain the following required properties:
+
+- memory-regions:
+Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+- token:
+A u32 containing the set of secure domains which will be able to access the
+memory-region.
+
+Example:
+qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ion-heap@14 {
+ reg = <14>;
+ qcom,ion-heap-type = "SECURE_CARVEOUT";
+
+ node1 {
+ memory-regions = <&cp_region>;
+ token = <ION_FLAG_CP_TOUCH>;
+ };
+ };
+};
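
For context, a heap driver consuming the "SECURE_CARVEOUT" binding above would typically iterate over the child nodes and read each "token" and "memory-regions" property with the standard OF helpers. The sketch below is illustrative only; the function and variable names are hypothetical and are not taken from this patch.

#include <linux/of.h>

/* Illustrative sketch, not part of this patch: walk SECURE_CARVEOUT child
 * nodes and read their "token" and "memory-regions" properties. */
static int example_parse_secure_carveout(struct device_node *heap_node)
{
	struct device_node *child;
	u32 token;
	int ret = 0;

	for_each_child_of_node(heap_node, child) {
		struct device_node *mem;

		ret = of_property_read_u32(child, "token", &token);
		if (ret) {
			of_node_put(child);
			break;
		}

		mem = of_parse_phandle(child, "memory-regions", 0);
		if (!mem) {
			ret = -EINVAL;
			of_node_put(child);
			break;
		}
		/* ... record or map the referenced reserved-memory region ... */
		of_node_put(mem);
	}

	return ret;
}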
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_fifo_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_fifo_xprt.txt
new file mode 100644
index 000000000000..a6fd56c24171
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_fifo_xprt.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. IPC Router FIFO Transport
+
+Required properties:
+- compatible: should be "qcom,ipcr_fifo_xprt"
+- reg: the irq register to raise an interrupt
+- interrupts: the receiving interrupt line
+- qcom,ipc-shm: Reference to shared memory phandle
+
+Example:
+
+ fifo_vipc_irq@176 {
+ compatible = "qcom,ipcr-fifo-xprt";
+ reg = <0x176>;
+ interrupts = <0x0 0x142 0x1>;
+ qcom,ipc-shm = <&ipc-shm>;
+ };
+
+ ipc-shm: shared-buffer@85af7000 {
+ compatible = "qcom,hypervisor-shared-memory";
+ phandle = <0x1e4>;
+ reg = <0x0 0x85af7000 0x0 0x9000>;
+ label = "ipc_shm";
+ qcom,tx-is-first;
+ };
+
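As a rough illustration of how the "qcom,ipc-shm" phandle in the example above can be resolved to a mappable memory region, the sketch below uses only generic OF helpers; the function name and surrounding details are hypothetical and not taken from this patch.

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Illustrative sketch, not part of this patch: resolve and map the shared
 * buffer referenced by the "qcom,ipc-shm" property. */
static void __iomem *example_map_ipc_shm(struct device_node *xprt_node,
					 resource_size_t *size)
{
	struct device_node *shm;
	struct resource res;
	void __iomem *base = NULL;

	shm = of_parse_phandle(xprt_node, "qcom,ipc-shm", 0);
	if (!shm)
		return NULL;

	if (!of_address_to_resource(shm, 0, &res)) {
		*size = resource_size(&res);
		base = ioremap(res.start, *size); /* memremap() for cached RAM */
	}

	of_node_put(shm);
	return base;
}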
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 87a98bc70af3..d80a03bc8cef 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -84,6 +84,7 @@ config ARM64
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
@@ -773,6 +774,13 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
+config OKL4_GUEST
+ bool "OKL4 Hypervisor guest support"
+ depends on ARM64 && OF
+ default n
+ help
+ Say Y if you want to run Linux as a guest of the OKL4 hypervisor
+
config FORCE_MAX_ZONEORDER
int
default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 1de858e554dd..c44f93859510 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -145,6 +145,12 @@ config ARCH_SDM670
This enables support for the SDM670 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
+config ARCH_SDM670_VM
+ bool "Enable Support for Qualcomm Technologies Inc. SDM670 VM"
+ help
+ This enables support for VM on sdm670 chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
config ARCH_MSM8953
bool "Enable Support for Qualcomm Technologies Inc. MSM8953"
depends on ARCH_QCOM
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 77106e0b2fef..0ebba88077d8 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -436,6 +436,8 @@ dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb \
endif
+dtb-$(CONFIG_ARCH_SDM670_VM) += sdm670-vm.dtb
+
always := $(dtb-y)
subdir-y := $(dts-dirs)
clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
index 06443572ba1d..61d777c9fac3 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
@@ -374,3 +374,13 @@
&int_codec {
/delete-property/ qcom,ext-disp-audio-rx;
};
+
+&bluetooth {
+ qca,bt-vdd-core-supply = <&pm660_l9>;
+ qca,bt-vdd-pa-supply = <&pm660_l3>;
+ /delete-property/ qca,bt-vdd-ldo-supply;
+};
+
+&qupv3_se6_4uart {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-vm.dts b/arch/arm64/boot/dts/qcom/sdm670-vm.dts
new file mode 100644
index 000000000000..e0decdff6f5c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-vm.dts
@@ -0,0 +1,29 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "SDM670 Virtual Platform";
+ compatible = "linux,dummy-virt";
+
+ interrupt-parent = <&intc>;
+
+ intc: interrupt-controller {
+ interrupt-controller;
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ };
+};
diff --git a/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig b/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig
new file mode 100644
index 000000000000..8e93c77d051f
--- /dev/null
+++ b/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig
@@ -0,0 +1,113 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SCHED_WALT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_ARCH_SDM670_VM=y
+CONFIG_NR_CPUS=8
+CONFIG_CMA=y
+CONFIG_OKL4_GUEST=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_IPV6 is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_NODE_ID=101
+CONFIG_IPC_ROUTER_FIFO_XPRT=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+CONFIG_DMA_CMA=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
+# CONFIG_VSERVICES_BLOCK_SERVER is not set
+# CONFIG_OKL4_LINK_SHBUF is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_NETDEVICES=y
+CONFIG_INPUT=m
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_INPUT_GPIO=m
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_OKL4_PIPE=y
+# CONFIG_VSERVICES_SERIAL_SERVER is not set
+# CONFIG_VSERVICES_SERIAL_CLIENT is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_FTRACE is not set
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index e590fd752eb6..1769e2acf9ae 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -66,6 +66,7 @@ CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
+CONFIG_OKL4_GUEST=y
CONFIG_HARDEN_BRANCH_PREDICTOR=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -234,6 +235,7 @@ CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_IPC_ROUTER_FIFO_XPRT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_AQT_REGMAP=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
@@ -551,6 +553,7 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
+CONFIG_QCOM_HYP_CORE_CTL=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -620,6 +623,7 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 69cc0339c324..7a5b36e16d66 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -71,6 +71,7 @@ CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
+CONFIG_OKL4_GUEST=y
CONFIG_HARDEN_BRANCH_PREDICTOR=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -243,6 +244,7 @@ CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_IPC_ROUTER_FIFO_XPRT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_AQT_REGMAP=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
@@ -306,6 +308,8 @@ CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_DIAG_CHAR=y
@@ -569,6 +573,7 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
+CONFIG_QCOM_HYP_CORE_CTL=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -680,6 +685,7 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/block/blk-core.c b/block/blk-core.c
index 37b814a01529..f8c5cff2f0b7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1548,6 +1548,7 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
bio->bi_next = req->bio;
req->bio = bio;
+ WARN_ON(req->__dun || bio->bi_iter.bi_dun);
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@@ -1663,6 +1664,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->errors = 0;
req->__sector = bio->bi_iter.bi_sector;
+ req->__dun = bio->bi_iter.bi_dun;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -2665,8 +2667,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
- if (req->cmd_type == REQ_TYPE_FS)
+ if (req->cmd_type == REQ_TYPE_FS) {
req->__sector += total_bytes >> 9;
+ if (req->__dun)
+ req->__dun += total_bytes >> 12;
+ }
/* mixed attributes always follow the first bio */
if (req->cmd_flags & REQ_MIXED_MERGE) {
@@ -3067,6 +3072,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
(src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
+ dst->__dun = blk_rq_dun(src);
dst->__data_len = blk_rq_bytes(src);
dst->nr_phys_segments = src->nr_phys_segments;
dst->ioprio = src->ioprio;
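
For reference, the two shifts used in the blk_update_request() hunk above encode different granularities: __sector advances in 512-byte sectors (total_bytes >> 9), while __dun appears to advance in 4 KiB data units (total_bytes >> 12). A trivial, purely illustrative check of that arithmetic, assuming a 4 KiB crypto data unit:

#include <stdio.h>

/* Illustrative only: relationship between the >> 9 and >> 12 updates made in
 * blk_update_request() above. */
int main(void)
{
	unsigned int total_bytes = 8192; /* bytes completed in the request */

	printf("sectors advanced: %u\n", total_bytes >> 9);  /* 16 */
	printf("duns advanced:    %u\n", total_bytes >> 12); /* 2  */
	return 0;
}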
diff --git a/block/blk-merge.c b/block/blk-merge.c
index f44daa1925d4..c71d1cb662c5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -876,6 +876,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
int blk_try_merge(struct request *rq, struct bio *bio)
{
+ if (blk_rq_dun(rq) || bio_dun(bio))
+ return ELEVATOR_NO_MERGE;
if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e37c9aa5bae3..5e7dadf7a086 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,8 @@ source "drivers/hv/Kconfig"
source "drivers/xen/Kconfig"
+source "drivers/vservices/Kconfig"
+
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 04e2d4e83f4e..142e2f9ca1a6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,8 @@ obj-y += bus/
obj-$(CONFIG_GENERIC_PHY) += phy/
+obj-$(CONFIG_VSERVICES_SUPPORT) += vservices/
+
# GPIO must come after pinctrl as gpios may need to mux pins etc
obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b6ef86..64d95c9514a0 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -538,3 +538,23 @@ config BLK_DEV_RSXX
module will be called rsxx.
endif # BLK_DEV
+
+config VSERVICES_BLOCK_SERVER
+ tristate "Virtual Services block server"
+ depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_SERVER
+ default y
+ select VSERVICES_PROTOCOL_BLOCK_SERVER
+ help
+ Select this option if you want support for server side Virtual
+ Services block. This allows any Linux block device to be
+ virtualized and exported as a virtual service.
+
+config VSERVICES_BLOCK_CLIENT
+ tristate "Virtual Services Block client device"
+ depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_CLIENT
+ default y
+ select VSERVICES_PROTOCOL_BLOCK_CLIENT
+ help
+ Select this option if you want support for client side Virtual
+ Services block devices. The virtual block devices are typically
+ named /dev/vblock0, /dev/vblock1, etc.
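
To make the help text concrete: once the client driver binds to a block service, the guest sees an ordinary block device node. A minimal userspace sketch follows; the /dev/vblock0 name is only the typical default mentioned above and may differ depending on udev policy.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative only: read the first 512-byte sector from a virtual block
 * device exported by the Virtual Services block client. */
int main(void)
{
	char sector[512];
	int fd = open("/dev/vblock0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/vblock0");
		return 1;
	}
	if (read(fd, sector, sizeof(sector)) != (ssize_t)sizeof(sector)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("read %zu bytes\n", sizeof(sector));
	close(fd);
	return 0;
}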
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1e9661e26f29..fe9229f187da 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -44,3 +44,8 @@ obj-$(CONFIG_ZRAM) += zram/
skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
+
+obj-$(CONFIG_VSERVICES_BLOCK_SERVER) += vs_block_server.o
+CFLAGS_vs_block_server.o += -Werror
+obj-$(CONFIG_VSERVICES_BLOCK_CLIENT) += vs_block_client.o
+CFLAGS_vs_block_client.o += -Werror
diff --git a/drivers/block/vs_block_client.c b/drivers/block/vs_block_client.c
new file mode 100644
index 000000000000..5b6d005dd20d
--- /dev/null
+++ b/drivers/block/vs_block_client.c
@@ -0,0 +1,959 @@
+/*
+ * drivers/block/vs_block_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice client driver
+ *
+ * Function vs_block_client_vs_alloc() is partially derived from
+ * drivers/block/brd.c (brd_alloc())
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/version.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/session.h>
+#include <vservices/wait.h>
+
+/*
+ * BLK_DEF_MAX_SECTORS was replaced with the hard-coded number 1024 in 3.19,
+ * and restored in 4.3
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#define BLK_DEF_MAX_SECTORS 1024
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+#define bio_sector(bio) (bio)->bi_iter.bi_sector
+#define bio_size(bio) (bio)->bi_iter.bi_size
+#else
+#define bio_sector(bio) (bio)->bi_sector
+#define bio_size(bio) (bio)->bi_size
+#endif
+
+#define CLIENT_BLKDEV_NAME "vblock"
+
+#define PERDEV_MINORS 256
+
+struct block_client;
+
+struct vs_block_device {
+ /*
+ * The client that created this block device. A reference is held
+ * to the client until the block device is released, so this pointer
+ * should always be valid. However, the client may since have reset;
+ * so it should only be used if, after locking it, its blkdev pointer
+ * points back to this block device.
+ */
+ struct block_client *client;
+
+ int id;
+ struct gendisk *disk;
+ struct request_queue *queue;
+
+ struct kref kref;
+};
+
+struct block_client {
+ struct vs_client_block_state client;
+ struct vs_service_device *service;
+
+ /* Tasklet & queue for bouncing buffers out of read acks */
+ struct tasklet_struct rx_tasklet;
+ struct list_head rx_queue;
+ struct spinlock rx_queue_lock;
+
+ /*
+ * The current virtual block device. This gets replaced when we do
+ * a reset since other parts of the kernel (e.g. vfs) may still
+ * be accessing the disk.
+ */
+ struct vs_block_device *blkdev;
+
+ /* Shared work item for disk creation */
+ struct work_struct disk_creation_work;
+
+ struct kref kref;
+};
+
+#define state_to_block_client(state) \
+ container_of(state, struct block_client, client)
+
+static int block_client_major;
+
+/* Unique identifier allocation for virtual block devices */
+static DEFINE_IDA(vs_block_ida);
+static DEFINE_MUTEX(vs_block_ida_lock);
+
+static int
+block_client_vs_to_linux_error(vservice_block_block_io_error_t vs_err)
+{
+ switch (vs_err) {
+ case VSERVICE_BLOCK_INVALID_INDEX:
+ return -EILSEQ;
+ case VSERVICE_BLOCK_MEDIA_FAILURE:
+ return -EIO;
+ case VSERVICE_BLOCK_MEDIA_TIMEOUT:
+ return -ETIMEDOUT;
+ case VSERVICE_BLOCK_UNSUPPORTED_COMMAND:
+ return -ENOTSUPP;
+ case VSERVICE_BLOCK_SERVICE_RESET:
+ return -ENXIO;
+ default:
+ WARN_ON(vs_err);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void vs_block_client_kfree(struct kref *kref)
+{
+ struct block_client *client =
+ container_of(kref, struct block_client, kref);
+
+ vs_put_service(client->service);
+ kfree(client);
+}
+
+static void vs_block_client_put(struct block_client *client)
+{
+ kref_put(&client->kref, vs_block_client_kfree);
+}
+
+static void vs_block_device_kfree(struct kref *kref)
+{
+ struct vs_block_device *blkdev =
+ container_of(kref, struct vs_block_device, kref);
+
+ /* Delete the disk and clean up its queue */
+ del_gendisk(blkdev->disk);
+ blk_cleanup_queue(blkdev->queue);
+ put_disk(blkdev->disk);
+
+ mutex_lock(&vs_block_ida_lock);
+ ida_remove(&vs_block_ida, blkdev->id);
+ mutex_unlock(&vs_block_ida_lock);
+
+ if (blkdev->client)
+ vs_block_client_put(blkdev->client);
+
+ kfree(blkdev);
+}
+
+static void vs_block_device_put(struct vs_block_device *blkdev)
+{
+ kref_put(&blkdev->kref, vs_block_device_kfree);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+static void
+#else
+static int
+#endif
+vs_block_client_blkdev_release(struct gendisk *disk, fmode_t mode)
+{
+ struct vs_block_device *blkdev = disk->private_data;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+ if (WARN_ON(!blkdev))
+ return;
+#else
+ if (WARN_ON(!blkdev))
+ return -ENXIO;
+#endif
+
+ vs_block_device_put(blkdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+ return 0;
+#endif
+}
+
+static int vs_block_client_blkdev_open(struct block_device *bdev, fmode_t mode)
+{
+ struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+ struct block_client *client;
+ int err = -ENXIO;
+
+ if (!blkdev || !kref_get_unless_zero(&blkdev->kref))
+ goto fail_get_blkdev;
+
+ client = blkdev->client;
+ if (WARN_ON(!client))
+ goto fail_lock_client;
+
+ if (!vs_state_lock_safe(&client->client)) {
+ err = -ENODEV;
+ goto fail_lock_client;
+ }
+
+ if (blkdev != client->blkdev) {
+ /* The client has reset, this blkdev is no longer usable */
+ err = -ENXIO;
+ goto fail_check_client;
+ }
+
+ if ((mode & FMODE_WRITE) > 0 && client->client.readonly) {
+ dev_dbg(&client->service->dev,
+ "opening a readonly disk as writable\n");
+ err = -EROFS;
+ goto fail_check_client;
+ }
+
+ vs_state_unlock(&client->client);
+
+ return 0;
+
+fail_check_client:
+ vs_state_unlock(&client->client);
+fail_lock_client:
+ vs_block_device_put(blkdev);
+fail_get_blkdev:
+ return err;
+}
+
+static int vs_block_client_blkdev_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ /* These numbers are some default sane values for disk geometry. */
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->heads = 4;
+ geo->sectors = 16;
+
+ return 0;
+}
+
+/*
+ * Indirectly determine linux block layer sector size and ensure that our
+ * sector size matches.
+ */
+static int vs_block_client_check_sector_size(struct block_client *client,
+ struct bio *bio)
+{
+ if (unlikely(!bio_sectors(bio))) {
+ dev_err(&client->service->dev, "zero-length bio");
+ return -EIO;
+ }
+
+ if (unlikely(bio_size(bio) % client->client.sector_size)) {
+ dev_err(&client->service->dev,
+ "bio has %zd bytes, which is unexpected "
+ "for sector_size of %zd bytes",
+ (size_t)bio_size(bio),
+ (size_t)client->client.sector_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct block_device_operations block_client_ops = {
+ .getgeo = vs_block_client_blkdev_getgeo,
+ .open = vs_block_client_blkdev_open,
+ .release = vs_block_client_blkdev_release,
+ .owner = THIS_MODULE,
+};
+
+static int block_client_send_write_req(struct block_client *client,
+ struct bio *bio)
+{
+ struct vs_client_block_state *state = &client->client;
+ struct vs_mbuf *mbuf;
+ struct vs_pbuf pbuf;
+ struct bio_vec *bvec;
+ int err;
+ bool flush, nodelay, commit;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ struct bvec_iter iter;
+ struct bio_vec bvec_local;
+#else
+ int i;
+#endif
+
+ err = vs_block_client_check_sector_size(client, bio);
+ if (err < 0)
+ goto fail;
+
+ do {
+ /* Wait until it's possible to send a write request */
+ err = vs_wait_state_nointr(state,
+ vs_client_block_io_req_write_can_send(state));
+ if (err == -ECANCELED)
+ err = -ENXIO;
+ if (err < 0)
+ goto fail;
+
+ /* Wait for quota, while sending a write remains possible */
+ mbuf = vs_wait_alloc_nointr(state,
+ vs_client_block_io_req_write_can_send(state),
+ vs_client_block_io_alloc_req_write(
+ state, &pbuf, GFP_KERNEL));
+ err = IS_ERR(mbuf) ? PTR_ERR(mbuf) : 0;
+
+ /* Retry if sending is no longer possible */
+ } while (err == -ECANCELED);
+
+ if (err < 0)
+ goto fail;
+
+ vs_pbuf_resize(&pbuf, 0);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ bvec = &bvec_local;
+ bio_for_each_segment(bvec_local, bio, iter)
+#else
+ bio_for_each_segment(bvec, bio, i)
+#endif
+ {
+ unsigned long flags;
+ void *buf = bvec_kmap_irq(bvec, &flags);
+ flush_kernel_dcache_page(bvec->bv_page);
+ err = vs_pbuf_append(&pbuf, buf, bvec->bv_len);
+ bvec_kunmap_irq(buf, &flags);
+ if (err < 0) {
+ dev_err(&client->service->dev,
+ "pbuf copy failed with err %d\n", err);
+ err = -EIO;
+ goto fail_free_write;
+ }
+ }
+
+ if (unlikely(vs_pbuf_size(&pbuf) != bio_size(bio))) {
+ dev_err(&client->service->dev,
+ "pbuf size is wrong: %zd, should be %zd\n",
+ vs_pbuf_size(&pbuf), (size_t)bio_size(bio));
+ err = -EIO;
+ goto fail_free_write;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ flush = (bio_flags(bio) & REQ_PREFLUSH);
+ commit = (bio_flags(bio) & REQ_FUA);
+ nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+ flush = (bio->bi_rw & REQ_FLUSH);
+ commit = (bio->bi_rw & REQ_FUA);
+ nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+ err = vs_client_block_io_req_write(state, bio, bio_sector(bio),
+ bio_sectors(bio), nodelay, flush, commit, pbuf, mbuf);
+
+ if (err) {
+ dev_err(&client->service->dev,
+ "write req failed with err %d\n", err);
+ goto fail_free_write;
+ }
+
+ return 0;
+
+fail_free_write:
+ vs_client_block_io_free_req_write(state, &pbuf, mbuf);
+fail:
+ return err;
+}
+
+static int block_client_send_read_req(struct block_client *client,
+ struct bio *bio)
+{
+ struct vs_client_block_state *state = &client->client;
+ int err;
+ bool flush, nodelay;
+
+ err = vs_block_client_check_sector_size(client, bio);
+ if (err < 0)
+ return err;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ flush = (bio_flags(bio) & REQ_PREFLUSH);
+ nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+ flush = (bio->bi_rw & REQ_FLUSH);
+ nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+ do {
+ /* Wait until it's possible to send a read request */
+ err = vs_wait_state_nointr(state,
+ vs_client_block_io_req_read_can_send(state));
+ if (err == -ECANCELED)
+ err = -ENXIO;
+ if (err < 0)
+ break;
+
+ /* Wait for quota, while sending a read remains possible */
+ err = vs_wait_send_nointr(state,
+ vs_client_block_io_req_read_can_send(state),
+ vs_client_block_io_req_read(state, bio,
+ bio_sector(bio), bio_sectors(bio),
+ nodelay, flush, GFP_KERNEL));
+ } while (err == -ECANCELED);
+
+ return err;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+static blk_qc_t
+#else
+static void
+#endif
+vs_block_client_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct block_device *bdev = bio->bi_bdev;
+ struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+ struct block_client *client;
+ int err = 0;
+
+ client = blkdev->client;
+ if (!client || !kref_get_unless_zero(&client->kref)) {
+ err = -ENODEV;
+ goto fail_get_client;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ blk_queue_split(q, &bio, q->bio_split);
+#endif
+
+ if (!vs_state_lock_safe(&client->client)) {
+ err = -ENODEV;
+ goto fail_lock_client;
+ }
+
+ if (client->blkdev != blkdev) {
+ /* Client has reset, this block device is no longer usable */
+ err = -EIO;
+ goto fail_check_client;
+ }
+
+ if (bio_data_dir(bio) == WRITE)
+ err = block_client_send_write_req(client, bio);
+ else
+ err = block_client_send_read_req(client, bio);
+
+fail_check_client:
+ if (err == -ENOLINK)
+ err = -EIO;
+ else
+ vs_state_unlock(&client->client);
+fail_lock_client:
+ vs_block_client_put(client);
+fail_get_client:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ if (err < 0) {
+ bio->bi_error = err;
+ bio_endio(bio);
+ }
+#else
+ if (err < 0)
+ bio_endio(bio, err);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+ return BLK_QC_T_NONE;
+#endif
+}
+
+static int vs_block_client_get_blkdev_id(struct block_client *client)
+{
+ int id;
+ int ret;
+
+retry:
+ ret = ida_pre_get(&vs_block_ida, GFP_KERNEL);
+ if (ret == 0)
+ return -ENOMEM;
+
+ mutex_lock(&vs_block_ida_lock);
+ ret = ida_get_new(&vs_block_ida, &id);
+ mutex_unlock(&vs_block_ida_lock);
+
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return id;
+}
+
+static int vs_block_client_disk_add(struct block_client *client)
+{
+ struct vs_block_device *blkdev;
+ unsigned int max_hw_sectors;
+ int err;
+
+ dev_dbg(&client->service->dev, "device add\n");
+
+ blkdev = kzalloc(sizeof(*blkdev), GFP_KERNEL);
+ if (!blkdev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ kref_init(&blkdev->kref);
+ blkdev->id = vs_block_client_get_blkdev_id(client);
+ if (blkdev->id < 0) {
+ err = blkdev->id;
+ goto fail_free_blkdev;
+ }
+
+ if ((blkdev->id * PERDEV_MINORS) >> MINORBITS) {
+ err = -ENODEV;
+ goto fail_remove_ida;
+ }
+
+ blkdev->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!blkdev->queue) {
+ dev_err(&client->service->dev,
+ "Error initializing blk queue\n");
+ err = -ENOMEM;
+ goto fail_remove_ida;
+ }
+
+ blk_queue_make_request(blkdev->queue, vs_block_client_make_request);
+ blk_queue_bounce_limit(blkdev->queue, BLK_BOUNCE_ANY);
+ blk_queue_dma_alignment(blkdev->queue, 0);
+
+ /*
+ * Mark this as a paravirtualised device. This is just an alias
+ * of QUEUE_FLAG_NONROT, which prevents the I/O schedulers trying
+ * to wait for the disk to spin.
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, blkdev->queue);
+
+ blkdev->queue->queuedata = blkdev;
+
+ blkdev->client = client;
+ kref_get(&client->kref);
+
+ max_hw_sectors = min_t(sector_t, BLK_DEF_MAX_SECTORS,
+ client->client.segment_size /
+ client->client.sector_size);
+ blk_queue_max_hw_sectors(blkdev->queue, max_hw_sectors);
+ blk_queue_logical_block_size(blkdev->queue,
+ client->client.sector_size);
+ blk_queue_physical_block_size(blkdev->queue,
+ client->client.sector_size);
+
+ blkdev->disk = alloc_disk(PERDEV_MINORS);
+ if (!blkdev->disk) {
+ dev_err(&client->service->dev, "Error allocating disk\n");
+ err = -ENOMEM;
+ goto fail_free_blk_queue;
+ }
+
+ if (client->client.readonly) {
+ dev_dbg(&client->service->dev, "set device as readonly\n");
+ set_disk_ro(blkdev->disk, true);
+ }
+
+ blkdev->disk->major = block_client_major;
+ blkdev->disk->first_minor = blkdev->id * PERDEV_MINORS;
+ blkdev->disk->fops = &block_client_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+ blkdev->disk->driverfs_dev = &client->service->dev;
+#endif
+ blkdev->disk->private_data = blkdev;
+ blkdev->disk->queue = blkdev->queue;
+ blkdev->disk->flags |= GENHD_FL_EXT_DEVT;
+
+ /*
+ * The block device name is vblock<x>, where x is a unique
+ * identifier. Userspace should rename or symlink the device
+ * appropriately, typically by processing the add uevent.
+ *
+ * If a virtual block device is reset then it may re-open with a
+ * different identifier if something still holds a reference to
+ * the old device (such as a userspace application having an open
+ * file handle).
+ */
+ snprintf(blkdev->disk->disk_name, sizeof(blkdev->disk->disk_name),
+ "%s%d", CLIENT_BLKDEV_NAME, blkdev->id);
+ set_capacity(blkdev->disk, client->client.device_sectors *
+ (client->client.sector_size >> 9));
+
+ /*
+ * We need to hold a reference on blkdev across add_disk(), to make
+ * sure a concurrent reset does not immediately release the blkdev
+ * and call del_gendisk().
+ */
+ kref_get(&blkdev->kref);
+
+ vs_service_state_lock(client->service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base)) {
+ vs_service_state_unlock(client->service);
+ err = -ENXIO;
+ goto fail_free_blk_queue;
+ }
+ client->blkdev = blkdev;
+ vs_service_state_unlock(client->service);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+ device_add_disk(&client->service->dev, blkdev->disk);
+#else
+ add_disk(blkdev->disk);
+#endif
+ dev_dbg(&client->service->dev, "added block disk '%s'\n",
+ blkdev->disk->disk_name);
+
+ /* Release the reference taken above. */
+ vs_block_device_put(blkdev);
+
+ return 0;
+
+fail_free_blk_queue:
+ blk_cleanup_queue(blkdev->queue);
+fail_remove_ida:
+ mutex_lock(&vs_block_ida_lock);
+ ida_remove(&vs_block_ida, blkdev->id);
+ mutex_unlock(&vs_block_ida_lock);
+fail_free_blkdev:
+ kfree(blkdev);
+fail:
+ return err;
+}
+
+static void vs_block_client_disk_creation_work(struct work_struct *work)
+{
+ struct block_client *client = container_of(work,
+ struct block_client, disk_creation_work);
+ struct vs_block_device *blkdev;
+ bool running;
+
+ vs_service_state_lock(client->service);
+ blkdev = client->blkdev;
+ running = VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base);
+
+ dev_dbg(&client->service->dev,
+ "disk changed: blkdev = %pK, running = %d\n",
+ client->blkdev, running);
+ if (!blkdev && running) {
+ dev_dbg(&client->service->dev, "adding block disk\n");
+ vs_service_state_unlock(client->service);
+ vs_block_client_disk_add(client);
+ } else {
+ vs_service_state_unlock(client->service);
+ }
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data);
+
+static struct vs_client_block_state *
+vs_block_client_alloc(struct vs_service_device *service)
+{
+ struct block_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client) {
+ dev_err(&service->dev, "Error allocating client struct\n");
+ return NULL;
+ }
+
+ vs_get_service(service);
+ client->service = service;
+
+ INIT_LIST_HEAD(&client->rx_queue);
+ spin_lock_init(&client->rx_queue_lock);
+ tasklet_init(&client->rx_tasklet, vs_block_client_rx_tasklet,
+ (unsigned long)client);
+ tasklet_disable(&client->rx_tasklet);
+
+ INIT_WORK(&client->disk_creation_work,
+ vs_block_client_disk_creation_work);
+ kref_init(&client->kref);
+
+ dev_dbg(&service->dev, "New block client %pK\n", client);
+
+ return &client->client;
+}
+
+static void vs_block_client_release(struct vs_client_block_state *state)
+{
+ struct block_client *client = state_to_block_client(state);
+
+ flush_work(&client->disk_creation_work);
+
+ vs_block_client_put(client);
+}
+
+/* FIXME: Jira ticket SDK-2459 - anjaniv */
+static void vs_block_client_closed(struct vs_client_block_state *state)
+{
+ struct block_client *client = state_to_block_client(state);
+
+ /*
+ * Stop the RX bounce tasklet and clean up its queue. We can wait for
+ * it to stop safely because it doesn't need to acquire the state
+ * lock, only the RX lock which we acquire after it is disabled.
+ */
+ tasklet_disable(&client->rx_tasklet);
+ spin_lock(&client->rx_queue_lock);
+ while (!list_empty(&client->rx_queue)) {
+ struct vs_mbuf *mbuf = list_first_entry(&client->rx_queue,
+ struct vs_mbuf, queue);
+ struct vs_pbuf pbuf;
+ list_del(&mbuf->queue);
+ vs_client_block_io_getbufs_ack_read(state, &pbuf, mbuf);
+ vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+ }
+ spin_unlock(&client->rx_queue_lock);
+
+ if (client->blkdev) {
+ struct vs_block_device *blkdev = client->blkdev;
+ char service_remove[] = "REMOVING_SERVICE=1";
+ /* + 9 because "DEVNAME=" is 8 chars plus 1 for '\0' */
+ char devname[sizeof(blkdev->disk->disk_name) + 9];
+ char *envp[] = { service_remove, devname, NULL };
+
+ dev_dbg(&client->service->dev, "removing block disk\n");
+
+ /*
+ * Send a change event with DEVNAME to allow the block helper
+ * script to remove any server sessions which use either
+ * v${SERVICE_NAME} or ${DEVNAME}. The remove event generated
+ * by the session driver doesn't include DEVNAME so the only
+ * way for userspace to map SERVICE_NAME to DEVNAME is by the
+ * symlink added when the client service was created. If that
+ * symlink has been deleted, there's no other way to connect
+ * the two names.
+ */
+ snprintf(devname, sizeof(devname), "DEVNAME=%s",
+ blkdev->disk->disk_name);
+ kobject_uevent_env(&client->service->dev.kobj, KOBJ_CHANGE,
+ envp);
+
+ /*
+ * We are done with the device now. The block device will only
+ * get removed once there are no more users (e.g. userspace
+ * applications).
+ */
+ client->blkdev = NULL;
+ vs_block_device_put(blkdev);
+ }
+}
+
+static void vs_block_client_opened(struct vs_client_block_state *state)
+{
+ struct block_client *client = state_to_block_client(state);
+
+#if !defined(CONFIG_LBDAF) && !defined(CONFIG_64BIT)
+ if ((state->device_sectors * (state->sector_size >> 9))
+ >> (sizeof(sector_t) * 8)) {
+ dev_err(&client->service->dev,
+ "Client doesn't support full capacity large block devices\n");
+ vs_client_block_close(state);
+ return;
+ }
+#endif
+
+ /* Unblock the RX bounce tasklet. */
+ tasklet_enable(&client->rx_tasklet);
+
+ /*
+ * The block device allocation needs to sleep, so we defer it to a
+ * work queue.
+ */
+ queue_work(client->service->work_queue, &client->disk_creation_work);
+}
+
+static int vs_block_client_ack_read(struct vs_client_block_state *state,
+ void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+ struct block_client *client = state_to_block_client(state);
+ struct bio *bio = tag;
+ struct bio_vec *bvec;
+ int err = 0;
+ size_t bytes_read = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ struct bio_vec bvec_local;
+ struct bvec_iter iter;
+#else
+ int i;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ bvec = &bvec_local;
+ bio_for_each_segment(bvec_local, bio, iter)
+#else
+ bio_for_each_segment(bvec, bio, i)
+#endif
+ {
+ unsigned long flags;
+ void *buf;
+ if (vs_pbuf_size(&pbuf) < bytes_read + bvec->bv_len) {
+ dev_err(&client->service->dev,
+ "bio read overrun: %zu into %zu byte response, but need %zd bytes\n",
+ bytes_read, vs_pbuf_size(&pbuf),
+ (size_t)bvec->bv_len);
+ err = -EIO;
+ break;
+ }
+ buf = bvec_kmap_irq(bvec, &flags);
+ memcpy(buf, vs_pbuf_data(&pbuf) + bytes_read, bvec->bv_len);
+ flush_kernel_dcache_page(bvec->bv_page);
+ bvec_kunmap_irq(buf, &flags);
+ bytes_read += bvec->bv_len;
+ }
+
+ vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ if (err < 0)
+ bio->bi_error = err;
+ bio_endio(bio);
+#else
+ bio_endio(bio, err);
+#endif
+
+ return 0;
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data)
+{
+ struct block_client *client = (struct block_client *)data;
+ struct vs_mbuf *mbuf;
+ struct vs_pbuf pbuf;
+
+ spin_lock(&client->rx_queue_lock);
+
+ /* The list shouldn't be empty. */
+ if (WARN_ON(list_empty(&client->rx_queue))) {
+ spin_unlock(&client->rx_queue_lock);
+ return;
+ }
+
+ /* Get the next mbuf, and reschedule ourselves if there are more. */
+ mbuf = list_first_entry(&client->rx_queue, struct vs_mbuf, queue);
+ list_del(&mbuf->queue);
+ if (!list_empty(&client->rx_queue))
+ tasklet_schedule(&client->rx_tasklet);
+
+ spin_unlock(&client->rx_queue_lock);
+
+ /* Process the ack. */
+ vs_client_block_io_getbufs_ack_read(&client->client, &pbuf, mbuf);
+ vs_block_client_ack_read(&client->client, mbuf->priv, pbuf, mbuf);
+}
+
+static int vs_block_client_queue_ack_read(struct vs_client_block_state *state,
+ void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+ struct block_client *client = state_to_block_client(state);
+
+ spin_lock(&client->rx_queue_lock);
+ list_add_tail(&mbuf->queue, &client->rx_queue);
+ mbuf->priv = tag;
+ spin_unlock(&client->rx_queue_lock);
+
+ tasklet_schedule(&client->rx_tasklet);
+
+ wake_up(&state->service->quota_wq);
+
+ return 0;
+}
+
+static int vs_block_client_ack_write(struct vs_client_block_state *state,
+ void *tag)
+{
+ struct bio *bio = tag;
+
+ if (WARN_ON(!bio))
+ return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio_endio(bio);
+#else
+ bio_endio(bio, 0);
+#endif
+
+ wake_up(&state->service->quota_wq);
+
+ return 0;
+}
+
+static int vs_block_client_nack_io(struct vs_client_block_state *state,
+ void *tag, vservice_block_block_io_error_t err)
+{
+ struct bio *bio = tag;
+
+ if (WARN_ON(!bio))
+ return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = block_client_vs_to_linux_error(err);
+ bio_endio(bio);
+#else
+ bio_endio(bio, block_client_vs_to_linux_error(err));
+#endif
+
+ wake_up(&state->service->quota_wq);
+
+ return 0;
+}
+
+static struct vs_client_block block_client_driver = {
+ .rx_atomic = true,
+ .alloc = vs_block_client_alloc,
+ .release = vs_block_client_release,
+ .opened = vs_block_client_opened,
+ .closed = vs_block_client_closed,
+ .io = {
+ .ack_read = vs_block_client_queue_ack_read,
+ .nack_read = vs_block_client_nack_io,
+ .ack_write = vs_block_client_ack_write,
+ .nack_write = vs_block_client_nack_io,
+ }
+};
+
+static int __init vs_block_client_init(void)
+{
+ int err;
+
+ block_client_major = register_blkdev(0, CLIENT_BLKDEV_NAME);
+ if (block_client_major < 0) {
+ pr_err("Err registering blkdev\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ err = vservice_block_client_register(&block_client_driver,
+ "block_client_driver");
+ if (err)
+ goto fail_unregister_blkdev;
+
+ return 0;
+
+fail_unregister_blkdev:
+ unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+fail:
+ return err;
+}
+
+static void __exit vs_block_client_exit(void)
+{
+ vservice_block_client_unregister(&block_client_driver);
+ unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+}
+
+module_init(vs_block_client_init);
+module_exit(vs_block_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/block/vs_block_server.c b/drivers/block/vs_block_server.c
new file mode 100644
index 000000000000..42bc2734992b
--- /dev/null
+++ b/drivers/block/vs_block_server.c
@@ -0,0 +1,1185 @@
+/*
+ * drivers/block/vs_block_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice server driver
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#define VS_BLOCK_BLKDEV_DEFAULT_MODE FMODE_READ
+/* Must match Linux bio sector_size (512 bytes) */
+#define VS_BLOCK_BLK_DEF_SECTOR_SIZE 512
+/* XXX should lookup block device physical_block_size */
+#define VS_BLOCK_BLK_DEF_MIN_SECTORS 8
+
+/*
+ * Metadata for a request. Note that the bio must be embedded at the end of
+ * this structure, because it is allocated from a bioset.
+ */
+struct block_server_request {
+ struct block_server *server;
+ u32 tagid;
+ u32 size;
+ int op_err;
+ struct list_head list;
+ struct vs_pbuf pbuf;
+ struct vs_mbuf *mbuf;
+ bool bounced;
+ bool submitted;
+
+ struct bio bio;
+};
+
+struct block_server {
+ struct vs_server_block_state server;
+ struct vs_service_device *service;
+
+ struct block_device *bdev;
+ struct bio_set *bioset;
+
+ unsigned int sector_size;
+ bool started;
+
+ /* Bounced writes are deferred to keep memcpy off service queue */
+ struct list_head bounce_req_queue;
+ struct work_struct bounce_req_work;
+ spinlock_t bounce_req_lock;
+
+ /* Count of outstanding requests submitted to block layer */
+ atomic_t submitted_req_count;
+ wait_queue_head_t submitted_req_wq;
+
+ /* Completions are deferred because end_io may be in atomic context */
+ struct list_head completed_req_queue;
+ struct work_struct completed_req_work;
+ spinlock_t completed_req_lock;
+};
+
+#define state_to_block_server(state) \
+ container_of(state, struct block_server, server)
+
+#define dev_to_block_server(dev) \
+ state_to_block_server(dev_get_drvdata(dev))
+
+static inline vservice_block_block_io_error_t
+block_server_linux_to_vs_error(int err)
+{
+ /*
+ * This list is not exhaustive. For all other errors, we return
+ * unsupported_command.
+ */
+ switch (err) {
+ case -ECOMM:
+ case -EIO:
+ case -ENOMEM:
+ return VSERVICE_BLOCK_MEDIA_FAILURE;
+ case -ETIME:
+ case -ETIMEDOUT:
+ return VSERVICE_BLOCK_MEDIA_TIMEOUT;
+ case -EILSEQ:
+ return VSERVICE_BLOCK_INVALID_INDEX;
+ default:
+ if (err)
+ return VSERVICE_BLOCK_UNSUPPORTED_COMMAND;
+ return 0;
+ }
+}
+
+static inline u32 vs_req_num_sectors(struct block_server *server,
+ struct block_server_request *req)
+{
+ return req->size / server->sector_size;
+}
+
+static inline u64 vs_req_sector_index(struct block_server_request *req)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ return req->bio.bi_iter.bi_sector;
+#else
+ return req->bio.bi_sector;
+#endif
+}
+
+static void vs_block_server_closed(struct vs_server_block_state *state)
+{
+ struct block_server *server = state_to_block_server(state);
+ struct block_server_request *req;
+
+ /*
+ * Fail all requests that haven't been sent to the block layer yet.
+ */
+ spin_lock(&server->bounce_req_lock);
+ while (!list_empty(&server->bounce_req_queue)) {
+ req = list_first_entry(&server->bounce_req_queue,
+ struct block_server_request, list);
+ list_del(&req->list);
+ spin_unlock(&server->bounce_req_lock);
+ bio_io_error(&req->bio);
+ spin_lock(&server->bounce_req_lock);
+ }
+ spin_unlock(&server->bounce_req_lock);
+
+ /*
+ * Wait until all outstanding requests to the block layer are
+ * complete.
+ */
+ wait_event(server->submitted_req_wq,
+ !atomic_read(&server->submitted_req_count));
+
+ /*
+ * Discard all the completed requests.
+ */
+ spin_lock_irq(&server->completed_req_lock);
+ while (!list_empty(&server->completed_req_queue)) {
+ req = list_first_entry(&server->completed_req_queue,
+ struct block_server_request, list);
+ list_del(&req->list);
+ if (req->mbuf) {
+ spin_unlock_irq(&server->completed_req_lock);
+ if (bio_data_dir(&req->bio) == WRITE)
+ vs_server_block_io_free_req_write(state,
+ &req->pbuf, req->mbuf);
+ else
+ vs_server_block_io_free_ack_read(state,
+ &req->pbuf, req->mbuf);
+ spin_lock_irq(&server->completed_req_lock);
+ }
+ bio_put(&req->bio);
+ }
+ spin_unlock_irq(&server->completed_req_lock);
+}
+
+static ssize_t
+vs_block_server_readonly_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int err;
+ unsigned long val;
+
+ vs_service_state_lock(server->service);
+ if (server->started) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ goto unlock;
+
+ if (bdev_read_only(server->bdev) && !val) {
+ dev_info(dev,
+ "Cannot set %s to read/write: read-only device\n",
+ server->service->name);
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ server->server.readonly = val;
+ err = count;
+
+unlock:
+ vs_service_state_unlock(server->service);
+
+ return err;
+}
+
+static ssize_t
+vs_block_server_readonly_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int cnt;
+
+ vs_service_state_lock(server->service);
+ cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->server.readonly);
+ vs_service_state_unlock(server->service);
+
+ return cnt;
+}
+
+static ssize_t
+vs_block_server_start_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int err;
+ unsigned long val;
+
+ vs_service_state_lock(server->service);
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ goto unlock;
+
+ if (!val && server->started) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (val && !server->started) {
+ server->started = true;
+
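+		/* Complete any open request that was deferred while the service was not started. */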
+ if (server->server.state.base.statenum ==
+ VSERVICE_BASE_STATE_CLOSED__OPEN)
+ vs_server_block_open_complete(&server->server,
+ VS_SERVER_RESP_SUCCESS);
+ }
+
+ err = count;
+unlock:
+ vs_service_state_unlock(server->service);
+
+ return err;
+}
+
+static ssize_t
+vs_block_server_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int cnt;
+
+ vs_service_state_lock(server->service);
+ cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->started);
+ vs_service_state_unlock(server->service);
+
+ return cnt;
+}
+
+static DEVICE_ATTR(start, S_IWUSR | S_IRUSR, vs_block_server_start_show,
+ vs_block_server_start_store);
+static DEVICE_ATTR(readonly, S_IWUSR | S_IRUSR, vs_block_server_readonly_show,
+ vs_block_server_readonly_store);
+
+static struct attribute *vs_block_server_dev_attrs[] = {
+ &dev_attr_start.attr,
+ &dev_attr_readonly.attr,
+ NULL,
+};
+
+static const struct attribute_group vs_block_server_attr_group = {
+ .attrs = vs_block_server_dev_attrs
+};
+
+/*
+ * Invoked by vs_server_block_handle_req_open() when an open request is
+ * received, to perform server-specific initialisation.
+ *
+ * This is where the "delayed start" feature is enforced: if the service has
+ * not yet been started via sysfs, the open is deferred and completed later
+ * by vs_block_server_start_store().
+ */
+static vs_server_response_type_t
+vs_block_server_open(struct vs_server_block_state *_state)
+{
+ struct block_server *server = state_to_block_server(_state);
+
+ return (server->started) ? VS_SERVER_RESP_SUCCESS :
+ VS_SERVER_RESP_EXPLICIT_COMPLETE;
+}
+
+static int
+vs_block_server_complete_req_read(struct block_server_request *req)
+{
+ struct block_server *server = req->server;
+ struct vs_server_block_state *state = &server->server;
+ int err = -EIO;
+
+ if (req->op_err) {
+ err = req->op_err;
+ dev_dbg(&server->service->dev,
+ "read nack, err %d sector 0x%llx num 0x%x\n",
+ err, vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ if (req->mbuf)
+ vs_server_block_io_free_ack_read(state, &req->pbuf,
+ req->mbuf);
+
+ err = vs_server_block_io_send_nack_read(state, req->tagid,
+ block_server_linux_to_vs_error(err),
+ GFP_KERNEL);
+ } else {
+ if (req->bounced && !req->mbuf) {
+ req->mbuf = vs_server_block_io_alloc_ack_read(
+ &server->server, &req->pbuf,
+ GFP_KERNEL);
+ if (IS_ERR(req->mbuf)) {
+ err = PTR_ERR(req->mbuf);
+ req->mbuf = NULL;
+ }
+ }
+
+ if (req->bounced && req->mbuf) {
+ int i;
+ struct bio_vec *bv;
+ void *data = req->pbuf.data;
+
+ if (vs_pbuf_resize(&req->pbuf, req->size) < 0) {
+ bio_io_error(&req->bio);
+ return 0;
+ }
+
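+			/*
+			 * Copy the bounced read data into the ack message and
+			 * release the bounce pages.
+			 */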
+ bio_for_each_segment_all(bv, &req->bio, i) {
+ memcpy(data, page_address(bv->bv_page) +
+ bv->bv_offset, bv->bv_len);
+ data += bv->bv_len;
+ __free_page(bv->bv_page);
+ }
+ req->bounced = false;
+ }
+
+ if (req->mbuf) {
+ dev_vdbg(&server->service->dev,
+ "read ack, sector 0x%llx num 0x%x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ err = vs_server_block_io_send_ack_read(state,
+ req->tagid, req->pbuf, req->mbuf);
+
+ if (err && (err != -ENOBUFS)) {
+ vs_server_block_io_free_ack_read(state,
+ &req->pbuf, req->mbuf);
+ req->mbuf = NULL;
+ }
+ } else {
+ WARN_ON(!err || !req->bounced);
+ }
+ }
+
+ if (err && (err != -ENOBUFS))
+ dev_dbg(&server->service->dev,
+ "error %d sending read reply\n", err);
+ else if (err == -ENOBUFS)
+ dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+ return err;
+}
+
+static int
+vs_block_server_complete_req_write(struct block_server_request *req)
+{
+ struct block_server *server = req->server;
+ struct vs_server_block_state *state = &server->server;
+ int err;
+
+ WARN_ON(req->mbuf);
+
+ if (req->op_err) {
+ dev_dbg(&server->service->dev,
+ "write nack, err %d sector 0x%llx num 0x%x\n",
+ req->op_err, vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ err = vs_server_block_io_send_nack_write(state, req->tagid,
+ block_server_linux_to_vs_error(req->op_err),
+ GFP_KERNEL);
+ } else {
+ dev_vdbg(&server->service->dev,
+ "write ack, sector 0x%llx num 0x%x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ err = vs_server_block_io_send_ack_write(state, req->tagid,
+ GFP_KERNEL);
+ }
+
+ if (err && (err != -ENOBUFS))
+ dev_dbg(&server->service->dev,
+ "error %d sending write reply\n", err);
+ else if (err == -ENOBUFS)
+ dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+ return err;
+}
+
+static int vs_block_server_complete_req(struct block_server *server,
+ struct block_server_request *req)
+{
+ int err;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ req->bio.bi_iter.bi_idx = 0;
+#else
+ req->bio.bi_idx = 0;
+#endif
+ if (!vs_state_lock_safe(&server->server))
+ return -ENOLINK;
+
+ if (bio_data_dir(&req->bio) == WRITE)
+ err = vs_block_server_complete_req_write(req);
+ else
+ err = vs_block_server_complete_req_read(req);
+
+ vs_state_unlock(&server->server);
+
+ if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev,
+				"bio %pK response out of quota, will retry\n",
+				&req->bio);
+
+ return err;
+}
+
+static void vs_block_server_complete_requests_work(struct work_struct *work)
+{
+ struct block_server *server = container_of(work, struct block_server,
+ completed_req_work);
+ struct block_server_request *req;
+
+ vs_service_send_batch_start(server->service, false);
+
+ /*
+ * Send ack/nack responses for each completed request. If a request
+ * cannot be sent because we are over-quota then this function will
+ * return with a non-empty list, and the tx_ready handler will
+ * reschedule us when we are back under quota. In all other cases
+ * this function will return with an empty list.
+ */
+ spin_lock_irq(&server->completed_req_lock);
+ while (!list_empty(&server->completed_req_queue)) {
+ int err;
+ req = list_first_entry(&server->completed_req_queue,
+ struct block_server_request, list);
+ dev_vdbg(&server->service->dev, "complete bio %pK\n", &req->bio);
+ list_del(&req->list);
+ spin_unlock_irq(&server->completed_req_lock);
+
+ err = vs_block_server_complete_req(server, req);
+ if (err == -ENOBUFS) {
+ dev_vdbg(&server->service->dev, "defer bio %pK\n", &req->bio);
+ /*
+ * Couldn't send the completion; re-queue the request
+ * and exit. We'll start again when more quota becomes
+ * available.
+ */
+ spin_lock_irq(&server->completed_req_lock);
+ list_add_tail(&req->list,
+ &server->completed_req_queue);
+ break;
+ }
+
+ dev_vdbg(&server->service->dev, "free bio %pK err %d\n", &req->bio, err);
+ bio_put(&req->bio);
+
+ spin_lock_irq(&server->completed_req_lock);
+ }
+ spin_unlock_irq(&server->completed_req_lock);
+
+ vs_service_send_batch_end(server->service, true);
+}
+
+static int vs_block_server_tx_ready(struct vs_server_block_state *state)
+{
+ struct block_server *server = state_to_block_server(state);
+
+ schedule_work(&server->completed_req_work);
+
+ return 0;
+}
+
+static bool vs_block_can_map_pbuf(struct request_queue *q,
+ struct vs_pbuf *pbuf, size_t size)
+{
+ /* The pbuf must satisfy the driver's alignment requirements. */
+ if (!blk_rq_aligned(q, (unsigned long)pbuf->data, size))
+ return false;
+
+ /*
+	 * bios can only contain pages. Sometimes the pbuf is in an IO region
+ * that has no struct page (e.g. a channel primary buffer), in which
+ * case we can't map it into a bio.
+ */
+ /* FIXME: Redmine issue #930 - philip. */
+ if (!pfn_valid(__pa(pbuf->data) >> PAGE_SHIFT))
+ return false;
+
+ return true;
+}
+
+static int vs_block_bio_map_pbuf(struct bio *bio, struct vs_pbuf *pbuf)
+{
+ int offset = offset_in_page((unsigned long)pbuf->data);
+ void *ptr = pbuf->data;
+ int size = pbuf->size;
+
+ while (size > 0) {
+ unsigned bytes = min_t(unsigned, PAGE_SIZE - offset, size);
+
+ if (bio_add_page(bio, virt_to_page(ptr), bytes,
+ offset) < bytes)
+ return -EIO;
+
+ ptr += bytes;
+ size -= bytes;
+ offset = 0;
+ }
+
+ return 0;
+}
+
+/* Read request handling */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_read_done(struct bio *bio, int err)
+#else
+static void vs_block_server_read_done(struct bio *bio)
+#endif
+{
+ unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ int err = bio->bi_error;
+#endif
+ struct block_server_request *req = container_of(bio,
+ struct block_server_request, bio);
+ struct block_server *server = req->server;
+ req->op_err = err;
+
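+	/*
+	 * Requests that already hold an ack buffer are queued at the head so
+	 * they are completed, and their buffer freed, before requests that
+	 * still need to allocate one.
+	 */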
+ spin_lock_irqsave(&server->completed_req_lock, flags);
+ if (req->mbuf)
+ list_add(&req->list, &server->completed_req_queue);
+ else
+ list_add_tail(&req->list, &server->completed_req_queue);
+ spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+ if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+ wake_up_all(&server->submitted_req_wq);
+
+ schedule_work(&server->completed_req_work);
+}
+
+/*
+ * TODO: this may need to split and chain the bio if it exceeds the physical
+ * segment limit of the device. Not clear whose responsibility that is; queue
+ * might do it for us (if there is one)
+ */
+#define vs_block_make_request(bio) generic_make_request(bio)
+
+static int vs_block_submit_read(struct block_server *server,
+ struct block_server_request *req, gfp_t gfp)
+{
+ struct request_queue *q = bdev_get_queue(server->bdev);
+ struct bio *bio = &req->bio;
+ int size = req->size;
+ int err = 0;
+
+ if (req->mbuf && vs_block_can_map_pbuf(q, &req->pbuf, size)) {
+ /*
+ * The mbuf is valid and the driver can directly access the
+ * pbuf, so we don't need a bounce buffer. Map the pbuf
+ * directly into the bio.
+ */
+ if (vs_pbuf_resize(&req->pbuf, size) < 0)
+ err = -EIO;
+ if (!err)
+ err = vs_block_bio_map_pbuf(bio, &req->pbuf);
+ } else {
+ /* We need a bounce buffer. First set up the bvecs. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_size = size;
+#else
+ bio->bi_size = size;
+#endif
+
+ while (size > 0) {
+ struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+ BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+ bvec->bv_page = NULL; /* Allocated below */
+ bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+ bvec->bv_offset = 0;
+
+ bio->bi_vcnt++;
+ size -= bvec->bv_len;
+ }
+
+ err = bio_alloc_pages(bio, gfp);
+ if (!err) {
+ blk_recount_segments(q, bio);
+ req->bounced = true;
+ }
+ }
+
+ if (err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = err;
+ bio_endio(bio);
+#else
+ bio_endio(bio, err);
+#endif
+ } else {
+ dev_vdbg(&server->service->dev,
+ "submit read req sector %#llx count %#x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+ req->submitted = true;
+ atomic_inc(&server->submitted_req_count);
+ vs_block_make_request(bio);
+ }
+
+ return 0;
+}
+
+static int vs_block_server_io_req_read(struct vs_server_block_state *state,
+ u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+ bool flush)
+{
+ struct block_server *server = state_to_block_server(state);
+ struct bio *bio;
+ struct block_server_request *req;
+ unsigned size = num_sects * server->sector_size;
+ unsigned op_flags = 0;
+
+ /*
+ * This nr_pages calculation assumes that the pbuf data is offset from
+ * the start of the size-aligned message buffer by more than 0 but
+ * less than one sector, which is always true for the current message
+ * layout generated by mill when we assume 512-byte sectors.
+ */
+ unsigned nr_pages = 1 + (size >> PAGE_SHIFT);
+
+ bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, server->bioset);
+ if (!bio)
+ return -ENOMEM;
+ dev_vdbg(&server->service->dev, "alloc r bio %pK\n", bio);
+ req = container_of(bio, struct block_server_request, bio);
+
+ req->server = server;
+ req->tagid = tagid;
+ req->op_err = 0;
+ req->mbuf = NULL;
+ req->size = size;
+ req->bounced = false;
+ req->submitted = false;
+
+ if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ op_flags |= REQ_PREFLUSH;
+#else
+ op_flags |= REQ_FLUSH;
+#endif
+ }
+ if (nodelay) {
+ op_flags |= REQ_SYNC;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+ bio->bi_sector = (sector_t)sector_index;
+#endif
+ bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ bio_set_op_attrs(bio, REQ_OP_READ, op_flags);
+#else
+ bio->bi_rw = READ | op_flags;
+#endif
+ bio->bi_end_io = vs_block_server_read_done;
+
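+	/*
+	 * Try to allocate the read ack message up front so the data can be
+	 * read directly into it; fall back to a bounce buffer if the
+	 * allocation fails due to quota (-ENOBUFS).
+	 */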
+ req->mbuf = vs_server_block_io_alloc_ack_read(state, &req->pbuf,
+ GFP_KERNEL);
+ if (IS_ERR(req->mbuf) && (PTR_ERR(req->mbuf) == -ENOBUFS)) {
+ /* Fall back to a bounce buffer */
+ req->mbuf = NULL;
+ } else if (IS_ERR(req->mbuf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = PTR_ERR(req->mbuf);
+ bio_endio(bio);
+#else
+ bio_endio(bio, PTR_ERR(req->mbuf));
+#endif
+ return 0;
+ }
+
+ return vs_block_submit_read(server, req, GFP_KERNEL);
+}
+
+/* Write request handling */
+static int vs_block_submit_bounced_write(struct block_server *server,
+ struct block_server_request *req, gfp_t gfp)
+{
+ struct bio *bio = &req->bio;
+ void *data = req->pbuf.data;
+ struct bio_vec *bv;
+ int i;
+
+ if (bio_alloc_pages(bio, gfp | __GFP_NOWARN) < 0)
+ return -ENOMEM;
+ blk_recount_segments(bdev_get_queue(server->bdev), bio);
+ req->bounced = true;
+
+ /* Copy all the data into the bounce buffer */
+ bio_for_each_segment_all(bv, bio, i) {
+ memcpy(page_address(bv->bv_page) + bv->bv_offset, data,
+ bv->bv_len);
+ data += bv->bv_len;
+ }
+
+ vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+ req->mbuf);
+ req->mbuf = NULL;
+
+ dev_vdbg(&server->service->dev,
+ "submit bounced write req sector %#llx count %#x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+ req->submitted = true;
+ atomic_inc(&server->submitted_req_count);
+ vs_block_make_request(bio);
+
+ return 0;
+}
+
+static void vs_block_server_write_bounce_work(struct work_struct *work)
+{
+ struct block_server *server = container_of(work, struct block_server,
+ bounce_req_work);
+ struct block_server_request *req;
+
+ spin_lock(&server->bounce_req_lock);
+ while (!list_empty(&server->bounce_req_queue)) {
+ req = list_first_entry(&server->bounce_req_queue,
+ struct block_server_request, list);
+ dev_vdbg(&server->service->dev, "write bio %pK\n", &req->bio);
+ list_del(&req->list);
+ spin_unlock(&server->bounce_req_lock);
+
+ if (vs_block_submit_bounced_write(server, req,
+ GFP_KERNEL) == -ENOMEM) {
+ spin_lock(&server->bounce_req_lock);
+ list_add(&req->list, &server->bounce_req_queue);
+ spin_unlock(&server->bounce_req_lock);
+ schedule_work(work);
+ return;
+ }
+
+ spin_lock(&server->bounce_req_lock);
+ }
+ spin_unlock(&server->bounce_req_lock);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_write_done(struct bio *bio, int err)
+#else
+static void vs_block_server_write_done(struct bio *bio)
+#endif
+{
+ unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ int err = bio->bi_error;
+#endif
+ struct block_server_request *req = container_of(bio,
+ struct block_server_request, bio);
+ struct block_server *server = req->server;
+
+ if (req->bounced) {
+ int i;
+ struct bio_vec *bv;
+ bio_for_each_segment_all(bv, bio, i)
+ __free_page(bv->bv_page);
+ } else if (req->mbuf) {
+ vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+ req->mbuf);
+ req->mbuf = NULL;
+ }
+
+ if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+ wake_up_all(&server->submitted_req_wq);
+
+ req->op_err = err;
+
+ spin_lock_irqsave(&server->completed_req_lock, flags);
+ list_add_tail(&req->list, &server->completed_req_queue);
+ spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+ schedule_work(&server->completed_req_work);
+}
+
+static int vs_block_server_io_req_write(struct vs_server_block_state *state,
+ u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+ bool flush, bool commit, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+ struct block_server *server = state_to_block_server(state);
+ struct request_queue *q = bdev_get_queue(server->bdev);
+ struct bio *bio;
+ struct block_server_request *req;
+ unsigned long data = (unsigned long)pbuf.data;
+ unsigned long start = data >> PAGE_SHIFT;
+ unsigned long end = (data + pbuf.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int err;
+ unsigned op_flags = 0;
+
+ bio = bio_alloc_bioset(GFP_KERNEL, end - start, server->bioset);
+ if (!bio)
+ return -ENOMEM;
+ dev_vdbg(&server->service->dev, "alloc w bio %pK\n", bio);
+ req = container_of(bio, struct block_server_request, bio);
+
+ req->server = server;
+ req->tagid = tagid;
+ req->op_err = 0;
+ req->mbuf = mbuf;
+ req->pbuf = pbuf;
+ req->size = server->sector_size * num_sects;
+ req->bounced = false;
+ req->submitted = false;
+
+ if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ op_flags |= REQ_PREFLUSH;
+#else
+ op_flags |= REQ_FLUSH;
+#endif
+ }
+ if (commit) {
+ op_flags |= REQ_FUA;
+ }
+ if (nodelay) {
+ op_flags |= REQ_SYNC;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+ bio->bi_sector = (sector_t)sector_index;
+#endif
+ bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
+#else
+ bio->bi_rw = WRITE | op_flags;
+#endif
+ bio->bi_end_io = vs_block_server_write_done;
+
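+	/* The write payload must cover the requested number of sectors. */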
+ if (pbuf.size < req->size) {
+ err = -EINVAL;
+ goto fail_bio;
+ }
+ if (WARN_ON(pbuf.size > req->size))
+ pbuf.size = req->size;
+
+ if (state->readonly) {
+ err = -EROFS;
+ goto fail_bio;
+ }
+
+ if (!vs_block_can_map_pbuf(q, &req->pbuf, req->pbuf.size)) {
+ /* We need a bounce buffer. First set up the bvecs. */
+ int size = pbuf.size;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_size = size;
+#else
+ bio->bi_size = size;
+#endif
+
+ while (size > 0) {
+ struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+ BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+ bvec->bv_page = NULL; /* Allocated later */
+ bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+ bvec->bv_offset = 0;
+
+ bio->bi_vcnt++;
+ size -= bvec->bv_len;
+ }
+
+ /*
+ * Defer the rest so we don't have to hold the state lock
+ * during alloc_page & memcpy
+ */
+ spin_lock(&server->bounce_req_lock);
+ list_add_tail(&req->list, &server->bounce_req_queue);
+ spin_unlock(&server->bounce_req_lock);
+ schedule_work(&server->bounce_req_work);
+
+ return 0;
+ }
+
+ /* No bounce needed; map the pbuf directly. */
+ err = vs_block_bio_map_pbuf(bio, &pbuf);
+ if (err < 0)
+ goto fail_bio;
+
+ dev_vdbg(&server->service->dev,
+ "submit direct write req sector %#llx count %#x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+ req->submitted = true;
+ atomic_inc(&server->submitted_req_count);
+ vs_block_make_request(bio);
+
+ return 0;
+
+fail_bio:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = err;
+ bio_endio(bio);
+#else
+ bio_endio(bio, err);
+#endif
+ return 0;
+}
+
+static struct block_device *
+vs_block_server_find_by_name(struct block_server *server)
+{
+ struct block_device *bdev = NULL;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ class_dev_iter_init(&iter, &block_class, NULL, NULL);
+ while (1) {
+ dev = class_dev_iter_next(&iter);
+ if (!dev)
+ break;
+
+ if (strcmp(dev_name(dev), server->service->name) == 0) {
+ bdev = blkdev_get_by_dev(dev->devt,
+ VS_BLOCK_BLKDEV_DEFAULT_MODE, NULL);
+ if (!IS_ERR_OR_NULL(bdev))
+ break;
+ }
+ }
+ class_dev_iter_exit(&iter);
+
+ if (!dev || IS_ERR_OR_NULL(bdev))
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(&server->service->dev, "Attached to block device %s (%d:%d)\n",
+ dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+ return bdev;
+}
+
+static struct block_device *
+vs_block_server_find_by_path(struct block_server *server, const char *base_path)
+{
+ struct block_device *bdev;
+ char *bdev_path;
+
+ bdev_path = kasprintf(GFP_KERNEL, "%s/%s", base_path,
+ server->service->name);
+ if (!bdev_path)
+ return ERR_PTR(-ENOMEM);
+
+ bdev = blkdev_get_by_path(bdev_path, VS_BLOCK_BLKDEV_DEFAULT_MODE,
+ NULL);
+	if (!IS_ERR_OR_NULL(bdev))
+		dev_dbg(&server->service->dev,
+				"Attached to block device %s\n", bdev_path);
+
+ kfree(bdev_path);
+
+ if (!bdev)
+ return ERR_PTR(-ENODEV);
+ return bdev;
+}
+
+static struct block_device *
+vs_block_server_attach_block_device(struct block_server *server)
+{
+ const char *paths[] = {
+ "/dev",
+ "/dev/block",
+ "/dev/mapper",
+ "/dev/disk/by-partlabel",
+ "/dev/disk/by-label",
+ "/dev/disk/by-partuuid",
+ "/dev/disk/by-uuid"
+ };
+ struct block_device *bdev;
+ int i;
+
+ /*
+ * Try first to look the block device up by path. This is done because
+ * the name exposed to user-space in /dev/ is not necessarily the name
+ * being used inside the kernel for the device.
+ */
+ for (i = 0; i < ARRAY_SIZE(paths); i++) {
+ bdev = vs_block_server_find_by_path(server, paths[i]);
+ if (!IS_ERR(bdev))
+ break;
+ }
+ if (i == ARRAY_SIZE(paths)) {
+ /*
+ * Couldn't find the block device in any of the usual places.
+ * Try to match it against the kernel's device name. If the
+ * name of the service and the name of a device in the block
+ * class match then attempt to look the block device up by the
+ * dev_t (major/minor) value.
+ */
+ bdev = vs_block_server_find_by_name(server);
+ }
+ if (IS_ERR(bdev))
+ return bdev;
+
+	/* XXX get block device physical block size */
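+	/*
+	 * Export a protocol sector of VS_BLOCK_BLK_DEF_MIN_SECTORS Linux
+	 * sectors (4KB), and round the per-message segment size down to a
+	 * whole number of 512-byte Linux sectors.
+	 */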
+ server->sector_size = VS_BLOCK_BLK_DEF_SECTOR_SIZE;
+ server->server.segment_size = round_down(
+ vs_service_max_mbuf_size(server->service) -
+ sizeof(vs_message_id_t), server->sector_size);
+ server->server.sector_size = server->sector_size *
+ VS_BLOCK_BLK_DEF_MIN_SECTORS;
+ server->server.device_sectors = bdev->bd_part->nr_sects /
+ VS_BLOCK_BLK_DEF_MIN_SECTORS;
+ if (bdev_read_only(bdev))
+ server->server.readonly = true;
+ server->server.flushable = true;
+ server->server.committable = true;
+
+ return bdev;
+}
+
+static struct vs_server_block_state *
+vs_block_server_alloc(struct vs_service_device *service)
+{
+ struct block_server *server;
+ int err;
+
+ server = kzalloc(sizeof(*server), GFP_KERNEL);
+ if (!server)
+ return NULL;
+
+ server->service = service;
+ server->started = false;
+ INIT_LIST_HEAD(&server->bounce_req_queue);
+ INIT_WORK(&server->bounce_req_work, vs_block_server_write_bounce_work);
+ spin_lock_init(&server->bounce_req_lock);
+ atomic_set(&server->submitted_req_count, 0);
+ init_waitqueue_head(&server->submitted_req_wq);
+ INIT_LIST_HEAD(&server->completed_req_queue);
+ INIT_WORK(&server->completed_req_work,
+ vs_block_server_complete_requests_work);
+ spin_lock_init(&server->completed_req_lock);
+
+ server->bdev = vs_block_server_attach_block_device(server);
+ if (IS_ERR(server->bdev)) {
+ dev_err(&server->service->dev,
+ "No appropriate block device was found to satisfy the service name %s - error %ld\n",
+ server->service->name, PTR_ERR(server->bdev));
+ goto fail_attach_device;
+ }
+
+ dev_set_drvdata(&service->dev, &server->server);
+
+ err = sysfs_create_group(&service->dev.kobj,
+ &vs_block_server_attr_group);
+ if (err) {
+ dev_err(&service->dev,
+ "Failed to create attribute group for service %s\n",
+ service->name);
+ goto fail_create_group;
+ }
+
+ /*
+ * We know the upper bound on simultaneously active bios (i.e. the
+ * smaller of the in quota, and the sum of the read and write command
+ * tag limits), so we can pre-allocate that many, and hopefully never
+ * fail to allocate one in a request handler.
+ *
+ * However, allocation may fail if the number of pages (and thus
+ * bvecs) in a request exceeds BIO_INLINE_VECS (which is hard-coded to
+ * 4 in all mainline kernels). That possibility is the only reason we
+ * can't enable rx_atomic for this driver.
+ */
+ server->bioset = bioset_create(min_t(unsigned, service->recv_quota,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING +
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING),
+ offsetof(struct block_server_request, bio));
+ if (!server->bioset) {
+ dev_err(&service->dev,
+ "Failed to allocate bioset for service %s\n",
+ service->name);
+ goto fail_create_bioset;
+ }
+
+ dev_dbg(&service->dev, "New block server %pK\n", server);
+
+ return &server->server;
+
+fail_create_bioset:
+ sysfs_remove_group(&server->service->dev.kobj,
+ &vs_block_server_attr_group);
+fail_create_group:
+ dev_set_drvdata(&service->dev, NULL);
+ blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+fail_attach_device:
+ kfree(server);
+
+ return NULL;
+}
+
+static void vs_block_server_release(struct vs_server_block_state *state)
+{
+ struct block_server *server = state_to_block_server(state);
+
+ cancel_work_sync(&server->bounce_req_work);
+ cancel_work_sync(&server->completed_req_work);
+
+ blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+
+ sysfs_remove_group(&server->service->dev.kobj,
+ &vs_block_server_attr_group);
+
+ bioset_free(server->bioset);
+
+ kfree(server);
+}
+
+static struct vs_server_block block_server_driver = {
+ .alloc = vs_block_server_alloc,
+ .release = vs_block_server_release,
+ .open = vs_block_server_open,
+ .closed = vs_block_server_closed,
+ .tx_ready = vs_block_server_tx_ready,
+ .io = {
+ .req_read = vs_block_server_io_req_read,
+ .req_write = vs_block_server_io_req_write,
+ },
+
+ /* Large default quota for batching read/write commands */
+ .in_quota_best = 32,
+ .out_quota_best = 32,
+};
+
+static int __init vs_block_server_init(void)
+{
+ return vservice_block_server_register(&block_server_driver,
+ "block_server_driver");
+}
+
+static void __exit vs_block_server_exit(void)
+{
+ vservice_block_server_unregister(&block_server_driver);
+}
+
+module_init(vs_block_server_init);
+module_exit(vs_block_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b0d01813a2ce..87d8c3c4d4d2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -623,3 +623,49 @@ config MSM_RDBG
endmenu
+config OKL4_PIPE
+ bool "OKL4 Pipe Driver"
+ depends on OKL4_GUEST
+ default n
+ help
+ Virtual pipe driver for the OKL4 Microvisor. This driver allows
+ OKL4 Microvisor pipes to be exposed directly to user level as
+ character devices.
+
+config VSERVICES_SERIAL
+ tristate
+
+config VSERVICES_SERIAL_SERVER
+ tristate "Virtual Services serial server"
+ depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+ select VSERVICES_SERIAL
+ select VSERVICES_PROTOCOL_SERIAL_SERVER
+ default y
+ help
+	  Select this option if you want support for server-side Virtual
+ Services serial. A virtual serial service behaves similarly to
+ a UNIX pseudo terminal (pty), and does not require any physical
+ serial hardware. Virtual serial devices are typically called
+ /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_SERIAL_CLIENT
+ tristate "Virtual Services serial client"
+ depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+ select VSERVICES_SERIAL
+ select VSERVICES_PROTOCOL_SERIAL_CLIENT
+ default y
+ help
+	  Select this option if you want support for client-side Virtual
+ Services serial. A virtual serial service behaves similarly to
+ a UNIX pseudo terminal (pty), and does not require any physical
+ serial hardware. Virtual serial devices are typically called
+ /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_VTTY_COUNT
+ int "Maximum number of Virtual Services serial devices"
+ depends on VSERVICES_SERIAL
+ range 0 256
+ default "8"
+ help
+ The maximum number of Virtual Services serial devices to support.
+ This limit applies to both the client and server.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 81283c4642f6..a00142ae018a 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -67,3 +67,11 @@ ifdef CONFIG_COMPAT
obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o
endif
obj-$(CONFIG_MSM_RDBG) += rdbg.o
+obj-$(CONFIG_OKL4_PIPE) += okl4_pipe.o
+CFLAGS_okl4_pipe.o += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL) += vservices_serial.o
+CFLAGS_vservices_serial.o += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_CLIENT) += vs_serial_client.o
+CFLAGS_vs_serial_client.o += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_SERVER) += vs_serial_server.o
+CFLAGS_vs_serial_server.o += -Werror
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index ce0c7bb2fbef..46de6a67a0a4 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -194,6 +194,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
}
found = 0;
+ mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients && !found; i++) {
if ((driver->client_map[i].pid != pid) ||
(driver->client_map[i].pid == 0))
@@ -207,6 +208,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
pr_debug("diag: wake up logging process\n");
wake_up_interruptible(&driver->wait_q);
}
+ mutex_unlock(&driver->diagchar_mutex);
if (!found)
return -EINVAL;
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 651fb1502296..8aae4b4dca0b 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -385,6 +385,8 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
goto end;
}
}
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
if (write_len > 0) {
err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
@@ -392,18 +394,18 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
if (err) {
pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
__func__, err);
- goto end;
+ goto end_write;
}
}
- mutex_unlock(&fwd_info->data_mutex);
- mutex_unlock(&driver->hdlc_disable_mutex);
+
diagfwd_queue_read(fwd_info);
return;
end:
- diag_ws_release();
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
+end_write:
+ diag_ws_release();
if (buf) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
@@ -690,24 +692,26 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
}
}
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+
if (write_len > 0) {
err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
temp_buf->ctxt);
if (err) {
pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
__func__, err);
- goto end;
+ goto end_write;
}
}
- mutex_unlock(&fwd_info->data_mutex);
- mutex_unlock(&driver->hdlc_disable_mutex);
diagfwd_queue_read(fwd_info);
return;
end:
- diag_ws_release();
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
+end_write:
+ diag_ws_release();
if (temp_buf) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
diff --git a/drivers/char/okl4_pipe.c b/drivers/char/okl4_pipe.c
new file mode 100644
index 000000000000..e7a0d8a1e963
--- /dev/null
+++ b/drivers/char/okl4_pipe.c
@@ -0,0 +1,677 @@
+/*
+ * drivers/char/okl4_pipe.c
+ *
+ * Copyright (c) 2015 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Pipes driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "pipe%d", where %d is the pipe number, which must be
+ * unique and less than MAX_PIPES.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <asm/uaccess.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if defined(CONFIG_OKL4_VIRTUALISATION)
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
+#define __devinit
+#define __devexit
+#define __devexit_p(x) x
+#endif
+
+#define DRIVER_NAME "okl4-pipe"
+#define DEVICE_NAME "okl4-pipe"
+
+#ifndef CONFIG_OF
+#error "okl4-pipe driver only supported on device tree kernels"
+#endif
+
+#define MAX_PIPES 8
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static int okl4_pipe_major;
+static struct class *okl4_pipe_class;
+
+/* This can be extended if required */
+struct okl4_pipe_mv {
+ int pipe_id;
+};
+
+struct okl4_pipe {
+ struct okl4_pipe_data_buffer *write_buf;
+ okl4_kcap_t pipe_tx_kcap;
+ okl4_kcap_t pipe_rx_kcap;
+ int tx_irq;
+ int rx_irq;
+ size_t max_msg_size;
+ int ref_count;
+ struct mutex pipe_mutex;
+ spinlock_t pipe_lock;
+
+ struct platform_device *pdev;
+ struct cdev cdev;
+
+ bool reset;
+ bool tx_maybe_avail;
+ bool rx_maybe_avail;
+
+ wait_queue_head_t rx_wait_q;
+ wait_queue_head_t tx_wait_q;
+ wait_queue_head_t poll_wait_q;
+
+ char *rx_buf;
+ size_t rx_buf_count;
+};
+static struct okl4_pipe pipes[MAX_PIPES];
+
+static okl4_error_t
+okl4_pipe_control(okl4_kcap_t kcap, uint8_t control)
+{
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x, control);
+ return _okl4_sys_pipe_control(kcap, x);
+}
+
+static irqreturn_t
+okl4_pipe_tx_irq(int irq, void *dev)
+{
+ struct okl4_pipe *pipe = dev;
+ okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
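+	/* Check which pipe state changes were signalled in the VIRQ payload. */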
+ spin_lock(&pipe->pipe_lock);
+ if (okl4_pipe_state_gettxavailable(&payload))
+ pipe->tx_maybe_avail = true;
+ if (okl4_pipe_state_getreset(&payload)) {
+ pipe->reset = true;
+ pipe->tx_maybe_avail = true;
+ }
+ spin_unlock(&pipe->pipe_lock);
+
+ wake_up_interruptible(&pipe->tx_wait_q);
+ wake_up_interruptible(&pipe->poll_wait_q);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+okl4_pipe_rx_irq(int irq, void *dev)
+{
+ struct okl4_pipe *pipe = dev;
+ okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+ spin_lock(&pipe->pipe_lock);
+ if (okl4_pipe_state_getrxavailable(&payload))
+ pipe->rx_maybe_avail = true;
+ if (okl4_pipe_state_getreset(&payload)) {
+ pipe->reset = true;
+ pipe->rx_maybe_avail = true;
+ }
+ spin_unlock(&pipe->pipe_lock);
+
+ wake_up_interruptible(&pipe->rx_wait_q);
+ wake_up_interruptible(&pipe->poll_wait_q);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t
+okl4_pipe_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct okl4_pipe_mv *priv = filp->private_data;
+ int pipe_id = priv->pipe_id;
+ struct okl4_pipe *pipe = &pipes[pipe_id];
+ struct _okl4_sys_pipe_recv_return recv_return;
+ uint32_t *buffer = NULL;
+ size_t recv = 0;
+
+ if (!count)
+ return 0;
+
+again:
+ if (pipe->reset)
+ return -EPIPE;
+
+ if (!pipe->rx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(pipe->rx_wait_q, pipe->rx_maybe_avail))
+ return -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&pipe->pipe_mutex))
+ return -ERESTARTSYS;
+
+ /* Receive buffered data first */
+ if (pipe->rx_buf_count) {
+ recv = min(pipe->rx_buf_count, count);
+
+ if (copy_to_user(buf, pipe->rx_buf, recv)) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return -EFAULT;
+ }
+
+ pipe->rx_buf_count -= recv;
+
+ if (pipe->rx_buf_count) {
+			memmove(pipe->rx_buf, pipe->rx_buf + recv,
+				pipe->rx_buf_count);
+ }
+
+ buf += recv;
+ count -= recv;
+ if (!count) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return recv;
+ }
+ }
+
+ buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+ if (!buffer) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return -ENOMEM;
+ }
+
+ while (count) {
+ okl4_error_t ret;
+ size_t size;
+
+ spin_lock_irq(&pipe->pipe_lock);
+ recv_return = _okl4_sys_pipe_recv(pipe->pipe_rx_kcap,
+ pipe->max_msg_size + sizeof(uint32_t),
+ (void *)buffer);
+ ret = recv_return.error;
+
+ if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+ ret == OKL4_ERROR_PIPE_EMPTY) {
+ pipe->rx_maybe_avail = false;
+ if (!recv) {
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ spin_unlock_irq(&pipe->pipe_lock);
+ mutex_unlock(&pipe->pipe_mutex);
+ kfree(buffer);
+ goto again;
+ }
+ recv = -EAGAIN;
+ }
+ goto error;
+ } else if (ret != OKL4_OK) {
+ dev_err(&pipe->pdev->dev,
+				"pipe recv returned error %d in okl4_pipe driver!\n",
+ (int)ret);
+ if (!recv)
+ recv = -ENXIO;
+ goto error;
+ }
+
+ spin_unlock_irq(&pipe->pipe_lock);
+
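+		/* The first word of each received message is the payload length. */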
+ size = buffer[0];
+ if (size > pipe->max_msg_size) {
+ /* pipe error */
+ if (!recv)
+ recv = -EPROTO;
+ goto out;
+ }
+
+ /* Save extra received data */
+ if (size > count) {
+ pipe->rx_buf_count = size - count;
+ memcpy(pipe->rx_buf, (char*)&buffer[1] + count,
+ size - count);
+ size = count;
+ }
+
+ if (copy_to_user(buf, &buffer[1], size)) {
+ if (!recv)
+ recv = -EFAULT;
+ goto out;
+ }
+
+ count -= size;
+ buf += size;
+ recv += size;
+ }
+out:
+ mutex_unlock(&pipe->pipe_mutex);
+
+ kfree(buffer);
+ return recv;
+error:
+ spin_unlock_irq(&pipe->pipe_lock);
+ goto out;
+}
+
+static ssize_t
+okl4_pipe_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct okl4_pipe_mv *priv = filp->private_data;
+ int pipe_id = priv->pipe_id;
+ struct okl4_pipe *pipe = &pipes[pipe_id];
+ uint32_t *buffer = NULL;
+ size_t sent = 0;
+
+ if (!count)
+ return 0;
+
+again:
+ if (pipe->reset)
+ return -EPIPE;
+
+ if (!pipe->tx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(pipe->tx_wait_q, pipe->tx_maybe_avail))
+ return -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&pipe->pipe_mutex))
+ return -ERESTARTSYS;
+
+ buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+ if (!buffer) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return -ENOMEM;
+ }
+
+ while (count) {
+ okl4_error_t ret;
+ size_t size = min(count, pipe->max_msg_size);
+ size_t pipe_size = roundup(size + sizeof(uint32_t),
+ sizeof(uint32_t));
+
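+		/* Each message is a u32 length header followed by the payload. */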
+ if (copy_from_user(&buffer[1], buf, size)) {
+ if (!sent)
+ sent = -EFAULT;
+ break;
+ }
+
+ buffer[0] = size;
+
+ spin_lock_irq(&pipe->pipe_lock);
+ ret = _okl4_sys_pipe_send(pipe->pipe_tx_kcap, pipe_size,
+ (void *)buffer);
+ if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+ ret == OKL4_ERROR_PIPE_FULL) {
+ pipe->tx_maybe_avail = false;
+ spin_unlock_irq(&pipe->pipe_lock);
+ if (!sent) {
+ if (filp->f_flags & O_NONBLOCK) {
+ sent = -EAGAIN;
+ break;
+ }
+ mutex_unlock(&pipe->pipe_mutex);
+ kfree(buffer);
+ goto again;
+ }
+ break;
+ } else if (ret != OKL4_OK) {
+ dev_err(&pipe->pdev->dev,
+ "pipe send returned error %d in okl4_pipe driver!\n",
+ (int)ret);
+ if (!sent)
+ sent = -ENXIO;
+ spin_unlock_irq(&pipe->pipe_lock);
+ break;
+ }
+ spin_unlock_irq(&pipe->pipe_lock);
+
+ count -= size;
+ buf += size;
+ sent += size;
+ }
+ mutex_unlock(&pipe->pipe_mutex);
+
+ kfree(buffer);
+ return sent;
+}
+
+
+static unsigned int
+okl4_pipe_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+ struct okl4_pipe_mv *priv = filp->private_data;
+ int pipe_id = priv->pipe_id;
+ struct okl4_pipe *pipe = &pipes[pipe_id];
+ unsigned int ret = 0;
+
+ poll_wait(filp, &pipe->poll_wait_q, poll_table);
+
+ spin_lock_irq(&pipe->pipe_lock);
+
+ if (pipe->rx_maybe_avail)
+ ret |= POLLIN | POLLRDNORM;
+ if (pipe->tx_maybe_avail)
+ ret |= POLLOUT | POLLWRNORM;
+ if (pipe->reset)
+ ret = POLLHUP;
+
+ spin_unlock_irq(&pipe->pipe_lock);
+
+ return ret;
+}
+
+static int
+okl4_pipe_open(struct inode *inode, struct file *filp)
+{
+ struct okl4_pipe *pipe = container_of(inode->i_cdev,
+ struct okl4_pipe, cdev);
+ struct okl4_pipe_mv *priv = dev_get_drvdata(&pipe->pdev->dev);
+
+ filp->private_data = priv;
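+	/* First open: allocate the receive buffer and signal both ends ready. */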
+ if (!pipe->ref_count) {
+ pipe->rx_buf = kmalloc(pipe->max_msg_size, GFP_KERNEL);
+ if (!pipe->rx_buf)
+ return -ENOMEM;
+
+ mutex_init(&pipe->pipe_mutex);
+ spin_lock_init(&pipe->pipe_lock);
+
+ pipe->rx_buf_count = 0;
+ pipe->reset = false;
+ pipe->tx_maybe_avail = true;
+ pipe->rx_maybe_avail = true;
+
+ okl4_pipe_control(pipe->pipe_tx_kcap,
+ OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ okl4_pipe_control(pipe->pipe_rx_kcap,
+ OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+ }
+ pipe->ref_count++;
+ return 0;
+}
+
+static int
+okl4_pipe_close(struct inode *inode, struct file *filp)
+{
+ struct okl4_pipe *pipe = container_of(inode->i_cdev,
+ struct okl4_pipe, cdev);
+
+ pipe->ref_count--;
+ if (!pipe->ref_count) {
+ okl4_pipe_control(pipe->pipe_rx_kcap,
+ OKL4_PIPE_CONTROL_OP_RESET);
+ okl4_pipe_control(pipe->pipe_tx_kcap,
+ OKL4_PIPE_CONTROL_OP_RESET);
+
+		kfree(pipe->rx_buf);
+ pipe->rx_buf = NULL;
+ pipe->rx_buf_count = 0;
+ }
+
+ return 0;
+}
+
+static const struct file_operations okl4_pipe_fops = {
+ .owner = THIS_MODULE,
+ .read = okl4_pipe_read,
+ .write = okl4_pipe_write,
+ .open = okl4_pipe_open,
+ .release = okl4_pipe_close,
+ .poll = okl4_pipe_poll,
+};
+
+static int __devinit
+okl4_pipe_probe(struct platform_device *pdev)
+{
+ struct okl4_pipe *pipe;
+ int err, pipe_id;
+ struct okl4_pipe_mv *priv;
+ dev_t dev_num;
+ struct device *device = NULL;
+ u32 reg[2];
+ struct resource *irq;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct okl4_pipe_mv),
+ GFP_KERNEL);
+ if (priv == NULL) {
+ err = -ENOMEM;
+ goto fail_alloc_priv;
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ pipe_id = of_alias_get_id(pdev->dev.of_node, "pipe");
+	if (pipe_id < 0 || pipe_id >= MAX_PIPES) {
+		err = -ENXIO;
+		goto fail_pipe_id;
+	}
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+ dev_err(&pdev->dev, "need 2 reg resources\n");
+ err = -ENODEV;
+ goto fail_pipe_id;
+ }
+
+ /* Populate the private structure */
+ priv->pipe_id = pipe_id;
+
+ pipe = &pipes[pipe_id];
+
+ /* Set up and register the pipe device */
+ pipe->pdev = pdev;
+ dev_set_name(&pdev->dev, "%s%d", DEVICE_NAME, (int)pipe_id);
+
+ pipe->ref_count = 0;
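+	/* The two "reg" cells hold the tx and rx pipe capability ids. */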
+ pipe->pipe_tx_kcap = reg[0];
+ pipe->pipe_rx_kcap = reg[1];
+ pipe->max_msg_size = 64;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no tx irq resource?\n");
+ err = -ENODEV;
+ goto fail_irq_resource;
+ }
+ pipe->tx_irq = irq->start;
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!irq) {
+ dev_err(&pdev->dev, "no rx irq resource?\n");
+ err = -ENODEV;
+ goto fail_irq_resource;
+ }
+ pipe->rx_irq = irq->start;
+
+ pipe->write_buf = kmalloc(sizeof(pipe->write_buf), GFP_KERNEL);
+ if (!pipe->write_buf) {
+ dev_err(&pdev->dev, "cannot allocate write buffer\n");
+ err = -ENOMEM;
+ goto fail_malloc_write;
+ }
+
+ init_waitqueue_head(&pipe->rx_wait_q);
+ init_waitqueue_head(&pipe->tx_wait_q);
+ init_waitqueue_head(&pipe->poll_wait_q);
+
+ err = devm_request_irq(&pdev->dev, pipe->rx_irq,
+ okl4_pipe_rx_irq, 0, dev_name(&pdev->dev),
+ pipe);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register rx irq %d: %d\n",
+ (int)pipe->rx_irq, (int)err);
+ goto fail_request_rx_irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, pipe->tx_irq,
+ okl4_pipe_tx_irq, 0, dev_name(&pdev->dev),
+ pipe);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register tx irq %d: %d\n",
+ (int)pipe->tx_irq, (int)err);
+ goto fail_request_tx_irq;
+ }
+
+ dev_num = MKDEV(okl4_pipe_major, pipe_id);
+
+ cdev_init(&pipe->cdev, &okl4_pipe_fops);
+ pipe->cdev.owner = THIS_MODULE;
+ err = cdev_add(&pipe->cdev, dev_num, 1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot add device: %d\n", (int)err);
+ goto fail_cdev_add;
+ }
+
+ device = device_create(okl4_pipe_class, NULL, dev_num, NULL,
+ DEVICE_NAME "%d", pipe_id);
+ if (IS_ERR(device)) {
+ err = PTR_ERR(device);
+ dev_err(&pdev->dev, "cannot create device: %d\n", (int)err);
+ goto fail_device_create;
+ }
+
+ return 0;
+
+fail_device_create:
+ cdev_del(&pipe->cdev);
+fail_cdev_add:
+ devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+fail_request_tx_irq:
+ devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+fail_request_rx_irq:
+ kfree(pipe->write_buf);
+fail_malloc_write:
+fail_irq_resource:
+fail_pipe_id:
+ dev_set_drvdata(&pdev->dev, NULL);
+ devm_kfree(&pdev->dev, priv);
+fail_alloc_priv:
+ return err;
+}
+
+static int __devexit
+okl4_pipe_remove(struct platform_device *pdev)
+{
+ struct okl4_pipe *pipe;
+ struct okl4_pipe_mv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (priv->pipe_id < 0 || priv->pipe_id >= MAX_PIPES)
+ return -ENXIO;
+
+ pipe = &pipes[priv->pipe_id];
+
+ cdev_del(&pipe->cdev);
+
+ devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+ devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+
+ kfree(pipe->write_buf);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ devm_kfree(&pdev->dev, priv);
+
+ return 0;
+}
+
+static const struct of_device_id okl4_pipe_match[] = {
+ {
+ .compatible = "okl,pipe",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, okl4_pipe_match);
+
+static struct platform_driver okl4_pipe_driver = {
+ .probe = okl4_pipe_probe,
+ .remove = __devexit_p(okl4_pipe_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = okl4_pipe_match,
+ },
+};
+
+static int __init
+okl4_pipe_init(void)
+{
+ int err;
+ dev_t dev_num = 0;
+
+ err = alloc_chrdev_region(&dev_num, 0, MAX_PIPES, DEVICE_NAME);
+ if (err < 0) {
+		pr_err("%s: cannot allocate device region\n", __func__);
+ goto fail_alloc_chrdev_region;
+ }
+ okl4_pipe_major = MAJOR(dev_num);
+
+ okl4_pipe_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(okl4_pipe_class)) {
+ err = PTR_ERR(okl4_pipe_class);
+ goto fail_class_create;
+ }
+
+	/* Register the pipe platform driver */
+ err = platform_driver_register(&okl4_pipe_driver);
+ if (err)
+ goto fail_driver_register;
+
+ return 0;
+
+fail_driver_register:
+ class_destroy(okl4_pipe_class);
+fail_class_create:
+ unregister_chrdev_region(dev_num, MAX_PIPES);
+fail_alloc_chrdev_region:
+ return err;
+}
+
+static void __exit
+okl4_pipe_exit(void)
+{
+ dev_t dev_num = MKDEV(okl4_pipe_major, 0);
+
+ platform_driver_unregister(&okl4_pipe_driver);
+ class_destroy(okl4_pipe_class);
+ unregister_chrdev_region(dev_num, MAX_PIPES);
+}
+
+module_init(okl4_pipe_init);
+module_exit(okl4_pipe_exit);
+
+MODULE_DESCRIPTION("OKL4 pipe driver");
+MODULE_AUTHOR("John Clarke <johnc@cog.systems>");
diff --git a/drivers/char/vs_serial_client.c b/drivers/char/vs_serial_client.c
new file mode 100644
index 000000000000..a0bf1cc0db39
--- /dev/null
+++ b/drivers/char/vs_serial_client.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/char/vs_serial_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService client driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/client.h>
+
+#include "vs_serial_common.h"
+
+#define client_state_to_port(state) \
+ container_of(state, struct vtty_port, u.vs_client)
+
+static struct vs_mbuf *vs_serial_client_alloc_msg_buf(struct vtty_port *port,
+ struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+ return vs_client_serial_serial_alloc_msg(&port->u.vs_client, pbuf,
+ gfp_flags);
+}
+
+static void vs_serial_client_free_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ vs_client_serial_serial_free_msg(&port->u.vs_client, pbuf, mbuf);
+}
+
+static int vs_serial_client_send_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ return vs_client_serial_serial_send_msg(&port->u.vs_client, *pbuf,
+ mbuf);
+}
+
+static bool vs_serial_client_is_vservices_running(struct vtty_port *port)
+{
+ return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_client.state.base);
+}
+
+static struct vtty_port_ops client_port_ops = {
+ .alloc_msg_buf = vs_serial_client_alloc_msg_buf,
+ .free_msg_buf = vs_serial_client_free_msg_buf,
+ .send_msg_buf = vs_serial_client_send_msg_buf,
+ .is_running = vs_serial_client_is_vservices_running,
+};
+
+static struct vs_client_serial_state *
+vs_serial_client_alloc(struct vs_service_device *service)
+{
+ struct vtty_port *port;
+
+ port = vs_serial_alloc_port(service, &client_port_ops);
+ if (!port)
+ return NULL;
+
+ dev_set_drvdata(&service->dev, port);
+ return &port->u.vs_client;
+}
+
+static void vs_serial_client_release(struct vs_client_serial_state *_state)
+{
+ vs_serial_release(client_state_to_port(_state));
+}
+
+static void vs_serial_client_closed(struct vs_client_serial_state *_state)
+{
+ vs_serial_reset(client_state_to_port(_state));
+}
+
+static void vs_serial_client_opened(struct vs_client_serial_state *_state)
+{
+ struct vtty_port *port = client_state_to_port(_state);
+
+ dev_dbg(&port->service->dev, "ack_open\n");
+ port->max_transfer_size = _state->packet_size;
+}
+
+static int
+vs_serial_client_handle_message(struct vs_client_serial_state *_state,
+ struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+ return vs_serial_handle_message(client_state_to_port(_state), mbuf,
+ &data);
+}
+
+static struct vs_client_serial vs_client_serial_driver = {
+ .rx_atomic = true,
+ .alloc = vs_serial_client_alloc,
+ .release = vs_serial_client_release,
+ .closed = vs_serial_client_closed,
+ .opened = vs_serial_client_opened,
+ .serial = {
+ .msg_msg = vs_serial_client_handle_message,
+ },
+};
+
+static int __init vs_serial_client_init(void)
+{
+ return vservice_serial_client_register(&vs_client_serial_driver,
+ "vserial");
+}
+
+static void __exit vs_serial_client_exit(void)
+{
+ vservice_serial_client_unregister(&vs_client_serial_driver);
+}
+
+module_init(vs_serial_client_init);
+module_exit(vs_serial_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vs_serial_common.h b/drivers/char/vs_serial_common.h
new file mode 100644
index 000000000000..2fe7d2833eeb
--- /dev/null
+++ b/drivers/char/vs_serial_common.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/char/vs_serial_common.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _VS_SERIAL_COMMON_H
+#define _VS_SERIAL_COMMON_H
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/protocol/serial/client.h>
+
+#define OUTBUFFER_SIZE 1024
+#define vtty_list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+struct vtty_port;
+struct vs_service_device;
+
+struct vtty_port_ops {
+ struct vs_mbuf *(*alloc_msg_buf)(struct vtty_port *port,
+ struct vs_pbuf *pbuf, gfp_t gfp_flags);
+ void (*free_msg_buf)(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+ int (*send_msg_buf)(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+ bool (*is_running)(struct vtty_port *port);
+};
+
+struct vtty_port {
+ union {
+ struct vs_client_serial_state vs_client;
+ struct vs_server_serial_state vs_server;
+ } u;
+
+ struct vs_service_device *service;
+ int port_num;
+
+ struct tty_driver *vtty_driver;
+
+ struct vtty_port_ops ops;
+
+ /* output data */
+ bool doing_release;
+
+ int max_transfer_size;
+
+ /* Tracks if tty layer can receive data from driver */
+ bool tty_canrecv;
+
+ /*
+ * List of pending incoming buffers from the vServices stack. If we
+ * receive a buffer, but cannot write it to the tty layer then we
+ * queue it on this list to handle later. in_lock protects access to
+ * the pending_in_packets list and the tty_canrecv field.
+ */
+ struct list_head pending_in_packets;
+ spinlock_t in_lock;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ struct console console;
+#endif
+
+ struct tty_port port;
+};
+
+extern struct vtty_port *
+vs_serial_alloc_port(struct vs_service_device *service,
+ struct vtty_port_ops *port_ops);
+extern void vs_serial_release(struct vtty_port *port);
+extern void vs_serial_reset(struct vtty_port *port);
+extern int vs_serial_handle_message(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+
+#endif /* _VS_SERIAL_COMMON_H */
diff --git a/drivers/char/vs_serial_server.c b/drivers/char/vs_serial_server.c
new file mode 100644
index 000000000000..d4a169e7579a
--- /dev/null
+++ b/drivers/char/vs_serial_server.c
@@ -0,0 +1,152 @@
+/*
+ * drivers/char/vs_serial_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService server driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+
+#include "vs_serial_common.h"
+
+#define server_state_to_port(state) \
+ container_of(state, struct vtty_port, u.vs_server)
+
+static struct vs_mbuf *vs_serial_server_alloc_msg_buf(struct vtty_port *port,
+ struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+ return vs_server_serial_serial_alloc_msg(&port->u.vs_server, pbuf,
+ gfp_flags);
+}
+
+static void vs_serial_server_free_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ vs_server_serial_serial_free_msg(&port->u.vs_server, pbuf, mbuf);
+}
+
+static int vs_serial_server_send_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ return vs_server_serial_serial_send_msg(&port->u.vs_server, *pbuf, mbuf);
+}
+
+static bool vs_serial_server_is_vservices_running(struct vtty_port *port)
+{
+ return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_server.state.base);
+}
+
+static struct vtty_port_ops server_port_ops = {
+ .alloc_msg_buf = vs_serial_server_alloc_msg_buf,
+ .free_msg_buf = vs_serial_server_free_msg_buf,
+ .send_msg_buf = vs_serial_server_send_msg_buf,
+ .is_running = vs_serial_server_is_vservices_running,
+};
+
+static struct vs_server_serial_state *
+vs_serial_server_alloc(struct vs_service_device *service)
+{
+ struct vtty_port *port;
+
+ port = vs_serial_alloc_port(service, &server_port_ops);
+ if (!port)
+ return NULL;
+
+ dev_set_drvdata(&service->dev, port);
+ return &port->u.vs_server;
+}
+
+static void vs_serial_server_release(struct vs_server_serial_state *_state)
+{
+ vs_serial_release(server_state_to_port(_state));
+}
+
+static void vs_serial_server_closed(struct vs_server_serial_state *_state)
+{
+ vs_serial_reset(server_state_to_port(_state));
+}
+
+static int
+vs_serial_server_handle_message(struct vs_server_serial_state *_state,
+ struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+ return vs_serial_handle_message(server_state_to_port(_state), mbuf,
+ &data);
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_open(struct vs_server_serial_state *_state)
+{
+ struct vtty_port *port = server_state_to_port(_state);
+
+ dev_dbg(&port->service->dev, "req_open\n");
+
+ /* FIXME: Jira ticket SDK-3521 - ryanm. */
+ port->max_transfer_size = vs_service_max_mbuf_size(port->service) - 8;
+ _state->packet_size = port->max_transfer_size;
+
+ return VS_SERVER_RESP_SUCCESS;
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_close(struct vs_server_serial_state *_state)
+{
+ struct vtty_port *port = server_state_to_port(_state);
+
+ dev_dbg(&port->service->dev, "req_close\n");
+
+ return VS_SERVER_RESP_SUCCESS;
+}
+
+static struct vs_server_serial vs_server_serial_driver = {
+ .rx_atomic = true,
+ .alloc = vs_serial_server_alloc,
+ .release = vs_serial_server_release,
+ .closed = vs_serial_server_closed,
+ .open = vs_serial_server_req_open,
+ .close = vs_serial_server_req_close,
+ .serial = {
+ .msg_msg = vs_serial_server_handle_message,
+ },
+
+ /* Large default quota for batching data messages */
+ .in_quota_best = 16,
+ .out_quota_best = 16,
+};
+
+static int __init vs_serial_server_init(void)
+{
+ return vservice_serial_server_register(&vs_server_serial_driver,
+ "vserial");
+}
+
+static void __exit vs_serial_server_exit(void)
+{
+ vservice_serial_server_unregister(&vs_server_serial_driver);
+}
+
+module_init(vs_serial_server_init);
+module_exit(vs_serial_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vservices_serial.c b/drivers/char/vservices_serial.c
new file mode 100644
index 000000000000..0194eacc563a
--- /dev/null
+++ b/drivers/char/vservices_serial.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/char/vservices_serial.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService core driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#include "vs_serial_common.h"
+
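+/*
+ * An incoming message that could not yet be written to the tty in full;
+ * offset records how much of pbuf has already been pushed.
+ */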
+struct vtty_in_packet {
+ struct vs_pbuf pbuf;
+ size_t offset;
+};
+
+static int max_ttys = CONFIG_VSERVICES_VTTY_COUNT;
+static unsigned long *alloced_ttys;
+module_param(max_ttys, int, S_IRUGO);
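+/*
+ * max_ttys is read-only at runtime (S_IRUGO); it would normally be set at
+ * boot or module load time, e.g. "vservices_serial.max_ttys=8" (parameter
+ * prefix assumed from this file's name).
+ */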
+
+static struct tty_driver *vtty_driver;
+
+static DEFINE_MUTEX(tty_bitmap_lock);
+
+static struct vtty_port *dev_to_port(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+#if defined(CONFIG_VSERVICES_SERIAL_SERVER) || \
+ defined(CONFIG_VSERVICES_SERIAL_SERVER_MODULE)
+ if (service->is_server) {
+ struct vs_server_serial_state *server = dev_get_drvdata(dev);
+ return container_of(server, struct vtty_port, u.vs_server);
+ }
+#endif
+#if defined(CONFIG_VSERVICES_SERIAL_CLIENT) || \
+ defined(CONFIG_VSERVICES_SERIAL_CLIENT_MODULE)
+ if (!service->is_server) {
+ struct vs_client_serial_state *client = dev_get_drvdata(dev);
+ return container_of(client, struct vtty_port, u.vs_client);
+ }
+#endif
+ /* should never get here */
+ WARN_ON(1);
+ return NULL;
+}
+
+static struct vtty_port *port_from_tty(struct tty_struct *tty)
+{
+ return dev_to_port(tty->dev->parent);
+}
+
+static int vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct vtty_port *port;
+
+ if (tty->index < 0 || !test_bit(tty->index, alloced_ttys))
+ return -ENXIO;
+
+ port = port_from_tty(tty);
+
+ if (!port)
+ return -ENXIO;
+
+ tty->driver_data = port;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+ if (tty->port)
+ tty->port->low_latency = 0;
+#else
+ tty->low_latency = 0;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+ tty->port = &port->port;
+ tty_standard_install(driver, tty);
+#else
+ tty->port = &port->port;
+ if (tty_init_termios(tty) != 0)
+ return -ENOMEM;
+
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+#endif
+
+ return 0;
+}
+
+static int vtty_open(struct tty_struct *tty, struct file *file)
+{
+ struct vtty_port *port = tty->driver_data;
+ return tty_port_open(&port->port, tty, file);
+}
+
+static void vtty_close(struct tty_struct *tty, struct file *file)
+{
+ struct vtty_port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, file);
+}
+
+static void vtty_shutdown(struct tty_port *port)
+{
+ struct vtty_port *vtty_port =
+ container_of(port, struct vtty_port, port);
+
+ /* Free the containing vtty_port, of which the tty_port is a member. */
+ if (vtty_port->doing_release)
+ kfree(vtty_port);
+}
+
+static int vtty_write_room(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
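+ /* Room is reported in whole messages: free mbufs times max payload size. */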
+ return vs_service_send_mbufs_available(port->service) *
+ port->max_transfer_size;
+}
+
+static struct vs_mbuf *vserial_alloc_send_buffer(struct vtty_port *port,
+ const unsigned char *buf, size_t size, struct vs_pbuf *pbuf,
+ gfp_t gfp_flags)
+{
+ struct vs_mbuf *mbuf;
+ ssize_t ret;
+
+ mbuf = port->ops.alloc_msg_buf(port, pbuf, gfp_flags);
+ if (IS_ERR(mbuf)) {
+ ret = PTR_ERR(mbuf);
+ goto fail;
+ }
+
+ ret = vs_pbuf_resize(pbuf, size);
+ if (ret < (ssize_t)size)
+ goto fail_free_buf;
+
+ ret = vs_pbuf_copyin(pbuf, 0, buf, size);
+ if (ret < (ssize_t)size)
+ goto fail_free_buf;
+
+ return mbuf;
+
+fail_free_buf:
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+fail:
+ return ERR_PTR(ret);
+}
+
+static int vtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct vtty_port *port;
+ size_t sent_bytes = 0, size;
+ struct vs_mbuf *mbuf;
+ struct vs_pbuf pbuf;
+ int err;
+
+ if (WARN_ON(!tty || !buf))
+ return -EINVAL;
+
+ port = tty->driver_data;
+ if (!port->ops.is_running(port)) {
+ dev_dbg(&port->service->dev, "tty is not running!");
+ return 0;
+ }
+
+ /*
+ * We need to break our message up into chunks of
+ * port->max_transfer_size.
+ */
+ dev_dbg(&port->service->dev, "Writing %d bytes\n", count);
+ while (sent_bytes < count) {
+ size = min_t(size_t, count - sent_bytes,
+ port->max_transfer_size);
+
+ /*
+ * Passing &port->u.vs_client here works for both the client
+ * and the server since vs_client and vs_server are in the
+ * same union, and therefore have the same address.
+ */
+ mbuf = vs_service_waiting_alloc(&port->u.vs_client,
+ vserial_alloc_send_buffer(port,
+ buf + sent_bytes, size, &pbuf, GFP_KERNEL));
+ if (IS_ERR(mbuf)) {
+ dev_err(&port->service->dev,
+ "Failed to alloc mbuf of %zu bytes: %ld - resetting service\n",
+ size, PTR_ERR(mbuf));
+ vs_service_reset(port->service, port->service);
+ return -EIO;
+ }
+
+ vs_service_state_lock(port->service);
+ err = port->ops.send_msg_buf(port, mbuf, &pbuf);
+ vs_service_state_unlock(port->service);
+ if (err) {
+ port->ops.free_msg_buf(port, mbuf, &pbuf);
+ dev_err(&port->service->dev,
+ "send failed: %d - resetting service",
+ err);
+ vs_service_reset(port->service, port->service);
+ return -EIO;
+ }
+
+ dev_dbg(&port->service->dev, "Sent %zu bytes (%zu/%d)\n",
+ size, sent_bytes + size, count);
+ sent_bytes += size;
+ }
+
+ dev_dbg(&port->service->dev, "Write complete - sent %zu/%d bytes\n",
+ sent_bytes, count);
+ return sent_bytes;
+}
+
+static int vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ return vtty_write(tty, &ch, 1);
+}
+
+static size_t vs_serial_send_pbuf_to_tty(struct vtty_port *port,
+ struct vs_pbuf *pbuf, size_t offset)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ size_t space, size;
+
+ lockdep_assert_held(&port->in_lock);
+
+ size = vs_pbuf_size(pbuf) - offset;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+ space = tty_buffer_request_room(tty->port, size);
+#else
+ space = tty_buffer_request_room(tty, size);
+#endif
+ if (space) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+ tty_insert_flip_string(tty->port, pbuf->data + offset, space);
+ tty_flip_buffer_push(tty->port);
+#else
+ tty_insert_flip_string(tty, pbuf->data + offset, space);
+ tty_flip_buffer_push(tty);
+#endif
+ }
+
+ tty_kref_put(tty);
+
+ /* Return the number of bytes written */
+ return space;
+}
+
+static void vtty_throttle(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
+ dev_dbg(&port->service->dev, "throttle\n");
+
+ spin_lock_bh(&port->in_lock);
+ port->tty_canrecv = false;
+ spin_unlock_bh(&port->in_lock);
+}
+
+static void vtty_unthrottle(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+ struct vtty_in_packet *packet;
+ struct vs_mbuf *mbuf;
+ size_t sent;
+
+ dev_dbg(&port->service->dev, "unthrottle\n");
+
+ spin_lock_bh(&port->in_lock);
+
+ while (!list_empty(&port->pending_in_packets)) {
+ mbuf = list_first_entry(&port->pending_in_packets,
+ struct vs_mbuf, queue);
+ packet = mbuf->priv;
+
+ sent = vs_serial_send_pbuf_to_tty(port, &packet->pbuf,
+ packet->offset);
+ packet->offset += sent;
+ if (packet->offset < vs_pbuf_size(&packet->pbuf)) {
+ /*
+ * Only wrote part of the buffer. This means that we
+ * still have pending data that cannot be written to
+ * the tty at this time. The tty layer will rethrottle
+ * and this function will be called again when the tty
+ * layer is next able to handle data and we can write
+ * the remainder of the buffer.
+ */
+ dev_dbg(&port->service->dev,
+ "unthrottle: Only wrote %zu (%zu/%zu) bytes\n",
+ sent, packet->offset,
+ vs_pbuf_size(&packet->pbuf));
+ break;
+ }
+
+ dev_dbg(&port->service->dev,
+ "unthrottle: wrote %zu (%zu/%zu) bytes\n",
+ sent, packet->offset,
+ vs_pbuf_size(&packet->pbuf));
+
+ /* Wrote the whole buffer - free it */
+ list_del(&mbuf->queue);
+ port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+ kfree(packet);
+ }
+
+ port->tty_canrecv = true;
+ spin_unlock_bh(&port->in_lock);
+}
+
+static struct tty_port_operations vtty_port_ops = {
+ .shutdown = vtty_shutdown,
+};
+
+static struct tty_operations vtty_ops = {
+ .install = vtty_install,
+ .open = vtty_open,
+ .close = vtty_close,
+ .write = vtty_write,
+ .write_room = vtty_write_room,
+ .put_char = vtty_put_char,
+ .throttle = vtty_throttle,
+ .unthrottle = vtty_unthrottle
+};
+
+static int vs_serial_queue_incoming_packet(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf, size_t offset)
+{
+ struct vtty_in_packet *packet;
+
+ lockdep_assert_held(&port->in_lock);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet) {
+ /*
+ * Uh oh, we are seriously out of memory. The incoming data
+ * will be lost.
+ */
+ return -ENOMEM;
+ }
+
+ dev_dbg(&port->service->dev, "Queuing packet %zu bytes, offset %zu\n",
+ vs_pbuf_size(pbuf), offset);
+ mbuf->priv = packet;
+ memcpy(&packet->pbuf, pbuf, sizeof(*pbuf));
+ packet->offset = offset;
+
+ list_add_tail(&mbuf->queue, &port->pending_in_packets);
+ return 0;
+}
+
+int vs_serial_handle_message(struct vtty_port *port, struct vs_mbuf *mbuf,
+ struct vs_pbuf *pbuf)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ bool queue_packet = false;
+ size_t sent = 0;
+ int err;
+
+ if (!tty) {
+ dev_dbg(&port->service->dev,
+ "tty not open. Dropping %zu chars\n",
+ pbuf->size);
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+ return 0;
+ }
+
+ dev_dbg(&port->service->dev, "Incoming message - len = %zu\n",
+ pbuf->size);
+
+ spin_lock(&port->in_lock);
+ if (!port->tty_canrecv || !list_empty(&port->pending_in_packets)) {
+ /*
+ * We cannot send to the tty right now, either because we are
+ * being throttled or because we still have pending data
+ * to write out to the tty. Queue the buffer up so we can
+ * write it later.
+ */
+ dev_dbg(&port->service->dev,
+ "Cannot send (canrecv = %d, queued = %d) - queuing message\n",
+ port->tty_canrecv,
+ !list_empty(&port->pending_in_packets));
+ queue_packet = true;
+
+ } else {
+ sent = vs_serial_send_pbuf_to_tty(port, pbuf, 0);
+ if (sent < vs_pbuf_size(pbuf)) {
+ /*
+ * Only wrote part of the buffer to the tty. Queue
+ * the buffer to write the rest.
+ */
+ dev_dbg(&port->service->dev,
+ "Sent %zu/%zu bytes to tty - queueing rest\n",
+ sent, vs_pbuf_size(pbuf));
+ queue_packet = true;
+ }
+ }
+
+ if (queue_packet) {
+ /*
+ * Queue the incoming data up. If we are not already throttled,
+ * the tty layer will do so now since it has no room in its
+ * buffers.
+ */
+ err = vs_serial_queue_incoming_packet(port, mbuf, pbuf, sent);
+ if (err) {
+ dev_err(&port->service->dev,
+ "Failed to queue packet - dropping chars\n");
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+ }
+
+ } else {
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+ }
+
+ spin_unlock(&port->in_lock);
+ tty_kref_put(tty);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_serial_handle_message);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+static int vconsole_setup(struct console *co, char *options)
+{
+ if (co->index < 0 || co->index >= max_ttys)
+ co->index = 0;
+
+ pr_info("OKL4 virtual console init\n");
+
+ return 0;
+}
+
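+/* Console writes are not forwarded anywhere; this console only provides device lookup. */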
+static void vconsole_write(struct console *co, const char *p, unsigned count)
+{
+}
+
+static struct tty_driver *vconsole_device(struct console *co, int *index)
+{
+ *index = co->index;
+
+ return vtty_driver;
+}
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+static void vs_serial_free_buffers(struct vtty_port *port)
+{
+ struct vtty_in_packet *packet;
+ struct vs_mbuf *mbuf;
+
+ /* Free the list of incoming buffers */
+ spin_lock_bh(&port->in_lock);
+ while (!list_empty(&port->pending_in_packets)) {
+ mbuf = list_first_entry(&port->pending_in_packets,
+ struct vs_mbuf, queue);
+ packet = mbuf->priv;
+
+ list_del(&mbuf->queue);
+ port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+ kfree(packet);
+ }
+ spin_unlock_bh(&port->in_lock);
+}
+
+/** vservices callbacks **/
+struct vtty_port *vs_serial_alloc_port(struct vs_service_device *service,
+ struct vtty_port_ops *port_ops)
+{
+ struct vtty_port *port;
+ int port_num;
+
+ mutex_lock(&tty_bitmap_lock);
+ port_num = find_first_zero_bit(alloced_ttys, max_ttys);
+
+ if (port_num >= max_ttys) {
+ mutex_unlock(&tty_bitmap_lock);
+ return NULL;
+ }
+
+ port = kzalloc(sizeof(struct vtty_port), GFP_KERNEL);
+ if (!port) {
+ mutex_unlock(&tty_bitmap_lock);
+ return NULL;
+ }
+
+ port->service = service;
+ port->ops = *port_ops;
+ port->tty_canrecv = true;
+ port->port_num = port_num;
+ INIT_LIST_HEAD(&port->pending_in_packets);
+ spin_lock_init(&port->in_lock);
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ /* Set up and register the port's console device */
+ strlcpy(port->console.name, "vconvs", sizeof(port->console.name));
+ port->console.write = vconsole_write;
+ port->console.flags = CON_PRINTBUFFER;
+ port->console.device = vconsole_device;
+ port->console.setup = vconsole_setup;
+ port->console.index = port_num;
+
+ register_console(&port->console);
+#endif
+ port->vtty_driver = vtty_driver;
+
+ tty_port_init(&port->port);
+ port->port.ops = &vtty_port_ops;
+
+ tty_register_device(vtty_driver, port_num, &service->dev);
+ bitmap_set(alloced_ttys, port_num, 1);
+ mutex_unlock(&tty_bitmap_lock);
+
+ return port;
+}
+EXPORT_SYMBOL(vs_serial_alloc_port);
+
+void vs_serial_release(struct vtty_port *port)
+{
+ dev_dbg(&port->service->dev, "Release\n");
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ unregister_console(&port->console);
+#endif
+
+ mutex_lock(&tty_bitmap_lock);
+ bitmap_clear(alloced_ttys, port->port_num, 1);
+ mutex_unlock(&tty_bitmap_lock);
+
+ if (port->port.tty) {
+ tty_vhangup(port->port.tty);
+ tty_kref_put(port->port.tty);
+ }
+
+ vs_serial_free_buffers(port);
+ port->doing_release = true;
+ tty_unregister_device(vtty_driver, port->port_num);
+}
+EXPORT_SYMBOL_GPL(vs_serial_release);
+
+void vs_serial_reset(struct vtty_port *port)
+{
+ /* Free list of in and out mbufs. */
+ vs_serial_free_buffers(port);
+}
+EXPORT_SYMBOL_GPL(vs_serial_reset);
+
+static int __init vs_serial_init(void)
+{
+ int err;
+
+ if (max_ttys == 0)
+ return -EINVAL;
+
+ alloced_ttys = kzalloc(sizeof(unsigned long) * BITS_TO_LONGS(max_ttys),
+ GFP_KERNEL);
+ if (!alloced_ttys) {
+ err = -ENOMEM;
+ goto fail_alloc_ttys;
+ }
+
+ /* Set up the tty driver. */
+ vtty_driver = alloc_tty_driver(max_ttys);
+ if (!vtty_driver) {
+ err = -ENOMEM;
+ goto fail_alloc_tty_driver;
+ }
+
+ vtty_driver->owner = THIS_MODULE;
+ vtty_driver->driver_name = "okl4-vservices-serial";
+ vtty_driver->name = "ttyVS";
+ vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+ vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ vtty_driver->init_termios = tty_std_termios;
+ vtty_driver->num = max_ttys;
+
+ /* These flags don't really matter; just use sensible defaults. */
+ vtty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ vtty_driver->init_termios.c_ispeed = 9600;
+ vtty_driver->init_termios.c_ospeed = 9600;
+
+ tty_set_operations(vtty_driver, &vtty_ops);
+
+ err = tty_register_driver(vtty_driver);
+ if (err)
+ goto fail_tty_driver_register;
+
+ return 0;
+
+fail_tty_driver_register:
+ put_tty_driver(vtty_driver);
+fail_alloc_tty_driver:
+ kfree(alloced_ttys);
+fail_alloc_ttys:
+ return err;
+}
+
+static void __exit vs_serial_exit(void)
+{
+ tty_unregister_driver(vtty_driver);
+ put_tty_driver(vtty_driver);
+}
+
+module_init(vs_serial_init);
+module_exit(vs_serial_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Core Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index c0f612dee144..acde37cfb853 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -82,6 +82,8 @@ struct lpm_debug {
uint32_t arg4;
};
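+/* Serializes broadcast-timer enter/exit with system sleep enter/exit. */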
+static DEFINE_SPINLOCK(bc_timer_lock);
+
struct lpm_cluster *lpm_root_node;
#define MAXSAMPLES 5
@@ -1019,6 +1021,7 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
{
struct lpm_cluster_level *level = &cluster->levels[idx];
struct cpumask online_cpus;
+ int ret = 0;
cpumask_and(&online_cpus, &cluster->num_children_in_sync,
cpu_online_mask);
@@ -1045,7 +1048,11 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
if (level->notify_rpm) {
clear_predict_history();
clear_cl_predict_history();
- if (system_sleep_enter())
+
+ spin_lock(&bc_timer_lock);
+ ret = system_sleep_enter();
+ spin_unlock(&bc_timer_lock);
+ if (ret)
return -EBUSY;
}
/* Notify cluster enter event after successfully config completion */
@@ -1178,8 +1185,11 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
level = &cluster->levels[cluster->last_level];
- if (level->notify_rpm)
+ if (level->notify_rpm) {
+ spin_lock(&bc_timer_lock);
system_sleep_exit();
+ spin_unlock(&bc_timer_lock);
+ }
update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
cluster->num_children_in_sync.bits[0],
@@ -1272,6 +1282,7 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
int affinity_level = 0, state_id = 0, power_state = 0;
bool success = false;
+ int ret = 0;
/*
* idx = 0 is the default LPM state
*/
@@ -1284,7 +1295,17 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
}
if (from_idle && cpu->levels[idx].use_bc_timer) {
- if (tick_broadcast_enter())
+ /*
+ * tick_broadcast_enter() can change the affinity of the
+ * broadcast timer interrupt, during which the interrupt is
+ * disabled and re-enabled. Hold the lock so that system PM
+ * ops do not save or restore interrupt state within this
+ * window.
+ */
+ spin_lock(&bc_timer_lock);
+ ret = tick_broadcast_enter();
+ spin_unlock(&bc_timer_lock);
+ if (ret)
return success;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index f89ba73b0847..a270566c2c88 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -227,6 +227,9 @@ void gic_v3_dist_save(void)
void __iomem *base = gic_data.dist_base;
int reg, i;
+ if (!base)
+ return;
+
bitmap_zero(irqs_restore, MAX_IRQ);
for (reg = SAVED_ICFGR; reg < NUM_SAVED_GICD_REGS; reg++) {
@@ -423,6 +426,9 @@ static void _gic_v3_dist_clear_reg(u32 offset)
*/
void gic_v3_dist_restore(void)
{
+ if (!gic_data.dist_base)
+ return;
+
_gic_v3_dist_check_icfgr();
_gic_v3_dist_check_ipriorityr();
_gic_v3_dist_check_isenabler();
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index cddbd8309ece..f2796be6d682 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -422,7 +422,7 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
}
for (i = 0; i < req->data->cmd_arrary_count ; i++) {
- uint64_t hw_vaddr_ptr = 0;
+ dma_addr_t hw_vaddr_ptr = 0;
size_t len = 0;
if ((!cdm_cmd->cmd[i].len) &&
@@ -470,7 +470,7 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
}
rc = 0;
hw_vaddr_ptr =
- (uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+ (dma_addr_t) cdm_cmd->cmd[i].bl_addr.hw_iova;
len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
} else {
CAM_ERR(CAM_CDM,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 891b73872e23..670349d9fc17 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -56,7 +56,7 @@ int cam_context_shutdown(struct cam_context *ctx)
}
if (!rc)
- cam_destroy_device_hdl(ctx_hdl);
+ rc = cam_destroy_device_hdl(ctx_hdl);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index fce7fc6cc6b8..89aad8cb58c3 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -490,6 +490,7 @@ free_hw:
release.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
ctx->ctxt_to_hw_map = NULL;
+ ctx->dev_hdl = -1;
end:
return rc;
}
@@ -504,6 +505,7 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
bool free_req;
CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name);
+ memset(&flush_args, 0, sizeof(flush_args));
/*
* flush pending requests, take the sync lock to synchronize with the
@@ -670,6 +672,7 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name);
+ memset(&flush_args, 0, sizeof(flush_args));
flush_args.num_req_pending = 0;
flush_args.num_req_active = 0;
mutex_lock(&ctx->sync_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 0a9fabcf8d5e..3f24c6d41a3f 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -413,13 +413,16 @@ int cam_node_deinit(struct cam_node *node)
int cam_node_shutdown(struct cam_node *node)
{
int i = 0;
+ int rc = 0;
if (!node)
return -EINVAL;
for (i = 0; i < node->ctx_size; i++) {
- if (node->ctx_list[i].dev_hdl >= 0) {
- cam_context_shutdown(&(node->ctx_list[i]));
+ if (node->ctx_list[i].dev_hdl > 0) {
+ rc = cam_context_shutdown(&(node->ctx_list[i]));
+ if (rc)
+ continue;
cam_context_putref(&(node->ctx_list[i]));
}
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 212065050d81..053447e331a8 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -581,15 +581,16 @@ static int cam_cpas_util_set_camnoc_axi_clk_rate(
required_camnoc_bw);
}
- required_camnoc_bw += (required_camnoc_bw *
- soc_private->camnoc_axi_clk_bw_margin) / 100;
+ required_camnoc_bw += div64_u64((required_camnoc_bw *
+ soc_private->camnoc_axi_clk_bw_margin), 100);
if ((required_camnoc_bw > 0) &&
(required_camnoc_bw <
soc_private->camnoc_axi_min_ib_bw))
required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
- clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
+ clk_rate = div64_u64(required_camnoc_bw,
+ soc_private->camnoc_bus_width);
CAM_DBG(CAM_CPAS, "Setting camnoc axi clk rate : %llu %d",
required_camnoc_bw, clk_rate);
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index d3c39f940f05..09388fe40c70 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -536,7 +536,7 @@ static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
int rc = -EINVAL;
uint32_t i, j, plane, num_out_buf, num_in_buf;
struct cam_buf_io_cfg *io_cfg;
- uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
size_t size;
bool need_io_map, need_cpu_map;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index d32d4b69938f..4aa2b61104b8 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -599,7 +599,7 @@ static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
uint64_t base_clk;
uint64_t mul = 1000000000;
- base_clk = (frame_cycles * mul) / budget;
+ base_clk = div64_u64((frame_cycles * mul), budget);
CAM_DBG(CAM_ICP, "budget = %lld fc = %d ib = %lld base_clk = %lld",
budget, frame_cycles,
@@ -3099,7 +3099,7 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
size_in_words = (*(uint32_t *)task_data->data) >> 2;
- CAM_INFO(CAM_ICP, "size_in_words %u", size_in_words);
+ CAM_DBG(CAM_ICP, "size_in_words %u", size_in_words);
rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
CRM_TASK_PRIORITY_0);
if (rc)
@@ -3175,8 +3175,8 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
frame_info = (struct icp_frame_info *)config_args->priv;
req_id = frame_info->request_id;
idx = cam_icp_clk_idx_from_req_id(ctx_data, req_id);
- ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
cam_icp_mgr_ipe_bps_clk_update(hw_mgr, ctx_data, idx);
+ ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
CAM_DBG(CAM_ICP, "req_id %llu, io config %llu", req_id,
frame_info->io_config);
@@ -3268,7 +3268,7 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
{
int rc = 0;
int i, j, k;
- uint64_t addr;
+ dma_addr_t addr;
size_t len;
struct cam_cmd_buf_desc *cmd_desc = NULL;
uint64_t cpu_addr = 0;
@@ -3490,7 +3490,7 @@ static int cam_icp_process_generic_cmd_buffer(
struct cam_packet *packet,
struct cam_icp_hw_ctx_data *ctx_data,
int32_t index,
- uint64_t *io_buf_addr)
+ dma_addr_t *io_buf_addr)
{
int i, rc = 0;
struct cam_cmd_buf_desc *cmd_desc = NULL;
@@ -3841,19 +3841,15 @@ static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
return -EINVAL;
}
- CAM_DBG(CAM_REQ, "ctx_id %d req %lld Flush type %d",
- ctx_data->ctx_id,
- *(int64_t *)flush_args->flush_req_pending[0],
- flush_args->flush_type);
+ CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d",
+ ctx_data->ctx_id, flush_args->flush_type);
switch (flush_args->flush_type) {
case CAM_FLUSH_TYPE_ALL:
mutex_lock(&hw_mgr->hw_mgr_mutex);
- if (!hw_mgr->recovery) {
- if (flush_args->num_req_active) {
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- cam_icp_mgr_abort_handle(ctx_data);
- }
+ if (!hw_mgr->recovery && flush_args->num_req_active) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ cam_icp_mgr_abort_handle(ctx_data);
} else {
mutex_unlock(&hw_mgr->hw_mgr_mutex);
}
@@ -4136,7 +4132,7 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
{
int rc = 0, bitmap_size = 0;
uint32_t ctx_id = 0;
- uint64_t io_buf_addr;
+ dma_addr_t io_buf_addr;
size_t io_buf_size;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 0b931f3c7636..8c132c52739d 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -135,7 +135,7 @@ struct clk_work_data {
*/
struct icp_frame_info {
uint64_t request_id;
- uint64_t io_config;
+ dma_addr_t io_config;
struct hfi_cmd_ipebps_async hfi_cfg_io_cmd;
};
@@ -238,7 +238,7 @@ struct cam_icp_hw_ctx_data {
struct icp_cmd_generic_blob {
struct cam_icp_hw_ctx_data *ctx;
uint32_t frame_info_idx;
- uint64_t *io_buf_addr;
+ dma_addr_t *io_buf_addr;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index f5b1bb147c48..bd1e6b1dffbd 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -25,19 +25,15 @@
#include "cam_debug_util.h"
static const char isp_dev_name[] = "isp";
-
-#define INC_STATE_MONITOR_HEAD(head) \
- (atomic64_add_return(1, head) % \
- CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES)
-
static void __cam_isp_ctx_update_state_monitor_array(
struct cam_isp_context *ctx_isp,
enum cam_isp_state_change_trigger trigger_type,
uint32_t req_id)
{
- int iterator = 0;
+ uint64_t iterator = 0;
- iterator = INC_STATE_MONITOR_HEAD(&ctx_isp->state_monitor_head);
+ div64_u64_rem(atomic64_add_return(1, &ctx_isp->state_monitor_head),
+ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);
ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
ctx_isp->substate_activated;
ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
@@ -96,6 +92,7 @@ static void __cam_isp_ctx_dump_state_monitor_array(
int i = 0;
uint64_t state_head = 0;
uint64_t index;
+ struct cam_isp_context_state_monitor *cam_isp_ctx_state_monitor;
state_head = atomic64_read(&ctx_isp->state_monitor_head);
CAM_ERR_RATE_LIMIT(CAM_ISP,
@@ -103,17 +100,20 @@ static void __cam_isp_ctx_dump_state_monitor_array(
for (i = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES - 1; i >= 0;
i--) {
- index = (((state_head - i) +
- CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) %
- CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES);
+ div64_u64_rem(((state_head - i) +
+ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES),
+ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &index);
+ cam_isp_ctx_state_monitor =
+ &ctx_isp->cam_isp_ctx_state_monitor[index];
+
CAM_ERR_RATE_LIMIT(CAM_ISP,
- "time[0x%llx] req_id[%u] state[%s] evt_type[%s]",
- ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
- ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
- __cam_isp_ctx_substate_val_to_type(
- ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
- __cam_isp_hw_evt_val_to_type(
- ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
+ "time[0x%llx] req_id[%u] state[%s] evt_type[%s]",
+ cam_isp_ctx_state_monitor->evt_time_stamp,
+ cam_isp_ctx_state_monitor->req_id,
+ __cam_isp_ctx_substate_val_to_type(
+ cam_isp_ctx_state_monitor->curr_state),
+ __cam_isp_hw_evt_val_to_type(
+ cam_isp_ctx_state_monitor->trigger));
}
}
@@ -524,11 +524,11 @@ static int __cam_isp_ctx_reg_upd_in_activated_state(
struct cam_context *ctx = ctx_isp->base;
struct cam_isp_ctx_req *req_isp;
- if (list_empty(&ctx->pending_req_list)) {
- CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
+ if (list_empty(&ctx->wait_req_list)) {
+ CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
goto end;
}
- req = list_first_entry(&ctx->pending_req_list,
+ req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
@@ -686,24 +686,16 @@ static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
* This is for the first update. The initial setting will
* cause the reg_upd in the first frame.
*/
- if (!list_empty(&ctx->pending_req_list)) {
- req = list_first_entry(&ctx->pending_req_list,
+ if (!list_empty(&ctx->wait_req_list)) {
+ req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- if (req_isp->num_fence_map_out == req_isp->num_acked) {
+ if (req_isp->num_fence_map_out == req_isp->num_acked)
list_add_tail(&req->list, &ctx->free_req_list);
- } else {
- /* need to handle the buf done */
- list_add_tail(&req->list, &ctx->active_req_list);
- ctx_isp->active_req_cnt++;
- CAM_DBG(CAM_REQ,
- "move request %lld to active list(cnt = %d)",
- req->request_id,
- ctx_isp->active_req_cnt);
- ctx_isp->substate_activated =
- CAM_ISP_CTX_ACTIVATED_EPOCH;
- }
+ else
+ CAM_ERR(CAM_ISP,
+ "receive rup in unexpected state");
}
if (req != NULL) {
__cam_isp_ctx_update_state_monitor_array(ctx_isp,
@@ -722,12 +714,12 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
struct cam_context *ctx = ctx_isp->base;
uint64_t request_id = 0;
- if (list_empty(&ctx->pending_req_list)) {
+ if (list_empty(&ctx->wait_req_list)) {
/*
- * If no pending req in epoch, this is an error case.
+ * If no wait req in epoch, this is an error case.
* The recovery is to go back to sof state
*/
- CAM_ERR(CAM_ISP, "No pending request");
+ CAM_ERR(CAM_ISP, "No wait request");
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
/* Send SOF event as empty frame*/
@@ -737,7 +729,7 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
goto end;
}
- req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+ req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
@@ -746,6 +738,9 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
+ list_del_init(&req->list);
+ list_add(&req->list, &ctx->pending_req_list);
+
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
notify.req_id = req->request_id;
@@ -826,8 +821,10 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
req = list_last_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
- __cam_isp_ctx_update_state_monitor_array(ctx_isp,
- CAM_ISP_STATE_CHANGE_TRIGGER_SOF, ctx->req_list->request_id);
+ if (req)
+ __cam_isp_ctx_update_state_monitor_array(ctx_isp,
+ CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+ ctx->req_list->request_id);
CAM_DBG(CAM_ISP, "next substate %d",
ctx_isp->substate_activated);
@@ -869,7 +866,7 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
* transition to BUBBLE state again.
*/
- if (list_empty(&ctx->pending_req_list)) {
+ if (list_empty(&ctx->wait_req_list)) {
/*
* If no pending req in epoch, this is an error case.
* Just go back to the bubble state.
@@ -882,14 +879,16 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
goto end;
}
- req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+ req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+ list_del_init(&req->list);
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
+ list_add(&req->list, &ctx->pending_req_list);
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
notify.req_id = req->request_id;
@@ -903,7 +902,6 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
* If we can not report bubble, then treat it as if no bubble
* report. Just move the req to active list.
*/
- list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
ctx_isp->active_req_cnt++;
CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
@@ -929,8 +927,9 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
end:
req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request,
list);
- __cam_isp_ctx_update_state_monitor_array(ctx_isp,
- CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
+ if (req)
+ __cam_isp_ctx_update_state_monitor_array(ctx_isp,
+ CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
return 0;
}
@@ -1079,30 +1078,6 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
return rc;
}
-static int __cam_isp_ctx_sof_in_flush(
- struct cam_isp_context *ctx_isp, void *evt_data)
-{
- int rc = 0;
- struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
-
- if (!evt_data) {
- CAM_ERR(CAM_ISP, "in valid sof event data");
- return -EINVAL;
- }
- ctx_isp->frame_id++;
- ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
- ctx_isp->boot_timestamp = sof_event_data->boot_time;
- CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
- ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
-
- if (--ctx_isp->frame_skip_count == 0)
- ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
- else
- CAM_ERR(CAM_ISP, "Skip currect SOF");
-
- return rc;
-}
-
static struct cam_isp_ctx_irq_ops
cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
/* SOF */
@@ -1174,17 +1149,6 @@ static struct cam_isp_ctx_irq_ops
/* HALT */
{
},
- /* FLUSH */
- {
- .irq_ops = {
- NULL,
- __cam_isp_ctx_sof_in_flush,
- NULL,
- NULL,
- NULL,
- __cam_isp_ctx_buf_done_in_applied,
- },
- },
};
static int __cam_isp_ctx_apply_req_in_activated_state(
@@ -1267,6 +1231,8 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = next_state;
ctx_isp->last_applied_req_id = apply->request_id;
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->wait_req_list);
CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
next_state, ctx_isp->last_applied_req_id);
spin_unlock_bh(&ctx->lock);
@@ -1341,7 +1307,11 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
INIT_LIST_HEAD(&flush_list);
if (list_empty(req_list)) {
CAM_DBG(CAM_ISP, "request list is empty");
- return 0;
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ CAM_ERR(CAM_ISP, "no request to cancel");
+ return -EINVAL;
+ } else
+ return 0;
}
CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
@@ -1395,36 +1365,58 @@ static int __cam_isp_ctx_flush_req_in_top_state(
struct cam_context *ctx,
struct cam_req_mgr_flush_request *flush_req)
{
+ struct cam_isp_context *ctx_isp =
+ (struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_isp_stop_args stop_isp;
+ struct cam_hw_stop_args stop_args;
+ struct cam_isp_start_args start_isp;
int rc = 0;
CAM_DBG(CAM_ISP, "try to flush pending list");
spin_lock_bh(&ctx->lock);
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
spin_unlock_bh(&ctx->lock);
- CAM_DBG(CAM_ISP, "Flush request in top state %d",
- ctx->state);
- return rc;
-}
-static int __cam_isp_ctx_flush_req_in_activated(
- struct cam_context *ctx,
- struct cam_req_mgr_flush_request *flush_req)
-{
- int rc = 0;
- struct cam_isp_context *ctx_isp;
-
- ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+ /* if active and wait list are empty, return */
+ spin_lock_bh(&ctx->lock);
+ if ((list_empty(&ctx->wait_req_list)) &&
+ (list_empty(&ctx->active_req_list))) {
+ spin_unlock_bh(&ctx->lock);
+ CAM_DBG(CAM_ISP, "active and wait list are empty");
+ goto end;
+ }
+ spin_unlock_bh(&ctx->lock);
- CAM_DBG(CAM_ISP, "Flush request in state %d", ctx->state);
- rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+ /* Stop hw first before active list flush */
+ stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
+ stop_isp.stop_only = true;
+ stop_args.args = (void *)&stop_isp;
+ ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+ &stop_args);
- /* only if request is found in pending queue, move to flush state*/
- if (!rc) {
spin_lock_bh(&ctx->lock);
- ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
- ctx_isp->frame_skip_count = 2;
+ CAM_DBG(CAM_ISP, "try to flush wait list");
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
+ flush_req);
+ CAM_DBG(CAM_ISP, "try to flush active list");
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
+ flush_req);
spin_unlock_bh(&ctx->lock);
+
+ /* Start hw */
+ start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ start_isp.start_only = true;
+ start_isp.hw_config.priv = NULL;
+
+ rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+ &start_isp);
}
+
+end:
+ CAM_DBG(CAM_ISP, "Flush request in top state %d",
+ ctx->state);
return rc;
}
@@ -1438,7 +1430,7 @@ static int __cam_isp_ctx_flush_req_in_ready(
spin_lock_bh(&ctx->lock);
rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
- /* if nothing is in pending req list, change state to acquire*/
+ /* if nothing is in pending req list, change state to acquire */
if (list_empty(&ctx->pending_req_list))
ctx->state = CAM_CTX_ACQUIRED;
spin_unlock_bh(&ctx->lock);
@@ -1500,12 +1492,6 @@ static struct cam_ctx_ops
.crm_ops = {},
.irq_ops = NULL,
},
- /* FLUSH */
- {
- .ioctl_ops = {},
- .crm_ops = {},
- .irq_ops = NULL,
- },
};
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
@@ -1763,11 +1749,11 @@ static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
/* notify reqmgr with sof signal*/
if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
- if (list_empty(&ctx->pending_req_list)) {
- CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
+ if (list_empty(&ctx->wait_req_list)) {
+ CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
goto error;
}
- req = list_first_entry(&ctx->pending_req_list,
+ req = list_first_entry(&ctx->wait_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
@@ -1890,17 +1876,6 @@ static struct cam_isp_ctx_irq_ops
/* HALT */
{
},
- /* FLUSH */
- {
- .irq_ops = {
- NULL,
- __cam_isp_ctx_sof_in_flush,
- NULL,
- NULL,
- NULL,
- __cam_isp_ctx_buf_done_in_applied,
- },
- },
};
static int __cam_isp_ctx_rdi_only_apply_req_top_state(
@@ -1968,12 +1943,6 @@ static struct cam_ctx_ops
.crm_ops = {},
.irq_ops = NULL,
},
- /* FLUSHED */
- {
- .ioctl_ops = {},
- .crm_ops = {},
- .irq_ops = NULL,
- },
};
/* top level state machine */
@@ -2366,7 +2335,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd)
{
int rc = 0;
- struct cam_hw_config_args arg;
+ struct cam_isp_start_args start_isp;
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp =
@@ -2395,12 +2364,13 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
goto end;
}
- arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
- arg.request_id = req->request_id;
- arg.hw_update_entries = req_isp->cfg;
- arg.num_hw_update_entries = req_isp->num_cfg;
- arg.priv = &req_isp->hw_update_data;
- arg.init_packet = 1;
+ start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ start_isp.hw_config.request_id = req->request_id;
+ start_isp.hw_config.hw_update_entries = req_isp->cfg;
+ start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
+ start_isp.hw_config.priv = &req_isp->hw_update_data;
+ start_isp.hw_config.init_packet = 1;
+ start_isp.start_only = false;
ctx_isp->frame_id = 0;
ctx_isp->active_req_cnt = 0;
@@ -2417,7 +2387,8 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
*/
ctx->state = CAM_CTX_ACTIVATED;
trace_cam_context_state("ISP", ctx);
- rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+ rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+ &start_isp);
if (rc) {
/* HW failure. user need to clean up the resource */
CAM_ERR(CAM_ISP, "Start HW failed");
@@ -2427,9 +2398,13 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
}
CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);
+ list_del_init(&req->list);
+
if (req_isp->num_fence_map_out) {
- list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ } else {
+ list_add_tail(&req->list, &ctx->wait_req_list);
}
end:
return rc;
@@ -2458,6 +2433,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_isp_stop_args stop_isp;
/* Mask off all the incoming hardware events */
spin_lock_bh(&ctx->lock);
@@ -2468,7 +2444,15 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
/* stop hw first */
if (ctx_isp->hw_ctx) {
stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
- stop.args = stop_cmd;
+
+ if (stop_cmd)
+ stop_isp.hw_stop_cmd =
+ CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
+ else
+ stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+
+ stop_isp.stop_only = false;
+ stop.args = (void *) &stop_isp;
ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
&stop);
}
@@ -2489,6 +2473,22 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
list_add_tail(&req->list, &ctx->free_req_list);
}
+ while (!list_empty(&ctx->wait_req_list)) {
+ req = list_first_entry(&ctx->wait_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
+ req_isp->num_fence_map_out);
+ for (i = 0; i < req_isp->num_fence_map_out; i++)
+ if (req_isp->fence_map_out[i].sync_id != -1) {
+ cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+ list_add_tail(&req->list, &ctx->free_req_list);
+ }
+
while (!list_empty(&ctx->active_req_list)) {
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
@@ -2746,7 +2746,7 @@ static struct cam_ctx_ops
.crm_ops = {
.unlink = __cam_isp_ctx_unlink_in_activated,
.apply_req = __cam_isp_ctx_apply_req,
- .flush_req = __cam_isp_ctx_flush_req_in_activated,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
.process_evt = __cam_isp_ctx_process_evt,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index a939f2d80b34..4592e42932d8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -57,7 +57,6 @@ enum cam_isp_ctx_activated_substate {
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
CAM_ISP_CTX_ACTIVATED_HW_ERROR,
CAM_ISP_CTX_ACTIVATED_HALT,
- CAM_ISP_CTX_ACTIVATED_FLUSH,
CAM_ISP_CTX_ACTIVATED_MAX,
};
@@ -155,7 +154,6 @@ struct cam_isp_context_state_monitor {
* @subscribe_event: The irq event mask that CRM subscribes to, IFE
* will invoke CRM cb at those event.
* @last_applied_req_id: Last applied request id
- * @frame_skip_count: Number of frame to skip before change state
* @state_monitor_head: Write index to the state monitoring array
* @cam_isp_ctx_state_monitor: State monitoring array
* @rdi_only_context: Get context type information.
@@ -180,7 +178,6 @@ struct cam_isp_context {
int64_t reported_req_id;
uint32_t subscribe_event;
int64_t last_applied_req_id;
- uint32_t frame_skip_count;
atomic64_t state_monitor_head;
struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[
CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES];
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 8d764b043a18..aab323eac67b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -283,6 +283,100 @@ static void cam_ife_hw_mgr_deinit_hw_res(
}
}
+static int cam_ife_hw_mgr_init_hw(
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ int rc = 0, i;
+
+ CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
+ ctx->ctx_index);
+ /* INIT IFE CID */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+ rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CID(id :%d)",
+ hw_mgr_res->res_id);
+ return rc;
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
+ ctx->ctx_index);
+
+ /* INIT IFE csid */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+ rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CSID(id :%d)",
+ hw_mgr_res->res_id);
+ return rc;
+ }
+ }
+
+ /* INIT IFE SRC */
+ CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
+ ctx->ctx_index);
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
+ hw_mgr_res->res_id);
+ return rc;
+ }
+ }
+
+ /* INIT IFE OUT */
+ CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
+ ctx->ctx_index);
+
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+ rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
+ ctx->res_list_ife_out[i].res_id);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static void cam_ife_hw_mgr_deinit_hw(
+ struct cam_ife_hw_mgr_ctx *ctx)
+{
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ int i = 0;
+
+ if (!ctx->init_done) {
+ CAM_WARN(CAM_ISP, "ctx is not in init state");
+ return;
+ }
+
+ /* Deinit IFE CID */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+ CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
+ cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+ }
+
+ /* Deinit IFE CSID */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+ CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n", __func__);
+ cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+ }
+
+ /* Deinit IFE MUX(SRC) */
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+ }
+
+ /* Deinit IFE OUT */
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+ cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
+
+ ctx->init_done = false;
+}
+
static int cam_ife_hw_mgr_put_res(
struct list_head *src_list,
struct cam_ife_hw_mgr_res **res)
@@ -1833,6 +1927,7 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
{
int rc = 0;
struct cam_hw_stop_args *stop_args = stop_hw_args;
+ struct cam_isp_stop_args *stop_isp;
struct cam_ife_hw_mgr_res *hw_mgr_res;
struct cam_ife_hw_mgr_ctx *ctx;
enum cam_ife_csid_halt_cmd csid_halt_type;
@@ -1842,6 +1937,7 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
+
ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
CAM_ERR(CAM_ISP, "Invalid context is used");
@@ -1849,12 +1945,20 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
}
CAM_DBG(CAM_ISP, " Enter...ctx id:%d", ctx->ctx_index);
+ stop_isp = (struct cam_isp_stop_args *)stop_args->args;
+
+ if ((stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_IMMEDIATELY) &&
+ (stop_isp->stop_only)) {
+ CAM_ERR(CAM_ISP, "Invalid params hw_stop_cmd:%d stop_only:%d",
+ stop_isp->hw_stop_cmd, stop_isp->stop_only);
+ return -EPERM;
+ }
/* Set the csid halt command */
- if (!stop_args->args)
- csid_halt_type = CAM_CSID_HALT_IMMEDIATELY;
- else
+ if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
csid_halt_type = CAM_CSID_HALT_AT_FRAME_BOUNDARY;
+ else
+ csid_halt_type = CAM_CSID_HALT_IMMEDIATELY;
/* Note:stop resource will remove the irq mask from the hardware */
@@ -1930,30 +2034,15 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
ctx->base[i].idx, csid_halt_type);
}
- /* Deinit IFE CID */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
- CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
- cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
- }
-
- /* Deinit IFE CSID */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
- CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n", __func__);
- cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
- }
-
- /* Deint IFE MUX(SRC) */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
- }
-
- /* Deinit IFE OUT */
- for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
- cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
+ if (stop_isp->stop_only)
+ goto end;
+ cam_ife_hw_mgr_deinit_hw(ctx);
CAM_DBG(CAM_ISP,
"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);
+end:
+
mutex_lock(&g_ife_hw_mgr.ctx_mutex);
if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) {
rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
@@ -2065,19 +2154,20 @@ err:
static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
{
int rc = -1;
- struct cam_hw_config_args *start_args = start_hw_args;
+ struct cam_isp_start_args *start_isp = start_hw_args;
struct cam_hw_stop_args stop_args;
- struct cam_isp_stop_hw_method stop_hw_method;
+ struct cam_isp_stop_args stop_isp;
struct cam_ife_hw_mgr_ctx *ctx;
struct cam_ife_hw_mgr_res *hw_mgr_res;
uint32_t i;
- if (!hw_mgr_priv || !start_hw_args) {
+ if (!hw_mgr_priv || !start_isp) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
- ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
+ ctx = (struct cam_ife_hw_mgr_ctx *)
+ start_isp->hw_config.ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
CAM_ERR(CAM_ISP, "Invalid context is used");
return -EPERM;
@@ -2090,6 +2180,9 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
cam_tasklet_start(ctx->common.tasklet_info);
+ if (ctx->init_done && start_isp->start_only)
+ goto start_only;
+
/* set current csid debug information to CSID HW */
for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
if (g_ife_hw_mgr.csid_devices[i])
@@ -2100,58 +2193,13 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
}
- /* INIT IFE Root: do nothing */
-
- CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
- ctx->ctx_index);
- /* INIT IFE CID */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
- rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
- if (rc) {
- CAM_ERR(CAM_ISP, "Can not INIT IFE CID(id :%d)",
- hw_mgr_res->res_id);
- goto err;
- }
- }
-
-
- CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
- ctx->ctx_index);
-
- /* INIT IFE csid */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
- rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
- if (rc) {
- CAM_ERR(CAM_ISP, "Can not INIT IFE CSID(id :%d)",
- hw_mgr_res->res_id);
- goto err;
- }
- }
-
- /* INIT IFE SRC */
- CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
- ctx->ctx_index);
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
- if (rc) {
- CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
- hw_mgr_res->res_id);
- goto err;
- }
+ rc = cam_ife_hw_mgr_init_hw(ctx);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Init failed");
+ goto err;
}
- /* INIT IFE OUT */
- CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
- ctx->ctx_index);
-
- for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
- rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
- if (rc) {
- CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
- ctx->res_list_ife_out[i].res_id);
- goto err;
- }
- }
+start_only:
mutex_lock(&g_ife_hw_mgr.ctx_mutex);
if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) {
@@ -2172,12 +2220,15 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
goto err;
}
- /* Apply initial configuration */
- CAM_DBG(CAM_ISP, "Config HW");
- rc = cam_ife_mgr_config_hw(hw_mgr_priv, start_hw_args);
- if (rc) {
- CAM_ERR(CAM_ISP, "Config HW failed");
- goto err;
+ if (!start_isp->start_only) {
+ /* Apply initial configuration */
+ CAM_DBG(CAM_ISP, "Config HW");
+ rc = cam_ife_mgr_config_hw(hw_mgr_priv,
+ &start_isp->hw_config);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Config HW failed");
+ goto err;
+ }
}
CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
@@ -2229,13 +2280,18 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
}
}
+ ctx->init_done = true;
/* Start IFE root node: do nothing */
CAM_DBG(CAM_ISP, "Start success for ctx id:%d", ctx->ctx_index);
+
return 0;
+
err:
- stop_hw_method.hw_stop_cmd = CAM_CSID_HALT_IMMEDIATELY;
- stop_args.ctxt_to_hw_map = start_args->ctxt_to_hw_map;
- stop_args.args = (void *)(&stop_hw_method);
+ stop_isp.stop_only = false;
+ stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+ stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map;
+ stop_args.args = (void *)(&stop_isp);
+
cam_ife_mgr_stop_hw(hw_mgr_priv, &stop_args);
CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
@@ -2274,6 +2330,9 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
+ if (ctx->init_done)
+ cam_ife_hw_mgr_deinit_hw(ctx);
+
/* we should called the stop hw before this already */
cam_ife_hw_mgr_release_hw_for_ctx(ctx);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 0e678b4584cd..0198f3d62e9c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -123,6 +123,7 @@ struct cam_ife_hw_mgr_debug {
* context
* @is_rdi_only_context flag to specify the context has only rdi resource
* @config_done_complete indicator for configuration complete
+ * @init_done                indicates whether hw init is done
*/
struct cam_ife_hw_mgr_ctx {
struct list_head list;
@@ -156,6 +157,7 @@ struct cam_ife_hw_mgr_ctx {
atomic_t overflow_pending;
uint32_t is_rdi_only_context;
struct completion config_done_complete;
+ bool init_done;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index abc6bb0a6db8..eaa7325fbca8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -434,7 +434,7 @@ int cam_isp_add_io_buffers(
bool fill_fence)
{
int rc = 0;
- uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
struct cam_buf_io_cfg *io_cfg;
struct cam_isp_resource_node *res;
struct cam_ife_hw_mgr_res *hw_mgr_res;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index 8f1911edf2c9..fd71c37c8fa1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -60,13 +60,29 @@ enum cam_isp_hw_stop_cmd {
};
/**
- * struct cam_isp_stop_hw_method - hardware stop method
+ * struct cam_isp_stop_args - hardware stop arguments
*
* @hw_stop_cmd: Hardware stop command type information
+ * @stop_only:               Send stop only to hw drivers. No deinit to be
+ * done.
*
*/
-struct cam_isp_stop_hw_method {
+struct cam_isp_stop_args {
enum cam_isp_hw_stop_cmd hw_stop_cmd;
+ bool stop_only;
+};
+
+/**
+ * struct cam_isp_start_args - isp hardware start arguments
+ *
+ * @hw_config:               Hardware configuration commands.
+ * @start_only:              Send start only to hw drivers. No init to
+ * be done.
+ *
+ */
+struct cam_isp_start_args {
+ struct cam_hw_config_args hw_config;
+ bool start_only;
};
/**
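The stop_only and start_only flags above let the ISP context halt and later resume the IFE pipeline without deinitializing or re-initializing the hardware. Below is a minimal caller sketch, written as if it lived inside cam_ife_hw_mgr.c (both entry points are static there); the helper name is hypothetical and not part of this change.

```c
/*
 * Hypothetical helper (not part of this change): pause the IFE at a frame
 * boundary without deinit, then resume without re-running init or the
 * initial configuration. Assumes ctxt_to_hw_map and a prepared
 * cam_hw_config_args are available from the context layer.
 */
static int cam_ife_mgr_example_pause_resume(void *hw_mgr_priv,
	void *ctxt_to_hw_map, struct cam_hw_config_args *cfg)
{
	struct cam_hw_stop_args stop_args;
	struct cam_isp_stop_args stop_isp;
	struct cam_isp_start_args start_isp;
	int rc;

	/* stop_only combined with CAM_ISP_HW_STOP_IMMEDIATELY is rejected. */
	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
	stop_isp.stop_only = true;
	stop_args.ctxt_to_hw_map = ctxt_to_hw_map;
	stop_args.args = (void *)&stop_isp;

	rc = cam_ife_mgr_stop_hw(hw_mgr_priv, &stop_args);
	if (rc)
		return rc;

	/* init_done is still set, so start_only skips init and config. */
	start_isp.hw_config = *cfg;
	start_isp.hw_config.ctxt_to_hw_map = ctxt_to_hw_map;
	start_isp.start_only = true;

	return cam_ife_mgr_start_hw(hw_mgr_priv, &start_isp);
}
```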
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 70e04677282d..28cfcc8bea74 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -177,7 +177,7 @@ struct cam_isp_hw_cmd_buf_update {
*
*/
struct cam_isp_hw_get_wm_update {
- uint64_t *image_buf;
+ dma_addr_t *image_buf;
uint32_t num_buf;
struct cam_buf_io_cfg *io_cfg;
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 67b572e039d8..206b3a32d9fc 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1131,6 +1131,9 @@ static int cam_vfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
rsrc_data->common_data;
/* Disble WM */
+ cam_io_w_mb(0x0,
+ common_data->mem_base + rsrc_data->hw_regs->cfg);
+
/* Disable all register access, reply on global reset */
CAM_DBG(CAM_ISP, "WM res %d irq_enabled %d",
rsrc_data->index, rsrc_data->irq_enabled);
@@ -2291,9 +2294,10 @@ static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_client = NULL;
uint32_t *reg_val_pair;
- uint32_t i, j, size = 0;
+ uint32_t i, j, k, size = 0;
uint32_t frame_inc = 0, ubwc_bw_limit = 0, camera_hw_version, val;
int rc = 0;
+ uint32_t loop_size = 0;
bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args;
@@ -2492,20 +2496,6 @@ static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
}
}
- /* WM Image address */
- if (wm_data->en_ubwc)
- CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
- wm_data->hw_regs->image_addr,
- (update_buf->wm_update->image_buf[i] +
- io_cfg->planes[i].meta_size));
- else
- CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
- wm_data->hw_regs->image_addr,
- update_buf->wm_update->image_buf[i] +
- wm_data->offset);
- CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
- wm_data->index, reg_val_pair[j-1]);
-
if (wm_data->en_ubwc) {
frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
io_cfg->planes[i].slice_height, 4096);
@@ -2521,6 +2511,28 @@ static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
io_cfg->planes[i].slice_height;
}
+ if (wm_data->index < 3)
+ loop_size = wm_data->irq_subsample_period + 1;
+ else
+ loop_size = 1;
+
+ /* WM Image address */
+ for (k = 0; k < loop_size; k++) {
+ if (wm_data->en_ubwc)
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->image_addr,
+ update_buf->wm_update->image_buf[i] +
+ io_cfg->planes[i].meta_size +
+ k * frame_inc);
+ else
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->image_addr,
+ update_buf->wm_update->image_buf[i] +
+ wm_data->offset + k * frame_inc);
+ CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
+ wm_data->index, reg_val_pair[j-1]);
+ }
+
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->frame_inc, frame_inc);
CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
@@ -2591,6 +2603,13 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
wm_data = vfe_out_data->wm_res[i]->res_priv;
+ if (wm_data->index <= 2 && hfr_cfg->subsample_period > 3) {
+ CAM_ERR(CAM_ISP,
+ "RDI doesn't support irq subsample period %d",
+ hfr_cfg->subsample_period);
+ return -EINVAL;
+ }
+
if ((wm_data->framedrop_pattern !=
hfr_cfg->framedrop_pattern) ||
!wm_data->hfr_cfg_done) {
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index 0f34c9f15b56..fa8984ca2300 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -150,7 +150,7 @@ static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
int rc = -EINVAL;
uint32_t num_in_buf, num_out_buf, i, j, plane;
struct cam_buf_io_cfg *io_cfg;
- uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
size_t size;
num_in_buf = 0;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index d192018bf2f2..0e77a4c01fb4 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -147,7 +147,7 @@ static void cam_mem_put_slot(int32_t idx)
}
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
- uint64_t *iova_ptr, size_t *len_ptr)
+ dma_addr_t *iova_ptr, size_t *len_ptr)
{
int rc = 0, idx;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 7588c179f4a5..14b1a678e010 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -82,7 +82,7 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
* @return Status of operation. Negative in case of error. Zero otherwise.
*/
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
- uint64_t *iova_ptr, size_t *len_ptr);
+ dma_addr_t *iova_ptr, size_t *len_ptr);
/**
* @brief: Returns CPU address information about buffer
*
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 61fd7a85290d..460b3dfe59a2 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1283,7 +1283,8 @@ static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
rc = dev->ops->link_setup(&link_data);
if (rc)
CAM_ERR(CAM_CRM,
- "Unlink failed dev_hdl %d",
+ "Unlink failed dev name %s hdl %x",
+ dev->dev_info.name,
dev->dev_hdl);
}
dev->dev_hdl = 0;
@@ -2350,8 +2351,8 @@ static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
/* Destroy the link handle */
rc = cam_destroy_device_hdl(link->link_hdl);
if (rc < 0) {
- CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
- rc, link->link_hdl);
+ CAM_ERR(CAM_CRM, "error destroying link hdl %x rc %d",
+ link->link_hdl, rc);
}
mutex_unlock(&link->lock);
@@ -2552,8 +2553,7 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
rc = __cam_req_mgr_unlink(link);
/* Free curent link and put back into session's free pool of links */
- if (!rc)
- __cam_req_mgr_unreserve_link(cam_session, link);
+ __cam_req_mgr_unreserve_link(cam_session, link);
done:
mutex_unlock(&g_crm_core_dev->crm_lock);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index 058e3528e248..f859484b178f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -870,8 +870,8 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
struct cam_cci_ctrl *c_ctrl)
{
int32_t rc = 0;
- uint32_t val = 0, i = 0;
- unsigned long rem_jiffies;
+ uint32_t val = 0, i = 0, j = 0;
+ unsigned long rem_jiffies, flags;
int32_t read_words = 0, exp_words = 0;
int32_t index = 0, first_byte = 0, total_read_words = 0;
enum cci_i2c_master_t master;
@@ -990,11 +990,13 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
val = 1 << ((master * 2) + queue);
cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+
exp_words = ((read_cfg->num_byte / 4) + 1);
+ CAM_DBG(CAM_CCI, "waiting for threshold [exp_words %d]", exp_words);
- while (exp_words != total_read_words) {
+ while (total_read_words != exp_words) {
rem_jiffies = wait_for_completion_timeout(
- &cci_dev->cci_master_info[master].reset_complete,
+ &cci_dev->cci_master_info[master].th_complete,
CCI_TIMEOUT);
if (!rem_jiffies) {
rc = -ETIMEDOUT;
@@ -1013,8 +1015,16 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
read_words = cam_io_r_mb(base +
CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+ if (read_words <= 0) {
+ CAM_DBG(CAM_CCI, "FIFO Buffer lvl is 0");
+ continue;
+ }
+
+ j++;
+ CAM_DBG(CAM_CCI, "Iteration: %u read_words %d", j, read_words);
+
total_read_words += read_words;
- do {
+ while (read_words > 0) {
val = cam_io_r_mb(base +
CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
for (i = 0; (i < 4) &&
@@ -1032,9 +1042,57 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
index++;
}
}
- } while (--read_words > 0);
+ read_words--;
+ }
+
+	CAM_DBG(CAM_CCI, "Iteration:%u total_read_words %d",
+ j, total_read_words);
+
+ spin_lock_irqsave(&cci_dev->lock_status, flags);
+ if (cci_dev->irq_status1) {
+ CAM_DBG(CAM_CCI, "clear irq_status1:%x",
+ cci_dev->irq_status1);
+ cam_io_w_mb(cci_dev->irq_status1,
+ base + CCI_IRQ_CLEAR_1_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ cci_dev->irq_status1 = 0;
+ }
+ spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+
+ if (total_read_words == exp_words) {
+ /*
+ * This wait is for RD_DONE irq, if RD_DONE is
+ * triggered we will call complete on both threshold
+ * & read done waits. As part of the threshold wait
+ * we will be draining the entire buffer out. This
+ * wait is to compensate for the complete invoked for
+ * RD_DONE exclusively.
+ */
+ rem_jiffies = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete,
+ CCI_TIMEOUT);
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ val = cam_io_r_mb(base +
+ CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+ master * 0x100);
+ CAM_ERR(CAM_CCI,
+ "Failed to receive RD_DONE irq rc = %d FIFO buf_lvl:0x%x",
+ rc, val);
+ #ifdef DUMP_CCI_REGISTERS
+ cam_cci_dump_registers(cci_dev,
+ master, queue);
+ #endif
+ cam_cci_flush_queue(cci_dev, master);
+ goto rel_mutex;
+ }
+ break;
+ }
}
+ CAM_DBG(CAM_CCI, "Burst read successful words_read %d",
+ total_read_words);
+
rel_mutex:
mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
return rc;
@@ -1166,7 +1224,8 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
val = 1 << ((master * 2) + queue);
cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
- CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
+ CAM_DBG(CAM_CCI,
+ "waiting_for_rd_done [exp_words: %d]", exp_words);
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
@@ -1200,7 +1259,7 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
index = 0;
CAM_DBG(CAM_CCI, "index %d num_type %d", index, read_cfg->num_byte);
first_byte = 0;
- do {
+ while (read_words > 0) {
val = cam_io_r_mb(base +
CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
CAM_DBG(CAM_CCI, "read val 0x%x", val);
@@ -1217,10 +1276,10 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
index++;
}
}
- } while (--read_words > 0);
+ read_words--;
+ }
rel_mutex:
mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
-
return rc;
}
@@ -1399,23 +1458,34 @@ static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd,
}
read_bytes = read_cfg->num_byte;
+
+ /*
+ * To avoid any conflicts due to back to back trigger of
+ * THRESHOLD irq's, we reinit the threshold wait before
+ * we load the burst read cmd.
+ */
+ reinit_completion(&cci_dev->cci_master_info[master].th_complete);
+
+ CAM_DBG(CAM_CCI, "Bytes to read %u", read_bytes);
do {
- if (read_bytes > CCI_I2C_MAX_BYTE_COUNT)
+ if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT)
read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT;
else
read_cfg->num_byte = read_bytes;
- if (read_cfg->num_byte > CCI_READ_MAX)
+ if (read_cfg->num_byte >= CCI_READ_MAX) {
+ cci_dev->is_burst_read = true;
rc = cam_cci_burst_read(sd, c_ctrl);
- else
+ } else {
+ cci_dev->is_burst_read = false;
rc = cam_cci_read(sd, c_ctrl);
-
+ }
if (rc) {
CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
goto ERROR;
}
- if (read_bytes > CCI_I2C_MAX_BYTE_COUNT) {
+ if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) {
read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT /
read_cfg->data_type);
read_cfg->data += CCI_I2C_MAX_BYTE_COUNT;
@@ -1426,6 +1496,7 @@ static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd,
} while (read_bytes);
ERROR:
+ cci_dev->is_burst_read = false;
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index ce7ac3f0b820..958737f1a879 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -65,15 +65,12 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
&cci_dev->soc_info;
void __iomem *base = soc_info->reg_map[0].mem_base;
unsigned long flags;
- bool burst_read_assert = false;
+ bool rd_done_th_assert = false;
irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
- cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
- cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
- cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
-
CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1);
+
if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
cci_dev->cci_master_info[MASTER_0].reset_pending =
@@ -92,18 +89,23 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
(irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) {
cci_dev->cci_master_info[MASTER_0].status = 0;
+ rd_done_th_assert = true;
+ complete(&cci_dev->cci_master_info[MASTER_0].th_complete);
complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
- burst_read_assert = true;
}
if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
- (!burst_read_assert)) {
+ (!rd_done_th_assert)) {
cci_dev->cci_master_info[MASTER_0].status = 0;
+ rd_done_th_assert = true;
+ if (cci_dev->is_burst_read)
+ complete(
+ &cci_dev->cci_master_info[MASTER_0].th_complete);
complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
}
if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
- (!burst_read_assert)) {
+ (!rd_done_th_assert)) {
cci_dev->cci_master_info[MASTER_0].status = 0;
- complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+ complete(&cci_dev->cci_master_info[MASTER_0].th_complete);
}
if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
struct cam_cci_master_info *cci_master_info;
@@ -142,18 +144,23 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
(irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) {
cci_dev->cci_master_info[MASTER_1].status = 0;
+ rd_done_th_assert = true;
+ complete(&cci_dev->cci_master_info[MASTER_1].th_complete);
complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
- burst_read_assert = true;
}
if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
- (!burst_read_assert)) {
+ (!rd_done_th_assert)) {
cci_dev->cci_master_info[MASTER_1].status = 0;
+ rd_done_th_assert = true;
+ if (cci_dev->is_burst_read)
+ complete(
+ &cci_dev->cci_master_info[MASTER_1].th_complete);
complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
}
if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
- (!burst_read_assert)) {
+ (!rd_done_th_assert)) {
cci_dev->cci_master_info[MASTER_1].status = 0;
- complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+ complete(&cci_dev->cci_master_info[MASTER_1].th_complete);
}
if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
struct cam_cci_master_info *cci_master_info;
@@ -189,6 +196,12 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
flags);
}
+ if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE)
+ CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_0");
+
+ if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE)
+ CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_1");
+
if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
cam_io_w_mb(CCI_M0_RESET_RMSK,
@@ -211,6 +224,19 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
base + CCI_HALT_REQ_ADDR);
CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0);
}
+
+ if ((rd_done_th_assert) || (!cci_dev->is_burst_read)) {
+ cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
+ CAM_DBG(CAM_CCI, "clear irq_status0:%x irq_status1:%x",
+ irq_status0, irq_status1);
+ } else {
+ spin_lock_irqsave(&cci_dev->lock_status, flags);
+ cci_dev->irq_status1 |= irq_status1;
+ spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+ }
+
+ cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index d48ffd15b4f9..aec86baf89aa 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -137,6 +137,7 @@ struct cam_cci_master_info {
uint8_t reset_pending;
struct mutex mutex;
struct completion reset_complete;
+ struct completion th_complete;
struct mutex mutex_q[NUM_QUEUES];
struct completion report_q[NUM_QUEUES];
atomic_t done_pending[NUM_QUEUES];
@@ -192,6 +193,11 @@ enum cam_cci_state_t {
* @cci_wait_sync_cfg: CCI sync config
* @cycles_per_us: Cycles per micro sec
* @payload_size: CCI packet payload size
+ * @irq_status1: Store irq_status1 to be cleared after
+ * draining FIFO buffer for burst read
+ * @lock_status: to protect changes to irq_status1
+ * @is_burst_read: Flag to determine if we are performing
+ * a burst read operation or not
*/
struct cci_device {
struct v4l2_subdev subdev;
@@ -216,6 +222,9 @@ struct cci_device {
uint8_t payload_size;
char device_name[20];
uint32_t cpas_handle;
+ uint32_t irq_status1;
+ spinlock_t lock_status;
+ bool is_burst_read;
};
enum cam_cci_i2c_cmd_type {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
index 31c8e26f3f90..65a48e11fbd7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -56,15 +56,17 @@
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000
#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD 0x100000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE 0x200000
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1
#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD 0x10000
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE 0x20000
#define CCI_I2C_M0_RD_THRESHOLD_ADDR 0x00000120
#define CCI_I2C_M1_RD_THRESHOLD_ADDR 0x00000220
-#define CCI_I2C_RD_THRESHOLD_VALUE 0x38
+#define CCI_I2C_RD_THRESHOLD_VALUE 0x30
#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00
#define DEBUG_TOP_REG_START 0x0
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index e0b27ca1e92a..8c2853b5cc62 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -199,6 +199,8 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
mutex_init(&new_cci_dev->cci_master_info[i].mutex);
init_completion(
&new_cci_dev->cci_master_info[i].reset_complete);
+ init_completion(
+ &new_cci_dev->cci_master_info[i].th_complete);
for (j = 0; j < NUM_QUEUES; j++) {
mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]);
@@ -208,6 +210,7 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
&new_cci_dev->cci_master_info[i].lock_q[j]);
}
}
+ spin_lock_init(&new_cci_dev->lock_status);
}
static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index bc61df42bb94..2e9aa6cbb1db 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -296,12 +296,14 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
continue;
}
- settle_cnt = (csiphy_dev->csiphy_info.settle_time / 200000000);
+ settle_cnt = div64_u64(csiphy_dev->csiphy_info.settle_time,
+ 200000000);
if (csiphy_dev->csiphy_info.combo_mode == 1 &&
(lane_pos >= 3))
settle_cnt =
- (csiphy_dev->csiphy_info.settle_time_combo_sensor /
- 200000000);
+ div64_u64(csiphy_dev->csiphy_info.
+ settle_time_combo_sensor,
+ 200000000);
for (i = 0; i < cfg_size; i++) {
switch (reg_array[lane_pos][i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index e5df874b52ae..c5438c917bc0 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -21,6 +21,13 @@
struct sync_device *sync_dev;
+/*
+ * Flag to determine whether to enqueue cb of a
+ * signaled fence onto the workq or invoke it
+ * directly in the same context
+ */
+static bool trigger_cb_without_switch;
+
int cam_sync_create(int32_t *sync_obj, const char *name)
{
int rc;
@@ -59,6 +66,7 @@ int cam_sync_register_callback(sync_callback cb_func,
struct sync_callback_info *sync_cb;
struct sync_callback_info *cb_info;
struct sync_table_row *row = NULL;
+ int status = 0;
if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
return -EINVAL;
@@ -95,18 +103,27 @@ int cam_sync_register_callback(sync_callback cb_func,
if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
(!row->remaining)) {
- sync_cb->callback_func = cb_func;
- sync_cb->cb_data = userdata;
- sync_cb->sync_obj = sync_obj;
- INIT_WORK(&sync_cb->cb_dispatch_work,
- cam_sync_util_cb_dispatch);
- sync_cb->status = row->state;
- CAM_DBG(CAM_SYNC, "Callback trigger for sync object:%d",
- sync_cb->sync_obj);
- queue_work(sync_dev->work_queue,
- &sync_cb->cb_dispatch_work);
+ if (trigger_cb_without_switch) {
+ CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
+ sync_obj);
+ status = row->state;
+ kfree(sync_cb);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ cb_func(sync_obj, status, userdata);
+ } else {
+ sync_cb->callback_func = cb_func;
+ sync_cb->cb_data = userdata;
+ sync_cb->sync_obj = sync_obj;
+ INIT_WORK(&sync_cb->cb_dispatch_work,
+ cam_sync_util_cb_dispatch);
+ sync_cb->status = row->state;
+ CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
+ sync_cb->sync_obj);
+ queue_work(sync_dev->work_queue,
+ &sync_cb->cb_dispatch_work);
+ spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+ }
- spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
return 0;
}
@@ -158,19 +175,11 @@ int cam_sync_deregister_callback(sync_callback cb_func,
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
- int rc;
struct sync_table_row *row = NULL;
struct sync_table_row *parent_row = NULL;
- struct sync_callback_info *sync_cb;
- struct sync_user_payload *payload_info;
- struct sync_parent_info *parent_info;
- struct list_head sync_list;
- struct cam_signalable_info *list_info = NULL;
- struct cam_signalable_info *temp_list_info = NULL;
+ struct sync_parent_info *parent_info, *temp_parent_info;
struct list_head parents_list;
-
- /* Objects to be signaled will be added into this list */
- INIT_LIST_HEAD(&sync_list);
+ int rc = 0;
if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
@@ -213,14 +222,7 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
}
row->state = status;
- rc = cam_sync_util_add_to_signalable_list(sync_obj, status, &sync_list);
- if (rc < 0) {
- spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
- CAM_ERR(CAM_SYNC,
- "Error: Unable to add sync object :%d to signalable list",
- sync_obj);
- return rc;
- }
+ cam_sync_util_dispatch_signaled_cb(sync_obj, status);
/* copy parent list to local and release child lock */
INIT_LIST_HEAD(&parents_list);
@@ -228,13 +230,14 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
if (list_empty(&parents_list))
- goto dispatch_cb;
+ return 0;
/*
* Now iterate over all parents of this object and if they too need to
- * be signaled add them to the list
+ * be signaled dispatch cb's
*/
- list_for_each_entry(parent_info,
+ list_for_each_entry_safe(parent_info,
+ temp_parent_info,
&parents_list,
list) {
parent_row = sync_dev->sync_table + parent_info->sync_id;
@@ -253,93 +256,16 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
continue;
}
- if (!parent_row->remaining) {
- rc = cam_sync_util_add_to_signalable_list
- (parent_info->sync_id,
- parent_row->state,
- &sync_list);
- if (rc < 0) {
- spin_unlock_bh(
- &sync_dev->row_spinlocks[
- parent_info->sync_id]);
- continue;
- }
- }
- spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
- }
-
-dispatch_cb:
-
- /*
- * Now dispatch the various sync objects collected so far, in our
- * list
- */
- list_for_each_entry_safe(list_info,
- temp_list_info,
- &sync_list,
- list) {
- struct sync_table_row *signalable_row = NULL;
- struct sync_callback_info *temp_sync_cb;
- struct sync_user_payload *temp_payload_info;
-
- signalable_row = sync_dev->sync_table + list_info->sync_obj;
-
- spin_lock_bh(&sync_dev->row_spinlocks[list_info->sync_obj]);
- if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
- spin_unlock_bh(
- &sync_dev->row_spinlocks[list_info->sync_obj]);
- continue;
- }
-
- /* Dispatch kernel callbacks if any were registered earlier */
-
- list_for_each_entry_safe(sync_cb,
- temp_sync_cb, &signalable_row->callback_list, list) {
- sync_cb->status = list_info->status;
- list_del_init(&sync_cb->list);
- queue_work(sync_dev->work_queue,
- &sync_cb->cb_dispatch_work);
- }
-
- /* Dispatch user payloads if any were registered earlier */
- list_for_each_entry_safe(payload_info, temp_payload_info,
- &signalable_row->user_payload_list, list) {
- spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
- if (!sync_dev->cam_sync_eventq) {
- spin_unlock_bh(
- &sync_dev->cam_sync_eventq_lock);
- break;
- }
- spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
- cam_sync_util_send_v4l2_event(
- CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
- list_info->sync_obj,
- list_info->status,
- payload_info->payload_data,
- CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
-
- list_del_init(&payload_info->list);
- /*
- * We can free the list node here because
- * sending V4L event will make a deep copy
- * anyway
- */
- kfree(payload_info);
- }
-
- /*
- * This needs to be done because we want to unblock anyone
- * who might be blocked and waiting on this sync object
- */
- complete_all(&signalable_row->signaled);
-
- spin_unlock_bh(&sync_dev->row_spinlocks[list_info->sync_obj]);
+ if (!parent_row->remaining)
+ cam_sync_util_dispatch_signaled_cb(
+ parent_info->sync_id, parent_row->state);
- list_del_init(&list_info->list);
- kfree(list_info);
+ spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
}
- return rc;
+ return 0;
}
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
@@ -965,6 +891,26 @@ static void cam_sync_init_entity(struct sync_device *sync_dev)
}
#endif
+static int cam_sync_create_debugfs(void)
+{
+ sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);
+
+ if (!sync_dev->dentry) {
+ CAM_ERR(CAM_SYNC, "Failed to create sync dir");
+ return -ENOMEM;
+ }
+
+ if (!debugfs_create_bool("trigger_cb_without_switch",
+ 0644, sync_dev->dentry,
+ &trigger_cb_without_switch)) {
+ CAM_ERR(CAM_SYNC,
+ "failed to create trigger_cb_without_switch entry");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int cam_sync_probe(struct platform_device *pdev)
{
int rc;
@@ -1030,6 +976,9 @@ static int cam_sync_probe(struct platform_device *pdev)
goto v4l2_fail;
}
+ trigger_cb_without_switch = false;
+ cam_sync_create_debugfs();
+
return rc;
v4l2_fail:
@@ -1049,6 +998,8 @@ static int cam_sync_remove(struct platform_device *pdev)
v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
cam_sync_media_controller_cleanup(sync_dev);
video_device_release(sync_dev->vdev);
+ debugfs_remove_recursive(sync_dev->dentry);
+ sync_dev->dentry = NULL;
kfree(sync_dev);
sync_dev = NULL;
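The new trigger_cb_without_switch debugfs knob selects whether a callback registered against an already-signaled fence is invoked directly in the caller's context or queued to the sync workqueue. A minimal userspace sketch for enabling it, assuming debugfs is mounted at /sys/kernel/debug; the helper name is hypothetical.

```c
#include <fcntl.h>
#include <unistd.h>

/* Hypothetical helper (not part of this change). */
static int enable_trigger_cb_without_switch(void)
{
	int fd = open("/sys/kernel/debug/camera_sync/trigger_cb_without_switch",
		      O_WRONLY);

	if (fd < 0)
		return -1;

	/* debugfs bool attributes accept "1"/"0" or "Y"/"N". */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
```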
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
index 5ae707a2b6e7..38dab42a56cc 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -177,6 +178,7 @@ struct cam_signalable_info {
* @row_spinlocks : Spinlock array, one for each row in the table
* @table_lock : Mutex used to lock the table
* @open_cnt : Count of file open calls made on the sync driver
+ * @dentry : Debugfs entry
* @work_queue : Work queue used for dispatching kernel callbacks
* @cam_sync_eventq : Event queue used to dispatch user payloads to user space
* @bitmap : Bitmap representation of all sync objects
@@ -188,6 +190,7 @@ struct sync_device {
spinlock_t row_spinlocks[CAM_SYNC_MAX_OBJS];
struct mutex table_lock;
int open_cnt;
+ struct dentry *dentry;
struct workqueue_struct *work_queue;
struct v4l2_fh *cam_sync_eventq;
spinlock_t cam_sync_eventq_lock;
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index f391c8c8d51d..49a9d2f39974 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -287,6 +287,64 @@ void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
kfree(cb_info);
}
+void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
+ uint32_t status)
+{
+ struct sync_callback_info *sync_cb;
+ struct sync_user_payload *payload_info;
+ struct sync_callback_info *temp_sync_cb;
+ struct sync_table_row *signalable_row;
+ struct sync_user_payload *temp_payload_info;
+
+ signalable_row = sync_dev->sync_table + sync_obj;
+ if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
+ CAM_DBG(CAM_SYNC,
+ "Accessing invalid sync object:%i", sync_obj);
+ return;
+ }
+
+ /* Dispatch kernel callbacks if any were registered earlier */
+ list_for_each_entry_safe(sync_cb,
+ temp_sync_cb, &signalable_row->callback_list, list) {
+ sync_cb->status = status;
+ list_del_init(&sync_cb->list);
+ queue_work(sync_dev->work_queue,
+ &sync_cb->cb_dispatch_work);
+ }
+
+ /* Dispatch user payloads if any were registered earlier */
+ list_for_each_entry_safe(payload_info, temp_payload_info,
+ &signalable_row->user_payload_list, list) {
+ spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+ if (!sync_dev->cam_sync_eventq) {
+ spin_unlock_bh(
+ &sync_dev->cam_sync_eventq_lock);
+ break;
+ }
+ spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+ cam_sync_util_send_v4l2_event(
+ CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+ sync_obj,
+ status,
+ payload_info->payload_data,
+ CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+ list_del_init(&payload_info->list);
+ /*
+ * We can free the list node here because
+ * sending V4L event will make a deep copy
+ * anyway
+ */
+ kfree(payload_info);
+ }
+
+ /*
+ * This needs to be done because we want to unblock anyone
+ * who might be blocked and waiting on this sync object
+ */
+ complete_all(&signalable_row->signaled);
+}
+
void cam_sync_util_send_v4l2_event(uint32_t id,
uint32_t sync_obj,
int status,
@@ -312,26 +370,6 @@ void cam_sync_util_send_v4l2_event(uint32_t id,
sync_obj);
}
-int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
- uint32_t status,
- struct list_head *sync_list)
-{
- struct cam_signalable_info *signalable_info = NULL;
-
- signalable_info = kzalloc(sizeof(*signalable_info), GFP_ATOMIC);
- if (!signalable_info)
- return -ENOMEM;
-
- signalable_info->sync_obj = sync_obj;
- signalable_info->status = status;
-
- list_add_tail(&signalable_info->list, sync_list);
- CAM_DBG(CAM_SYNC, "Add sync_obj :%d with status :%d to signalable list",
- sync_obj, status);
-
- return 0;
-}
-
int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
int new_state)
{
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index a9d6f86c1709..cfa450c0e744 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -87,6 +87,17 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work);
/**
+ * @brief: Function to dispatch callbacks for a signaled sync object
+ *
+ * @sync_obj : Sync object that is signaled
+ * @status : Status of the signaled object
+ *
+ * @return None
+ */
+void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
+ uint32_t status);
+
+/**
* @brief: Function to send V4L event to user space
* @param id : V4L event id to send
* @param sync_obj : Sync obj for which event needs to be sent
@@ -103,19 +114,6 @@ void cam_sync_util_send_v4l2_event(uint32_t id,
int len);
/**
- * @brief: Function which adds sync object information to the signalable list
- *
- * @param sync_obj : Sync object to add
- * @param status : Status of above sync object
- * @param list : Linked list where the information should be added to
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int cam_sync_util_add_to_signalable_list(int32_t sync_obj,
- uint32_t status,
- struct list_head *sync_list);
-
-/**
* @brief: Function which gets the next state of the sync object based on the
* current state and the new state
*
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index db2629d2a2f9..a88ccdb93641 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -127,7 +127,7 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
int32_t iommu_hdl, int32_t sec_mmu_hdl)
{
struct cam_patch_desc *patch_desc = NULL;
- uint64_t iova_addr;
+ dma_addr_t iova_addr;
uint64_t cpu_addr;
uint32_t temp;
uint32_t *dst_cpu_addr;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index c84490fa108b..4a9d06b30ba9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -154,6 +154,10 @@ static struct msm_vidc_common_data sdm845_common_data[] = {
.key = "qcom,debug-timeout",
.value = 0,
},
+ {
+ .key = "qcom,enable-idle-indicator",
+ .value = 1,
+ },
};
static struct msm_vidc_common_data sdm670_common_data_v0[] = {
@@ -205,6 +209,10 @@ static struct msm_vidc_common_data sdm670_common_data_v0[] = {
.key = "qcom,hw-resp-timeout",
.value = 1000,
},
+ {
+ .key = "qcom,enable-idle-indicator",
+ .value = 1,
+ },
};
static struct msm_vidc_common_data sdm670_common_data_v1[] = {
@@ -256,6 +264,10 @@ static struct msm_vidc_common_data sdm670_common_data_v1[] = {
.key = "qcom,hw-resp-timeout",
.value = 1000,
},
+ {
+ .key = "qcom,enable-idle-indicator",
+ .value = 1,
+ },
};
static struct msm_vidc_efuse_data sdm670_efuse_data[] = {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index c4ad50a33ec7..79ffbb3063fb 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -2744,7 +2744,7 @@ static void venus_hfi_pm_handler(struct work_struct *work)
{
int rc = 0;
u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
- int count = 0;
+ int pc_count = 0, idle_count = 0;
const int max_tries = 10;
struct venus_hfi_device *device = list_first_entry(
&hal_ctxt.dev_head, struct venus_hfi_device, list);
@@ -2793,8 +2793,16 @@ static void venus_hfi_pm_handler(struct work_struct *work)
wfi_status);
goto skip_power_off;
}
- if (device->res->sys_idle_indicator &&
- !(idle_status & BIT(30))) {
+ while (device->res->sys_idle_indicator &&
+ idle_count < max_tries) {
+ if (idle_status & BIT(30))
+ break;
+ usleep_range(50, 100);
+ idle_status = __read_register(device,
+ VIDC_CPU_CS_SCIACMDARG0);
+ idle_count++;
+ }
+ if (idle_count == max_tries) {
dprintk(VIDC_WARN,
"Skipping PC as idle_status (%#x) bit not set\n",
idle_status);
@@ -2807,7 +2815,7 @@ static void venus_hfi_pm_handler(struct work_struct *work)
goto skip_power_off;
}
- while (count < max_tries) {
+ while (pc_count < max_tries) {
wfi_status = __read_register(device,
VIDC_WRAPPER_CPU_STATUS);
pc_ready = __read_register(device,
@@ -2816,10 +2824,10 @@ static void venus_hfi_pm_handler(struct work_struct *work)
VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY))
break;
usleep_range(150, 250);
- count++;
+ pc_count++;
}
- if (count == max_tries) {
+ if (pc_count == max_tries) {
dprintk(VIDC_ERR,
"Skip PC. Core is not in right state (%#x, %#x)\n",
wfi_status, pc_ready);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3f4c3989870..f405125fbbde 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -826,3 +826,13 @@ source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
endmenu
+
+config OKL4_LINK_SHBUF
+ tristate "OKL4 link with shared buffer transport"
+ default y
+ depends on OKL4_GUEST
+ help
+ Enable driver for OKL4 inter-cell links using the "shared-buffer"
+ transport. This driver presents the link to Linux as a character device
+ which can be written to or read from to access the shared memory. An ioctl
+ on the device is used to send a virtual interrupt to the partner cell.
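A minimal userspace sketch of the interface described in the help text, assuming a link labelled "foo" (exposed as /dev/okl4-foo) and that the okl4-link-shbuf uapi header is installed as <linux/okl4-link-shbuf.h>; the function name is hypothetical.

```c
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/okl4-link-shbuf.h>

/* Hypothetical usage sketch (not part of this change). */
int okl4_shbuf_example(void)
{
	uint64_t payload = 1;
	struct pollfd pfd;
	int fd = open("/dev/okl4-foo", O_RDWR);

	if (fd < 0)
		return -1;

	/* Write into the shared buffer, then raise the outgoing vIRQ. */
	write(fd, "ping", 4);
	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload);

	/* Wait for the partner cell's vIRQ, then clear it and fetch the payload. */
	pfd.fd = fd;
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);
	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload);

	close(fd);
	return 0;
}
```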
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 744001af414f..d57d21f54df1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -75,3 +75,5 @@ OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
$(call if_changed,objcopy)
+
+obj-$(CONFIG_OKL4_LINK_SHBUF) += okl4-link-shbuf.o
diff --git a/drivers/misc/okl4-link-shbuf.c b/drivers/misc/okl4-link-shbuf.c
new file mode 100644
index 000000000000..de65ea05d2a3
--- /dev/null
+++ b/drivers/misc/okl4-link-shbuf.c
@@ -0,0 +1,667 @@
+/*
+ * Driver for inter-cell links using the shared-buffer transport.
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/atomic.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/version.h>
+#include <microvisor/microvisor.h>
+#include <uapi/linux/okl4-link-shbuf.h>
+
+static const char DEVICE_NAME[] = "okl4_link_shbuf";
+
+/* Created devices will appear as /dev/<DEV_PREFIX><name> */
+static const char DEV_PREFIX[] = "okl4-";
+
+static const struct of_device_id okl4_link_shbuf_match[] = {
+ {
+ .compatible = "okl,microvisor-link-shbuf",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, okl4_link_shbuf_match);
+
+static struct class *link_shbuf_class;
+static dev_t link_shbuf_dev;
+
+/* A lock used to protect access to link_shbuf_dev */
+static spinlock_t device_number_allocate;
+
+/* Sentinel values for indicating missing communication channels */
+static const u32 NO_OUTGOING_IRQ = 0;
+static const int NO_INCOMING_IRQ = -1;
+
+/* Private data for this driver */
+struct link_shbuf_data {
+
+ /* Outgoing vIRQ */
+ u32 virqline;
+
+ /* Incoming vIRQ */
+ int virq;
+ atomic64_t virq_payload;
+ bool virq_pending;
+ wait_queue_head_t virq_wq;
+
+ /* Shared memory region */
+ void *base;
+ fmode_t permissions;
+ struct resource buffer;
+
+ /* Device data */
+ dev_t devt;
+ struct device *dev;
+ struct cdev cdev;
+
+};
+
+static bool link_shbuf_data_invariant(const struct link_shbuf_data *priv)
+{
+ if (!priv)
+ return false;
+
+ if (!priv->base || (uintptr_t)priv->base % PAGE_SIZE != 0)
+ return false;
+
+ if (resource_size(&priv->buffer) == 0)
+ return false;
+
+ if (!priv->dev)
+ return false;
+
+ return true;
+}
+
+static bool link_shbuf_valid_access(size_t size, loff_t pos, size_t count)
+{
+ return pos < size && count <= size && size - count >= pos;
+}
+
+static ssize_t link_shbuf_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ long remaining;
+ const struct link_shbuf_data *priv;
+
+ /* The file should have been opened with read access to reach here */
+ if (WARN_ON(!(file->f_mode & FMODE_READ)))
+ return -EINVAL;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+ return -EINVAL;
+
+ remaining = copy_to_user(buffer, priv->base + *ppos, count);
+ *ppos += count - remaining;
+ return count - remaining;
+}
+
+static ssize_t link_shbuf_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ long remaining;
+ const struct link_shbuf_data *priv;
+
+ /* The file should have been opened with write access to reach here */
+ if (WARN_ON(!(file->f_mode & FMODE_WRITE)))
+ return -EINVAL;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+ return -EINVAL;
+
+ remaining = copy_from_user(priv->base + *ppos, buffer, count);
+ *ppos += count - remaining;
+ return count - remaining;
+}
+
+static unsigned int link_shbuf_poll(struct file *file, poll_table *table)
+{
+ struct link_shbuf_data *priv;
+ unsigned int mask;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return POLLERR;
+
+ poll_wait(file, &priv->virq_wq, table);
+
+ /* The shared memory is always considered ready for reading and writing. */
+ mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+
+ if (priv->virq_pending)
+ mask |= POLLPRI;
+
+ return mask;
+}
+
+static long link_shbuf_ioctl_irq_tx(const struct link_shbuf_data *priv,
+ unsigned long arg)
+{
+ okl4_error_t err;
+ u64 payload;
+ const u64 __user *user_arg = (const u64 __user*)arg;
+
+ if (priv->virqline == NO_OUTGOING_IRQ)
+ return -EINVAL;
+
+#if defined(CONFIG_ARM) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+ if (copy_from_user(&payload, user_arg, sizeof(payload)))
+ return -EFAULT;
+#else
+ if (get_user(payload, user_arg))
+ return -EFAULT;
+#endif
+
+ err = _okl4_sys_vinterrupt_raise(priv->virqline, payload);
+ if (WARN_ON(err != OKL4_OK))
+ return -EINVAL;
+
+ return 0;
+}
+
+static long link_shbuf_ioctl_irq_clr(struct link_shbuf_data *priv,
+ unsigned long arg)
+{
+ u64 payload;
+ u64 __user *user_arg = (u64 __user*)arg;
+
+ /*
+ * Check validity of the user pointer before clearing the interrupt to avoid
+ * races involved with having to undo the latter.
+ */
+ if (!access_ok(VERIFY_WRITE, user_arg, sizeof(*user_arg)))
+ return -EFAULT;
+
+ /*
+ * Note that the clearing of the pending flag can race with the setting of
+ * this flag in the IRQ handler. It is up to the user to coordinate these
+ * actions.
+ */
+ priv->virq_pending = false;
+ smp_rmb();
+ payload = atomic64_xchg(&priv->virq_payload, 0);
+
+ /* We've already checked that this access is OK, so no need for put_user. */
+ if (__put_user(payload, user_arg))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long link_shbuf_ioctl(struct file *file, unsigned int request,
+ unsigned long arg)
+{
+ struct link_shbuf_data *priv;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ /* We only support two ioctls */
+ switch (request) {
+
+ case OKL4_LINK_SHBUF_IOCTL_IRQ_TX:
+ return link_shbuf_ioctl_irq_tx(priv, arg);
+
+ case OKL4_LINK_SHBUF_IOCTL_IRQ_CLR:
+ return link_shbuf_ioctl_irq_clr(priv, arg);
+
+ }
+
+ /*
+ * Handy for debugging when userspace is linking against ioctl headers from
+ * a different kernel revision.
+ */
+ dev_dbg(priv->dev, "ioctl request 0x%x received which did not match either "
+ "OKL4_LINK_SHBUF_IOCTL_IRQ_TX (0x%x) or OKL4_LINK_SHBUF_IOCTL_IRQ_CLR "
+ "(0x%x)\n", request, (unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_TX,
+ (unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_CLR);
+
+ return -EINVAL;
+}
+
+static int link_shbuf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ const struct link_shbuf_data *priv;
+ unsigned long offset, pfn, flags;
+ size_t size;
+ pgprot_t prot;
+
+ /* Our caller should have taken the MM semaphore. */
+ if (WARN_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)))
+ return -EINVAL;
+
+ /*
+ * The file should have been opened with a superset of the mmap requested
+ * permissions.
+ */
+ flags = vma->vm_flags;
+ if (WARN_ON((flags & VM_READ) && !(file->f_mode & FMODE_READ)))
+ return -EINVAL;
+ if (WARN_ON((flags & VM_WRITE) && !(file->f_mode & FMODE_WRITE)))
+ return -EINVAL;
+ if (WARN_ON((flags & VM_EXEC) && !(file->f_mode & FMODE_EXEC)))
+ return -EINVAL;
+
+ /* Retrieve our private data. */
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ /* Check the mmap request is within bounds. */
+ size = vma->vm_end - vma->vm_start;
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ if (!link_shbuf_valid_access(resource_size(&priv->buffer), offset, size))
+ return -EINVAL;
+
+ pfn = (priv->buffer.start + offset) >> PAGE_SHIFT;
+ prot = vm_get_page_prot(flags);
+
+ return remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
+}
+
+static bool link_shbuf_access_ok(fmode_t allowed, fmode_t request)
+{
+ static const fmode_t ACCESS_MASK = FMODE_READ|FMODE_WRITE|FMODE_EXEC;
+ fmode_t relevant = request & ACCESS_MASK;
+ return (relevant & allowed) == relevant;
+}
+
+static int link_shbuf_open(struct inode *inode, struct file *file)
+{
+ struct cdev *cdev;
+ struct link_shbuf_data *priv;
+
+ /* Retrieve a pointer to our private data */
+ cdev = inode->i_cdev;
+ priv = container_of(cdev, struct link_shbuf_data, cdev);
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ if (!link_shbuf_access_ok(priv->permissions, file->f_mode))
+ return -EACCES;
+
+ file->private_data = priv;
+
+ return 0;
+}
+
+static const struct file_operations link_shbuf_ops = {
+ .owner = THIS_MODULE,
+ .read = link_shbuf_read,
+ .write = link_shbuf_write,
+ .poll = link_shbuf_poll,
+ .unlocked_ioctl = link_shbuf_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = link_shbuf_ioctl,
+#endif
+#ifdef CONFIG_MMU
+ .mmap = link_shbuf_mmap,
+#endif
+ .open = link_shbuf_open,
+};
+
+/*
+ * Interrupt handler.
+ *
+ * This function will be called when our link partner uses the ioctl on their
+ * shared memory device to send an outgoing interrupt.
+ */
+static irqreturn_t link_shbuf_irq_handler(int irq, void *data)
+{
+ u64 payload, old, new;
+ struct _okl4_sys_interrupt_get_payload_return _payload;
+
+ /* Retrieve a pointer to our private data. */
+ struct link_shbuf_data *priv = data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return IRQ_NONE;
+
+ /*
+ * We should only ever be handling a single interrupt, and only if there
+ * was an incoming interrupt in the configuration.
+ */
+ if (WARN_ON(priv->virq < 0 || priv->virq != irq))
+ return IRQ_NONE;
+
+ _payload = _okl4_sys_interrupt_get_payload(irq);
+ payload = (u64)_payload.payload;
+
+ /*
+ * At this point, it is possible the pending flag is already set. It is up to
+ * the user to synchronise their transmission and acknowledgement of
+ * interrupts.
+ */
+
+ /* We open code atomic64_or which is not universally available. */
+ do {
+ old = atomic64_read(&priv->virq_payload);
+ new = old | payload;
+ } while (atomic64_cmpxchg(&priv->virq_payload, old, new) != old);
+ smp_wmb();
+ priv->virq_pending = true;
+
+ wake_up_interruptible(&priv->virq_wq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a unique device number for this device.
+ *
+ * Note that this function needs to lock its access to link_shbuf_dev as there
+ * may be multiple threads attempting to acquire a new device number.
+ */
+static int link_shbuf_allocate_device(dev_t *devt)
+{
+ int ret = 0;
+ dev_t next;
+
+ spin_lock(&device_number_allocate);
+
+ *devt = link_shbuf_dev;
+ next = MKDEV(MAJOR(link_shbuf_dev), MINOR(link_shbuf_dev) + 1);
+ /* Check for overflow */
+ if (MINOR(next) != MINOR(link_shbuf_dev) + 1)
+ ret = -ENOSPC;
+ else
+ link_shbuf_dev = next;
+
+ spin_unlock(&device_number_allocate);
+
+ return ret;
+}
+
+/*
+ * Discover and add a new shared-buffer link.
+ *
+ * In the following function, we are expecting to parse device tree entries
+ * looking like the following:
+ *
+ * hypervisor {
+ * ...
+ * interrupt-line@1d {
+ * compatible = "okl,microvisor-interrupt-line",
+ * "okl,microvisor-capability";
+ * phandle = <0x7>;
+ * reg = <0x1d>;
+ * label = "foo_virqline";
+ * };
+ * };
+ *
+ * foo@41003000 {
+ * compatible = "okl,microvisor-link-shbuf",
+ * "okl,microvisor-shared-memory";
+ * phandle = <0xd>;
+ * reg = <0x0 0x41003000 0x2000>;
+ * label = "foo";
+ * okl,rwx = <0x6>;
+ * okl,interrupt-line = <0x7>;
+ * interrupts = <0x0 0x4 0x1>;
+ * interrupt-parent = <0x1>;
+ * };
+ */
+static int link_shbuf_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node, *virqline;
+ struct link_shbuf_data *priv;
+ const char *name;
+ u32 permissions;
+
+ node = pdev->dev.of_node;
+
+ if (!node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * Retrieve the outgoing vIRQ cap. Note, this is configurable and we
+ * anticipate that it may not exist.
+ */
+ virqline = of_parse_phandle(node, "okl,interrupt-line", 0);
+ if (!virqline) {
+ priv->virqline = NO_OUTGOING_IRQ;
+ } else {
+ ret = of_property_read_u32(virqline, "reg", &priv->virqline);
+ if (ret < 0 || priv->virqline == OKL4_KCAP_INVALID) {
+ of_node_put(virqline);
+ ret = -ENODEV;
+ goto err_free_dev;
+ }
+ }
+ of_node_put(virqline);
+
+ /* Retrieve the incoming vIRQ number. Again, this is configurable and we
+ * anticipate that it may not exist.
+ */
+ priv->virq = platform_get_irq(pdev, 0);
+ if (priv->virq < 0)
+ priv->virq = NO_INCOMING_IRQ;
+
+ /* If we have a valid incoming vIRQ, register to handle it. */
+ if (priv->virq >= 0) {
+ ret = devm_request_irq(&pdev->dev, priv->virq, link_shbuf_irq_handler,
+ 0, dev_name(&pdev->dev), priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed request for IRQ\n");
+ goto err_free_dev;
+ }
+ }
+
+ init_waitqueue_head(&priv->virq_wq);
+ priv->virq_pending = false;
+
+ /* Retrieve information about the shared memory region. */
+ ret = of_address_to_resource(node, 0, &priv->buffer);
+ if (ret < 0)
+ goto err_free_irq;
+ /*
+ * We expect the Elfweaver to have validated that we have a non-NULL,
+ * page-aligned region.
+ */
+ if (WARN_ON(priv->buffer.start == 0) ||
+     WARN_ON(resource_size(&priv->buffer) % PAGE_SIZE != 0)) {
+ ret = -EINVAL;
+ goto err_free_irq;
+ }
+ if (!devm_request_mem_region(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer), dev_name(&pdev->dev))) {
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+ priv->base = devm_ioremap(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer));
+ if (!priv->base) {
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ /* Read the permissions of the shared memory region. */
+ ret = of_property_read_u32(node, "okl,rwx", &permissions);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read shared memory permissions\n");
+ goto err_unmap_dev;
+ }
+ if (permissions & ~S_IRWXO) {
+ ret = -EINVAL;
+ goto err_unmap_dev;
+ }
+ priv->permissions = ((permissions & S_IROTH) ? FMODE_READ : 0) |
+ ((permissions & S_IWOTH) ? FMODE_WRITE : 0) |
+ ((permissions & S_IXOTH) ? FMODE_EXEC : 0);
+ if (WARN_ON(priv->permissions == 0)) {
+ ret = -EINVAL;
+ goto err_unmap_dev;
+ }
+
+ /* Retrieve the label of this device. This will be the "name" attribute of
+ * the corresponding "link" tag in the system's XML specification.
+ */
+ ret = of_property_read_string(node, "label", &name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read label\n");
+ goto err_unmap_dev;
+ }
+
+ cdev_init(&priv->cdev, &link_shbuf_ops);
+ ret = cdev_add(&priv->cdev, link_shbuf_dev, 1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add char dev region\n");
+ goto err_unmap_dev;
+ }
+
+ ret = link_shbuf_allocate_device(&priv->devt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate new device number\n");
+ goto err_del_dev;
+ }
+
+ /* We're now ready to create the device itself. */
+ BUG_ON(name == NULL);
+ priv->dev = device_create(link_shbuf_class, &pdev->dev, priv->devt,
+ priv, "%s%s", DEV_PREFIX, name);
+ if (IS_ERR(priv->dev)) {
+ dev_err(&pdev->dev, "failed to create device\n");
+ ret = PTR_ERR(priv->dev);
+ goto err_del_dev;
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+
+err_del_dev:
+ cdev_del(&priv->cdev);
+err_unmap_dev:
+ devm_iounmap(&pdev->dev, priv->base);
+err_release_region:
+ devm_release_mem_region(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer));
+err_free_irq:
+ if (priv->virq != NO_INCOMING_IRQ)
+ devm_free_irq(&pdev->dev, priv->virq, priv);
+err_free_dev:
+ devm_kfree(&pdev->dev, priv);
+ return ret;
+}
+
+static int link_shbuf_remove(struct platform_device *pdev)
+{
+ struct link_shbuf_data *priv;
+
+ priv = dev_get_drvdata(&pdev->dev);
+ WARN_ON(!link_shbuf_data_invariant(priv));
+
+ device_destroy(link_shbuf_class, priv->devt);
+
+ cdev_del(&priv->cdev);
+
+ /*
+ * None of the following is strictly required, as these are all managed
+ * resources, but we clean them up anyway for clarity.
+ */
+
+ devm_iounmap(&pdev->dev, priv->base);
+
+ devm_release_mem_region(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer));
+
+ if (priv->virq != NO_INCOMING_IRQ)
+ devm_free_irq(&pdev->dev, priv->virq, priv);
+
+ devm_kfree(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver of_plat_link_shbuf_driver = {
+ .driver = {
+ .name = "okl4-shbuf",
+ .owner = THIS_MODULE,
+ .of_match_table = okl4_link_shbuf_match,
+ },
+ .probe = link_shbuf_probe,
+ .remove = link_shbuf_remove,
+};
+
+/* Maximum number of minor device numbers */
+enum {
+ MAX_MINOR = 1 << MINORBITS,
+};
+
+static int __init okl4_link_shbuf_init(void)
+{
+ int ret;
+
+ link_shbuf_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(link_shbuf_class)) {
+ pr_err("failed to create class\n");
+ ret = PTR_ERR(link_shbuf_class);
+ return ret;
+ }
+
+ ret = alloc_chrdev_region(&link_shbuf_dev, 0, MAX_MINOR, DEVICE_NAME);
+ if (ret < 0) {
+ pr_err("failed to allocate char dev region\n");
+ goto err_destroy_class;
+ }
+
+ ret = platform_driver_register(&of_plat_link_shbuf_driver);
+ if (ret < 0) {
+ pr_err("failed to register driver\n");
+ goto err_unregister_dev_region;
+ }
+
+ spin_lock_init(&device_number_allocate);
+
+ return 0;
+
+err_unregister_dev_region:
+ unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+err_destroy_class:
+ class_destroy(link_shbuf_class);
+ return ret;
+}
+module_init(okl4_link_shbuf_init);
+
+static void __exit okl4_link_shbuf_exit(void)
+{
+ platform_driver_unregister(&of_plat_link_shbuf_driver);
+ unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+ class_destroy(link_shbuf_class);
+}
+module_exit(okl4_link_shbuf_exit);
+
+MODULE_DESCRIPTION("OKL4 shared buffer link driver");
+MODULE_AUTHOR("Cog Systems Pty Ltd");
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index f86ce5b85ef2..42021affb043 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -244,8 +244,8 @@ int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
}
static
-void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
- u32 slot, unsigned int bypass, short key_index)
+void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, u32 slot,
+ unsigned int bypass, short key_index, u32 cdu_sz)
{
unsigned int ctrl_info_val = 0;
@@ -257,7 +257,7 @@ void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
/* Configure data unit size of transfer request */
ctrl_info_val |=
- (SDHCI_MSM_ICE_TR_DATA_UNIT_512_B &
+ (cdu_sz &
MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU;
@@ -335,8 +335,9 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
short key_index = 0;
- sector_t lba = 0;
+ u64 dun = 0;
unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B;
struct request *req;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
@@ -349,8 +350,13 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
if (!mrq)
return -EINVAL;
req = mrq->req;
- if (req) {
- lba = req->__sector;
+ if (req && req->bio) {
+ if (bio_dun(req->bio)) {
+ dun = bio_dun(req->bio);
+ cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB;
+ } else {
+ dun = req->__sector;
+ }
err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
if (err)
return err;
@@ -362,11 +368,12 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
if (msm_host->ice_hci_support) {
/* For ICE HCI / ICE3.0 */
- sdhci_msm_ice_hci_update_noncq_cfg(host, lba, bypass,
+ sdhci_msm_ice_hci_update_noncq_cfg(host, dun, bypass,
key_index);
} else {
/* For ICE versions earlier to ICE3.0 */
- sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+ sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index,
+ cdu_sz);
}
return 0;
}
@@ -378,9 +385,10 @@ int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
short key_index = 0;
- sector_t lba = 0;
+ u64 dun = 0;
unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
struct request *req;
+ u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B;
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state %d\n",
@@ -392,8 +400,13 @@ int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
if (!mrq)
return -EINVAL;
req = mrq->req;
- if (req) {
- lba = req->__sector;
+ if (req && req->bio) {
+ if (bio_dun(req->bio)) {
+ dun = bio_dun(req->bio);
+ cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB;
+ } else {
+ dun = req->__sector;
+ }
err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
if (err)
return err;
@@ -405,11 +418,12 @@ int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
if (msm_host->ice_hci_support) {
/* For ICE HCI / ICE3.0 */
- sdhci_msm_ice_hci_update_cmdq_cfg(lba, bypass, key_index,
+ sdhci_msm_ice_hci_update_cmdq_cfg(dun, bypass, key_index,
ice_ctx);
} else {
/* For ICE versions earlier to ICE3.0 */
- sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+ sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index,
+ cdu_sz);
}
return 0;
}
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
index 3a45cf805288..8ed809153120 100644
--- a/drivers/net/ppp/pppolac.c
+++ b/drivers/net/ppp/pppolac.c
@@ -83,7 +83,7 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
/* Put it back if it is a control packet. */
if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
- return opt->backlog_rcv(sk_udp, skb);
+ return 2;
/* Skip UDP header. */
skb_pull(skb, sizeof(struct udphdr));
@@ -190,9 +190,10 @@ drop:
static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
{
+ int retval;
sock_hold(sk_udp);
- sk_receive_skb(sk_udp, skb, 0);
- return 0;
+ retval = sk_receive_skb(sk_udp, skb, 0);
+ return (retval >> 1);
}
static struct sk_buff_head delivery_queue;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 93f25973fa59..615de01da970 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -63,7 +63,15 @@ int ipa3_enable_data_path(u32 clnt_hdl)
IPADBG("Enabling data path\n");
if (IPA_CLIENT_IS_CONS(ep->client)) {
memset(&holb_cfg, 0, sizeof(holb_cfg));
- holb_cfg.en = IPA_HOLB_TMR_DIS;
+ /*
+ * Set HOLB on USB DPL CONS to avoid IPA stall
+ * if DPL client is not pulling the data
+ * on other end from IPA hw.
+ */
+ if (ep->client == IPA_CLIENT_USB_DPL_CONS)
+ holb_cfg.en = IPA_HOLB_TMR_EN;
+ else
+ holb_cfg.en = IPA_HOLB_TMR_DIS;
holb_cfg.tmr_val = 0;
res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 38bc9878dfea..7fd8e50009f0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1522,6 +1522,8 @@ int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
ipa3_ctx->nat_mem.pdn_mem.size,
ipa3_ctx->nat_mem.pdn_mem.base,
ipa3_ctx->nat_mem.pdn_mem.phys_base);
+ ipa3_ctx->nat_mem.pdn_mem.base = NULL;
+ ipa3_ctx->nat_mem.dev.is_mem_allocated = false;
}
ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->nat_mem.dev);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index a0f1f54ff919..dacccbc4d033 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -908,6 +908,20 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
return 0;
}
+static int __ipa_rt_validate_rule_id(u16 rule_id)
+{
+ if (!rule_id)
+ return 0;
+
+ if ((rule_id < ipahal_get_rule_id_hi_bit()) ||
+ (rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
+ IPAERR_RL("Invalid rule_id provided 0x%x\n",
+ rule_id);
+ return -EPERM;
+ }
+
+ return 0;
+}
static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
struct ipa3_hdr_entry **hdr,
struct ipa3_hdr_proc_ctx_entry **proc_ctx)
@@ -1022,6 +1036,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
goto error;
+ if (__ipa_rt_validate_rule_id(rule_id))
+ goto error;
tbl = __ipa_add_rt_tbl(ip, name);
if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9b9c619a3717..edb09d82a675 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -897,17 +897,16 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
req = lrbp->cmd->request;
else
return 0;
- /*
- * Right now ICE do not support variable dun but can be
- * taken as future enhancement
- * if (bio_dun(req->bio)) {
- * dun @bio can be split, so we have to adjust offset
- * *dun = bio_dun(req->bio);
- * } else
- */
+
+ /* Use request LBA or given dun as the DUN value */
if (req->bio) {
- *dun = req->bio->bi_iter.bi_sector;
- *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+ if (bio_dun(req->bio)) {
+ /* dun @bio can be split, so we have to adjust offset */
+ *dun = bio_dun(req->bio);
+ } else {
+ *dun = req->bio->bi_iter.bi_sector;
+ *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+ }
}
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 770f056e69d2..07bad4991efc 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -787,3 +787,19 @@ config QCOM_QDSS_BRIDGE
MHI and USB interface. If unsure, say N.
source "drivers/soc/qcom/wcnss/Kconfig"
+
+config QCOM_HYP_CORE_CTL
+ bool "CPU reservation scheme for Hypervisor"
+ help
+ This driver reserves the specified CPUs by isolating them. The reserved
+ CPUs can be assigned to another guest OS by the hypervisor.
+ An offline CPU is considered a reserved CPU since this OS can't use
+ it.
+
+config QCOM_HYP_CORE_CTL_RESERVE_CPUS
+ string "Reserve CPUs for HYP_CORE_CTL"
+ depends on QCOM_HYP_CORE_CTL
+ default "4-5" if ARCH_SDM670
+ help
+ A compile-time knob specifying the cpumask of CPUs to be reserved by
+ the QCOM_HYP_CORE_CTL driver.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 025576184df0..ee692651faa7 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -97,3 +97,4 @@ obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
obj-$(CONFIG_WCNSS_CORE) += wcnss/
+obj-$(CONFIG_QCOM_HYP_CORE_CTL) += hyp_core_ctl.o
diff --git a/drivers/soc/qcom/hyp_core_ctl.c b/drivers/soc/qcom/hyp_core_ctl.c
new file mode 100644
index 000000000000..40b33c990096
--- /dev/null
+++ b/drivers/soc/qcom/hyp_core_ctl.c
@@ -0,0 +1,354 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "hyp_core_ctl: " fmt
+
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>
+
+/**
+ * struct hyp_core_ctl_data - The private data structure of this driver
+ * @lock: spinlock to serialize task wakeup and enable/reserve_cpus
+ * @task: task_struct pointer to the thread running the state machine
+ * @pending: state machine work pending status
+ * @reservation_enabled: status of the reservation
+ *
+ * @reserve_cpus: The CPUs to be reserved (input).
+ * @our_isolated_cpus: The CPUs isolated by the hyp_core_ctl driver (output).
+ * @final_reserved_cpus: The CPUs reserved for the hypervisor (output).
+ *
+ */
+struct hyp_core_ctl_data {
+ spinlock_t lock;
+ struct task_struct *task;
+ bool pending;
+ bool reservation_enabled;
+ cpumask_t reserve_cpus;
+ cpumask_t our_isolated_cpus;
+ cpumask_t final_reserved_cpus;
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/hyp_core_ctl.h>
+
+static struct hyp_core_ctl_data *the_hcd;
+
+static char reserve_cpus_param[32] = CONFIG_QCOM_HYP_CORE_CTL_RESERVE_CPUS;
+
+static struct kparam_string reserve_cpus_arg = {
+ .maxlen = sizeof(reserve_cpus_param),
+ .string = reserve_cpus_param,
+};
+
+static int set_reserve_cpus(const char *buf, const struct kernel_param *kp)
+{
+ int ret;
+
+ if (!the_hcd || the_hcd->reservation_enabled)
+ return -EPERM;
+
+ ret = param_set_copystring(buf, kp);
+ if (ret < 0)
+ return ret;
+
+ ret = cpulist_parse(reserve_cpus_param, &the_hcd->reserve_cpus);
+ if (ret < 0) {
+ pr_err("fail to set reserve_cpus_param. err=%d\n", ret);
+ return -EINVAL;
+ }
+
+ pr_debug("reserve_cpumask_param is set to %*pbl\n",
+ cpumask_pr_args(&the_hcd->reserve_cpus));
+
+ return 0;
+}
+
+/*
+ * Since this driver is built statically, the sysfs files corresponding
+ * to the module param can be accessed even when the init routine
+ * fails. Implement the get methods to return an error in such a scenario.
+ */
+static int get_reserve_cpus(char *buffer, const struct kernel_param *kp)
+{
+ if (!the_hcd)
+ return -ENODEV;
+
+ return param_get_string(buffer, kp);
+}
+
+static const struct kernel_param_ops reserve_cpus_ops = {
+ .set = set_reserve_cpus,
+ .get = get_reserve_cpus,
+};
+module_param_cb(reserve_cpus, &reserve_cpus_ops, &reserve_cpus_arg, 0644);
+
+static int hyp_core_ctl_enable(bool enable)
+{
+ int ret = 0;
+
+ spin_lock(&the_hcd->lock);
+ if (enable == the_hcd->reservation_enabled)
+ goto out;
+
+ if (cpumask_empty(&the_hcd->reserve_cpus)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ trace_hyp_core_ctl_enable(enable);
+ pr_debug("reservation %s\n", enable ? "enabled" : "disabled");
+
+ the_hcd->reservation_enabled = enable;
+ the_hcd->pending = true;
+ wake_up_process(the_hcd->task);
+out:
+ spin_unlock(&the_hcd->lock);
+ return ret;
+}
+
+static bool reservation_enabled_param;
+static int set_reservation_enabled(const char *buf,
+ const struct kernel_param *kp)
+{
+ int ret;
+ bool old_val = reservation_enabled_param;
+
+ if (!the_hcd)
+ return -EPERM;
+
+ ret = param_set_bool(buf, kp);
+ if (ret < 0) {
+ pr_err("fail to set reservation_enabled_param err=%d\n", ret);
+ return ret;
+ }
+
+ ret = hyp_core_ctl_enable(reservation_enabled_param);
+ if (ret < 0) {
+ pr_err("fail to enable reservation. ret=%d\n", ret);
+ reservation_enabled_param = old_val;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int get_reservation_enabled(char *buffer, const struct kernel_param *kp)
+{
+ if (!the_hcd)
+ return -ENODEV;
+
+ return param_get_bool(buffer, kp);
+}
+
+static const struct kernel_param_ops reservation_enabled_ops = {
+ .set = set_reservation_enabled,
+ .get = get_reservation_enabled,
+};
+module_param_cb(enable, &reservation_enabled_ops,
+ &reservation_enabled_param, 0644);
+
+static inline void hyp_core_ctl_print_status(char *msg)
+{
+ trace_hyp_core_ctl_status(the_hcd, msg);
+
+ pr_debug("%s: reserve=%*pbl reserved=%*pbl our_isolated=%*pbl online=%*pbl isolated=%*pbl\n",
+ msg, cpumask_pr_args(&the_hcd->reserve_cpus),
+ cpumask_pr_args(&the_hcd->final_reserved_cpus),
+ cpumask_pr_args(&the_hcd->our_isolated_cpus),
+ cpumask_pr_args(cpu_online_mask),
+ cpumask_pr_args(cpu_isolated_mask));
+}
+
+static void hyp_core_ctl_undo_reservation(struct hyp_core_ctl_data *hcd)
+{
+ int cpu, ret;
+
+ hyp_core_ctl_print_status("undo_reservation_start");
+
+ for_each_cpu(cpu, &hcd->our_isolated_cpus) {
+ ret = sched_unisolate_cpu(cpu);
+ if (ret < 0) {
+ pr_err("fail to un-isolate CPU%d. ret=%d\n", cpu, ret);
+ continue;
+ }
+ cpumask_clear_cpu(cpu, &hcd->our_isolated_cpus);
+ }
+
+ hyp_core_ctl_print_status("undo_reservation_end");
+}
+
+static void finalize_reservation(struct hyp_core_ctl_data *hcd, cpumask_t *temp)
+{
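+ /* Only record a new reservation when it has actually changed. */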
+ if (cpumask_equal(temp, &hcd->final_reserved_cpus))
+ return;
+
+ cpumask_copy(&hcd->final_reserved_cpus, temp);
+}
+
+static void hyp_core_ctl_do_reservation(struct hyp_core_ctl_data *hcd)
+{
+ cpumask_t offline_cpus, iter_cpus, temp_reserved_cpus;
+ int i, ret;
+
+ cpumask_clear(&offline_cpus);
+ cpumask_clear(&temp_reserved_cpus);
+
+ hyp_core_ctl_print_status("reservation_start");
+
+ /*
+ * Iterate all reserve CPUs and isolate them if not done already.
+ * The offline CPUs can't be isolated but they are considered
+ * reserved. When an offline and reserved CPU comes online, it
+ * will be isolated to honor the reservation.
+ */
+ cpumask_andnot(&iter_cpus, &hcd->reserve_cpus, &hcd->our_isolated_cpus);
+
+ for_each_cpu(i, &iter_cpus) {
+ if (!cpu_online(i)) {
+ cpumask_set_cpu(i, &offline_cpus);
+ continue;
+ }
+
+ ret = sched_isolate_cpu(i);
+ if (ret < 0) {
+ pr_err("fail to isolate CPU%d. ret=%d\n", i, ret);
+ continue;
+ }
+ cpumask_set_cpu(i, &hcd->our_isolated_cpus);
+ }
+
+ cpumask_or(&temp_reserved_cpus, &hcd->our_isolated_cpus, &offline_cpus);
+ finalize_reservation(hcd, &temp_reserved_cpus);
+
+ hyp_core_ctl_print_status("reservation_end");
+}
+
+static int hyp_core_ctl_thread(void *data)
+{
+ struct hyp_core_ctl_data *hcd = data;
+
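+ /*
+ * Sleep until the enable path or a hotplug callback marks work
+ * pending, then apply or undo the CPU reservation accordingly.
+ */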
+ while (1) {
+ spin_lock(&hcd->lock);
+ if (!hcd->pending) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&hcd->lock);
+
+ schedule();
+
+ spin_lock(&hcd->lock);
+ set_current_state(TASK_RUNNING);
+ }
+ hcd->pending = false;
+ spin_unlock(&hcd->lock);
+
+ if (kthread_should_stop())
+ break;
+
+ if (hcd->reservation_enabled)
+ hyp_core_ctl_do_reservation(hcd);
+ else
+ hyp_core_ctl_undo_reservation(hcd);
+ }
+
+ return 0;
+}
+
+static int hyp_core_ctl_hp_offline(unsigned int cpu)
+{
+ if (!the_hcd || !the_hcd->reservation_enabled)
+ return 0;
+
+ /*
+ * A CPU can't be left in the isolated state while it is
+ * going offline, so unisolate the CPU if we isolated it.
+ * An offline CPU is considered reserved, so no further
+ * action is needed.
+ */
+ if (cpumask_test_and_clear_cpu(cpu, &the_hcd->our_isolated_cpus))
+ sched_unisolate_cpu_unlocked(cpu);
+
+ return 0;
+}
+
+static int hyp_core_ctl_hp_online(unsigned int cpu)
+{
+ if (!the_hcd || !the_hcd->reservation_enabled)
+ return 0;
+
+ /*
+ * A reserved CPU is coming online. It should be isolated
+ * to honor the reservation. So kick the state machine.
+ */
+ spin_lock(&the_hcd->lock);
+ if (cpumask_test_cpu(cpu, &the_hcd->final_reserved_cpus)) {
+ the_hcd->pending = true;
+ wake_up_process(the_hcd->task);
+ }
+ spin_unlock(&the_hcd->lock);
+
+ return 0;
+}
+
+static int __init hyp_core_ctl_init(void)
+{
+ int ret;
+ struct hyp_core_ctl_data *hcd;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ hcd = kzalloc(sizeof(*hcd), GFP_KERNEL);
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = cpulist_parse(reserve_cpus_param, &hcd->reserve_cpus);
+ if (ret < 0) {
+ pr_err("Incorrect default reserve CPUs. ret=%d\n", ret);
+ goto free_hcd;
+ }
+
+ spin_lock_init(&hcd->lock);
+ hcd->task = kthread_run(hyp_core_ctl_thread, (void *) hcd,
+ "hyp_core_ctl");
+
+ if (IS_ERR(hcd->task)) {
+ ret = PTR_ERR(hcd->task);
+ goto free_hcd;
+ }
+
+ sched_setscheduler_nocheck(hcd->task, SCHED_FIFO, &param);
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "qcom/hyp_core_ctl:online",
+ hyp_core_ctl_hp_online,
+ hyp_core_ctl_hp_offline);
+ if (ret < 0) {
+ pr_err("Fail to register the hotplug callback. ret=%d\n", ret);
+ goto stop_task;
+ }
+
+ the_hcd = hcd;
+ return 0;
+
+stop_task:
+ kthread_stop(hcd->task);
+free_hcd:
+ kfree(hcd);
+out:
+ return ret;
+}
+late_initcall(hyp_core_ctl_init);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index c7f5629b6c0e..32e69332e646 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1164,8 +1164,9 @@ bool icnss_is_fw_down(void)
{
if (!penv)
return false;
- else
- return test_bit(ICNSS_FW_DOWN, &penv->state);
+
+ return test_bit(ICNSS_FW_DOWN, &penv->state) ||
+ test_bit(ICNSS_PD_RESTART, &penv->state);
}
EXPORT_SYMBOL(icnss_is_fw_down);
@@ -3474,7 +3475,6 @@ int icnss_trigger_recovery(struct device *dev)
goto out;
}
- WARN_ON(1);
icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
priv->state);
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index e8028d252d16..09bae0345a26 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -160,7 +160,9 @@ static struct ion_heap_ops carveout_heap_ops = {
.unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+static struct ion_heap *__ion_carveout_heap_create(
+ struct ion_platform_heap *heap_data,
+ bool sync)
{
struct ion_carveout_heap *carveout_heap;
int ret;
@@ -172,7 +174,8 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
+ if (sync)
+ ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
@@ -197,6 +200,11 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
return &carveout_heap->heap;
}
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ return __ion_carveout_heap_create(heap_data, true);
+}
+
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
struct ion_carveout_heap *carveout_heap =
@@ -206,3 +214,224 @@ void ion_carveout_heap_destroy(struct ion_heap *heap)
kfree(carveout_heap);
carveout_heap = NULL;
}
+
+#include "msm/msm_ion.h"
+#include <soc/qcom/secure_buffer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msm_ion.h>
+
+struct ion_sc_entry {
+ struct list_head list;
+ struct ion_heap *heap;
+ u32 token;
+};
+
+struct ion_sc_heap {
+ struct ion_heap heap;
+ struct device *dev;
+ struct list_head children;
+};
+
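+/*
+ * Find the child carveout heap whose secure token matches the CP flag bits
+ * of the allocation request.
+ */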
+static struct ion_heap *ion_sc_find_child(struct ion_heap *heap, u32 flags)
+{
+ struct ion_sc_heap *manager;
+ struct ion_sc_entry *entry;
+
+ manager = container_of(heap, struct ion_sc_heap, heap);
+ flags = flags & ION_FLAGS_CP_MASK;
+ list_for_each_entry(entry, &manager->children, list) {
+ if (entry->token == flags)
+ return entry->heap;
+ }
+ return NULL;
+}
+
+static int ion_sc_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags)
+{
+ struct ion_heap *child;
+
+ /* cache maintenance is not possible on secure memory */
+ flags &= ~((unsigned long)ION_FLAG_CACHED);
+ buffer->flags = flags;
+
+ child = ion_sc_find_child(heap, flags);
+ if (!child)
+ return -EINVAL;
+
+ return ion_carveout_heap_allocate(child, buffer, len, align, flags);
+}
+
+static void ion_sc_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *child;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ child = ion_sc_find_child(buffer->heap, buffer->flags);
+ if (!child) {
+ WARN(1, "ion_secure_carveout: invalid buffer flags on free. Memory will be leaked.\n");
+ return;
+ }
+
+ ion_carveout_free(child, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct ion_heap_ops ion_sc_heap_ops = {
+ .allocate = ion_sc_heap_allocate,
+ .free = ion_sc_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
+};
+
+static int ion_sc_get_dt_token(struct ion_sc_entry *entry,
+ struct device_node *np, u64 base, u64 size)
+{
+ u32 token;
+ u32 *vmids, *modes;
+ u32 nr, i;
+ int ret = -EINVAL;
+ u32 src_vm = VMID_HLOS;
+
+ if (of_property_read_u32(np, "token", &token))
+ return -EINVAL;
+
+ nr = count_set_bits(token);
+ vmids = kcalloc(nr, sizeof(*vmids), GFP_KERNEL);
+ if (!vmids)
+ return -ENOMEM;
+
+ modes = kcalloc(nr, sizeof(*modes), GFP_KERNEL);
+ if (!modes) {
+ kfree(vmids);
+ return -ENOMEM;
+ }
+
+ if ((token & ~ION_FLAGS_CP_MASK) ||
+ populate_vm_list(token, vmids, nr)) {
+ pr_err("secure_carveout_heap: Bad token %x\n", token);
+ goto out;
+ }
+
+ for (i = 0; i < nr; i++)
+ if (vmids[i] == VMID_CP_SEC_DISPLAY)
+ modes[i] = PERM_READ;
+ else
+ modes[i] = PERM_READ | PERM_WRITE;
+
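+ /* Reassign the carveout from HLOS to the VMIDs encoded in the token. */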
+ ret = hyp_assign_phys(base, size, &src_vm, 1, vmids, modes, nr);
+ if (ret)
+ pr_err("secure_carveout_heap: Assign token 0x%x failed\n",
+ token);
+ else
+ entry->token = token;
+out:
+ kfree(modes);
+ kfree(vmids);
+ return ret;
+}
+
+static int ion_sc_add_child(struct ion_sc_heap *manager,
+ struct device_node *np)
+{
+ struct device *dev = manager->dev;
+ struct ion_platform_heap heap_data = {0};
+ struct ion_sc_entry *entry;
+ struct device_node *phandle;
+ const __be32 *basep;
+ u64 base, size;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->list);
+
+ phandle = of_parse_phandle(np, "memory-region", 0);
+ if (!phandle)
+ goto out_free;
+
+ basep = of_get_address(phandle, 0, &size, NULL);
+ if (!basep)
+ goto out_free;
+
+ base = of_translate_address(phandle, basep);
+ if (base == OF_BAD_ADDR)
+ goto out_free;
+
+ heap_data.priv = dev;
+ heap_data.base = base;
+ heap_data.size = size;
+
+ /* This will zero memory initially */
+ entry->heap = __ion_carveout_heap_create(&heap_data, false);
+ if (IS_ERR(entry->heap))
+ goto out_free;
+
+ ret = ion_sc_get_dt_token(entry, np, base, size);
+ if (ret)
+ goto out_free_carveout;
+
+ list_add(&entry->list, &manager->children);
+ dev_info(dev, "ion_secure_carveout: creating heap@0x%llx, size 0x%llx\n",
+ base, size);
+ return 0;
+
+out_free_carveout:
+ ion_carveout_heap_destroy(entry->heap);
+out_free:
+ kfree(entry);
+ return -EINVAL;
+}
+
+void ion_secure_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_sc_heap *manager =
+ container_of(heap, struct ion_sc_heap, heap);
+ struct ion_sc_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &manager->children, list) {
+ ion_carveout_heap_destroy(entry->heap);
+ kfree(entry);
+ }
+ kfree(manager);
+}
+
+struct ion_heap *ion_secure_carveout_heap_create(
+ struct ion_platform_heap *heap_data)
+{
+ struct device *dev = heap_data->priv;
+ int ret;
+ struct ion_sc_heap *manager;
+ struct device_node *np;
+
+ manager = kzalloc(sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&manager->children);
+ manager->dev = dev;
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = ion_sc_add_child(manager, np);
+ if (ret) {
+ dev_err(dev, "Creating child pool %s failed\n",
+ np->name);
+ goto err;
+ }
+ }
+
+ manager->heap.ops = &ion_sc_heap_ops;
+ manager->heap.type = ION_HEAP_TYPE_SECURE_CARVEOUT;
+ return &manager->heap;
+
+err:
+ ion_secure_carveout_heap_destroy(&manager->heap);
+ return ERR_PTR(-EINVAL);
+}
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 377172607311..891adfc40be1 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -122,6 +122,10 @@ static struct ion_heap_desc ion_heap_meta[] = {
{
.id = ION_SECURE_DISPLAY_HEAP_ID,
.name = ION_SECURE_DISPLAY_HEAP_NAME,
+ },
+ {
+ .id = ION_SECURE_CARVEOUT_HEAP_ID,
+ .name = ION_SECURE_CARVEOUT_HEAP_NAME,
}
};
#endif
@@ -443,6 +447,7 @@ static struct heap_types_info {
MAKE_HEAP_TYPE_MAPPING(SYSTEM),
MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
+ MAKE_HEAP_TYPE_MAPPING(SECURE_CARVEOUT),
MAKE_HEAP_TYPE_MAPPING(CHUNK),
MAKE_HEAP_TYPE_MAPPING(DMA),
MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
@@ -659,7 +664,8 @@ bool is_secure_vmid_valid(int vmid)
vmid == VMID_CP_CAMERA_PREVIEW ||
vmid == VMID_CP_SPSS_SP ||
vmid == VMID_CP_SPSS_SP_SHARED ||
- vmid == VMID_CP_SPSS_HLOS_SHARED);
+ vmid == VMID_CP_SPSS_HLOS_SHARED ||
+ vmid == VMID_CP_CDSP);
}
unsigned int count_set_bits(unsigned long val)
@@ -709,6 +715,8 @@ int get_secure_vmid(unsigned long flags)
return VMID_CP_SPSS_SP_SHARED;
if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
return VMID_CP_SPSS_HLOS_SHARED;
+ if (flags & ION_FLAG_CP_CDSP)
+ return VMID_CP_CDSP;
return -EINVAL;
}
@@ -1017,6 +1025,9 @@ static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
case ION_HEAP_TYPE_HYP_CMA:
heap = ion_cma_secure_heap_create(heap_data);
break;
+ case ION_HEAP_TYPE_SECURE_CARVEOUT:
+ heap = ion_secure_carveout_heap_create(heap_data);
+ break;
default:
heap = ion_heap_create(heap_data);
}
@@ -1052,6 +1063,9 @@ static void msm_ion_heap_destroy(struct ion_heap *heap)
case ION_HEAP_TYPE_HYP_CMA:
ion_cma_secure_heap_destroy(heap);
break;
+ case ION_HEAP_TYPE_SECURE_CARVEOUT:
+ ion_secure_carveout_heap_destroy(heap);
+ break;
default:
ion_heap_destroy(heap);
}
diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h
index bbf2e8bd317f..1318d1f3d72d 100644
--- a/drivers/staging/android/ion/msm_ion_priv.h
+++ b/drivers/staging/android/ion/msm_ion_priv.h
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/msm_ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -40,6 +40,10 @@ int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
void ion_cma_secure_heap_destroy(struct ion_heap *heap);
+struct ion_heap *ion_secure_carveout_heap_create(
+ struct ion_platform_heap *heap);
+void ion_secure_carveout_heap_destroy(struct ion_heap *heap);
+
long msm_ion_custom_ioctl(struct ion_client *client,
unsigned int cmd,
unsigned long arg);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index d510fda91092..a6672ffca062 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -10,6 +10,7 @@ enum msm_ion_heap_types {
ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
ION_HEAP_TYPE_SYSTEM_SECURE,
ION_HEAP_TYPE_HYP_CMA,
+ ION_HEAP_TYPE_SECURE_CARVEOUT,
/*
* if you add a heap type here you should also add it to
* heap_types_info[] in msm_ion.c
@@ -32,6 +33,7 @@ enum ion_heap_ids {
ION_SECURE_DISPLAY_HEAP_ID = 10,
ION_CP_MFC_HEAP_ID = 12,
ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
+ ION_SECURE_CARVEOUT_HEAP_ID = 14,
ION_CP_WB_HEAP_ID = 16, /* 8660 only */
ION_QSECOM_TA_HEAP_ID = 19,
ION_CAMERA_HEAP_ID = 20, /* 8660 only */
@@ -90,6 +92,8 @@ enum cp_mem_usage {
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
#define ION_FLAG_CP_APP ION_BIT(26)
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
+/* ION_FLAG_ALLOW_NON_CONTIG uses ION_BIT(28) */
+#define ION_FLAG_CP_CDSP ION_BIT(29)
#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
/**
@@ -131,6 +135,7 @@ enum cp_mem_usage {
#define ION_IOMMU_HEAP_NAME "iommu"
#define ION_MFC_HEAP_NAME "mfc"
#define ION_SPSS_HEAP_NAME "spss"
+#define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout"
#define ION_WB_HEAP_NAME "wb"
#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
#define ION_PIL1_HEAP_NAME "pil_1"
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 95103054c0e4..53e3186ead70 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -41,6 +41,9 @@ config VT
If unsure, say Y, or else you won't be able to do much with your new
shiny Linux system :-)
+config TTY_FLUSH_LOCAL_ECHO
+ bool
+
config CONSOLE_TRANSLATIONS
depends on VT
default y
@@ -455,4 +458,21 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config OKL4_VTTY
+ bool "Virtual TTY on the OKL4 Microvisor"
+ depends on OKL4_GUEST
+ select TTY_FLUSH_LOCAL_ECHO
+ default y
+ ---help---
+ This device provides character-level read/write access to the
+ virtual console, which is usually connected to a serial server that
+ multiplexes output onto a physical UART.
+
+config OKL4_VTTY_CONSOLE
+ bool "Console on OKL4 VTTY"
+ depends on OKL4_VTTY
+ default y
+ help
+ Console support for OKL4 Microvisor virtual ttys.
+
endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 5817e2397463..6ed793d9a92f 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_DA_TTY) += metag_da.o
obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o
obj-y += ipwireless/
+obj-$(CONFIG_OKL4_VTTY) += okl4_vtty.o
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index faf50df81622..f2e54307093e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -126,6 +126,10 @@ struct n_tty_data {
struct mutex output_lock;
};
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work);
+#endif
+
static inline size_t read_cnt(struct n_tty_data *ldata)
{
return ldata->read_head - ldata->read_tail;
@@ -740,6 +744,16 @@ static size_t __process_echoes(struct tty_struct *tty)
tail++;
}
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
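+ /*
+ * If echo processing stopped before reaching the commit point,
+ * schedule delayed work so continue_process_echoes() can flush
+ * the remainder.
+ */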
+ if (ldata->echo_commit != tail) {
+ if (!tty->delayed_work) {
+ INIT_DELAYED_WORK(&tty->echo_delayed_work, continue_process_echoes);
+ schedule_delayed_work(&tty->echo_delayed_work, 1);
+ }
+ tty->delayed_work = 1;
+ }
+#endif
+
ldata->echo_tail = tail;
return old_space - space;
}
@@ -802,6 +816,20 @@ static void flush_echoes(struct tty_struct *tty)
mutex_unlock(&ldata->output_lock);
}
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, echo_delayed_work.work);
+ struct n_tty_data *ldata = tty->disc_data;
+
+ mutex_lock(&ldata->output_lock);
+ tty->delayed_work = 0;
+ __process_echoes(tty);
+ mutex_unlock(&ldata->output_lock);
+}
+#endif
+
/**
* add_echo_byte - add a byte to the echo buffer
* @c: unicode byte to echo
diff --git a/drivers/tty/okl4_vtty.c b/drivers/tty/okl4_vtty.c
new file mode 100644
index 000000000000..75e84b37fddb
--- /dev/null
+++ b/drivers/tty/okl4_vtty.c
@@ -0,0 +1,882 @@
+/*
+ * drivers/char/okl4_vtty.c
+ *
+ * Copyright (c) 2012-2014 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ * Copyright (c) 2014-2017 Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Virtual TTY driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "vtty%d", where %d is the tty number, which must be
+ * unique and less than MAX_VTTYS.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <clocksource/arm_arch_timer.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if 0
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#define DRIVER_NAME "okl4-vtty"
+#define DEVICE_NAME "vtty"
+#define DEVICE_PREFIX "ttyV"
+
+/* FIXME: Jira ticket SDK-138 - philipd. */
+#define MAX_VTTYS 8
+
+struct vtty_port {
+ bool exists;
+ int vtty_id;
+
+ bool read_throttled, write_full, irq_registered;
+ struct work_struct read_work;
+ spinlock_t write_lock;
+
+ /*
+ * Buffer length is max_msg_size plus one u32, which encodes the
+ * message length.
+ */
+ char *read_buf;
+ int read_buf_pos, read_buf_len;
+ char *write_buf;
+ int write_buffered;
+ size_t max_msg_size;
+
+ okl4_kcap_t pipe_tx_kcap;
+ okl4_kcap_t pipe_rx_kcap;
+ int tx_irq;
+ int rx_irq;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ struct console console;
+#endif
+
+ struct device *dev;
+ struct tty_port port;
+};
+
+static struct workqueue_struct *read_workqueue;
+
+static struct vtty_port ports[MAX_VTTYS];
+
+static void
+vtty_read_irq(struct vtty_port *port)
+{
+ queue_work(read_workqueue, &port->read_work);
+}
+
+static int
+do_pipe_write(struct vtty_port *port, int count)
+{
+ okl4_error_t ret;
+ int send;
+
+ if (port->write_full)
+ return 0;
+
+ BUG_ON(count > port->max_msg_size);
+
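+ /* Frame the message: a u32 length header followed by the payload, padded to a u32 boundary. */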
+ *(u32 *)port->write_buf = count;
+ send = roundup(count + sizeof(u32), sizeof(u32));
+
+ ret = _okl4_sys_pipe_send(port->pipe_tx_kcap, send,
+ (void *)port->write_buf);
+
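+ /* The pipe is not yet marked ready for TX; set it ready and retry the send once. */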
+ if (ret == OKL4_ERROR_PIPE_NOT_READY) {
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x,
+ OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ _okl4_sys_pipe_control(port->pipe_tx_kcap, x);
+
+ ret = _okl4_sys_pipe_send(port->pipe_tx_kcap, send,
+ (void *)port->write_buf);
+ }
+
+ if (ret == OKL4_ERROR_PIPE_FULL ||
+ ret == OKL4_ERROR_PIPE_NOT_READY) {
+ port->write_full = true;
+ return 0;
+ }
+
+ if (ret != OKL4_OK)
+ return -EIO;
+
+ return count;
+}
+
+static void
+vtty_write_irq(struct vtty_port *port)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+
+ spin_lock(&port->write_lock);
+
+ port->write_full = false;
+
+ if (port->write_buffered &&
+ do_pipe_write(port, port->write_buffered) > 0)
+ port->write_buffered = 0;
+
+ if (tty)
+ tty_wakeup(tty);
+
+ spin_unlock(&port->write_lock);
+
+ tty_kref_put(tty);
+}
+
+static irqreturn_t
+vtty_tx_irq(int irq, void *dev)
+{
+ struct vtty_port *port = dev;
+ okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+ if (okl4_pipe_state_gettxavailable(&payload))
+ vtty_write_irq(port);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+vtty_rx_irq(int irq, void *dev)
+{
+ struct vtty_port *port = dev;
+ okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+ if (okl4_pipe_state_getrxavailable(&payload))
+ vtty_read_irq(port);
+
+ return IRQ_HANDLED;
+}
+
+static int
+vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ int port_num = tty->index;
+ struct vtty_port *port;
+ int status;
+
+ if (port_num < 0 || port_num >= MAX_VTTYS)
+ return -ENXIO;
+
+ port = &ports[port_num];
+ if (!port->exists)
+ return -ENODEV;
+
+ tty->driver_data = port;
+
+ port->write_full = false;
+ port->read_throttled = false;
+ port->write_buffered = 0;
+
+ /*
+ * low_latency forces all tty read handling to be done by the
+ * read task.
+ */
+ port->port.low_latency = 1;
+
+ if (!port->irq_registered) {
+ status = devm_request_irq(port->dev, port->tx_irq,
+ vtty_tx_irq, 0, dev_name(port->dev), port);
+ if (status)
+ return status;
+
+ status = devm_request_irq(port->dev, port->rx_irq,
+ vtty_rx_irq, 0, dev_name(port->dev), port);
+ if (status) {
+ devm_free_irq(port->dev, port->tx_irq, port);
+ return status;
+ }
+
+ port->irq_registered = true;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ tty->port = &port->port;
+ tty_standard_install(driver, tty);
+#else
+ tty->port = &port->port;
+ if (tty_init_termios(tty) != 0)
+ return -ENOMEM;
+
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+#endif
+
+ return 0;
+}
+
+static int
+vtty_open(struct tty_struct *tty, struct file *file)
+{
+ struct vtty_port *port = tty->driver_data;
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x,
+ OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ _okl4_sys_pipe_control(port->pipe_tx_kcap, x);
+ okl4_pipe_control_setoperation(&x,
+ OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+ _okl4_sys_pipe_control(port->pipe_rx_kcap, x);
+
+ return tty_port_open(&port->port, tty, file);
+}
+
+static void
+vtty_close(struct tty_struct *tty, struct file *file)
+{
+ struct vtty_port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, file);
+}
+
+static int
+vtty_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct vtty_port *vtty_port = tty->driver_data;
+
+ /* Run the read task immediately to drain the channel */
+ queue_work(read_workqueue, &vtty_port->read_work);
+
+ return 0;
+}
+
+static void
+vtty_shutdown(struct tty_port *port)
+{
+ struct vtty_port *vtty_port =
+ container_of(port, struct vtty_port, port);
+
+ cancel_work_sync(&vtty_port->read_work);
+}
+
+static int
+do_vtty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct vtty_port *port = tty->driver_data;
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->write_lock, flags);
+
+ /* If we have a whole message, try to send it */
+ if (port->write_buffered == 0 && count >= port->max_msg_size) {
+ if (count > port->max_msg_size)
+ count = port->max_msg_size;
+
+ memcpy(&port->write_buf[sizeof(u32)], buf, count);
+ retval = do_pipe_write(port, count);
+ count -= retval;
+ }
+
+ /* If nothing was sent yet, buffer the data */
+ if (!retval) {
+ /* Determine how much data will fit in the buffer */
+ if (count > port->max_msg_size - port->write_buffered)
+ count = port->max_msg_size - port->write_buffered;
+
+ /* Copy into the buffer if possible */
+ if (count) {
+ memcpy(&port->write_buf[sizeof(u32) +
+ port->write_buffered], buf, count);
+ port->write_buffered += count;
+ retval = count;
+ }
+
+ /* Flush the buffer if it is full */
+ if (port->write_buffered == port->max_msg_size) {
+ if (do_pipe_write(port, port->write_buffered) > 0)
+ port->write_buffered = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->write_lock, flags);
+
+ return retval;
+}
+
+static void
+vtty_flush_chars(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->write_lock, flags);
+
+ if (port->write_buffered && do_pipe_write(port,
+ port->write_buffered) > 0) {
+ port->write_buffered = 0;
+ tty_wakeup(tty);
+ }
+
+ spin_unlock_irqrestore(&port->write_lock, flags);
+}
+
+static int
+vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ return do_vtty_write(tty, &ch, 1);
+}
+
+static int
+vtty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int retval;
+
+ retval = do_vtty_write(tty, buf, count);
+ vtty_flush_chars(tty);
+
+ return retval;
+}
+
+static int
+vtty_write_room(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
+ /*
+ * If the channel is full, we have to buffer writes locally. While
+ * vtty_write() can handle that, we may as well tell the ldisc to wait
+ * for the channel to drain, so we return 0 here.
+ */
+ return port->write_full ? 0 : port->max_msg_size - port->write_buffered;
+}
+
+static int
+vtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
+ return port->max_msg_size - vtty_write_room(tty);
+}
+
+static void
+vtty_throttle(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
+ port->read_throttled = true;
+}
+
+static void
+vtty_unthrottle(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
+ port->read_throttled = false;
+ queue_work(read_workqueue, &port->read_work);
+}
+
+static const struct tty_port_operations vtty_port_ops = {
+ .activate = vtty_activate,
+ .shutdown = vtty_shutdown,
+};
+
+static int vtty_proc_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_puts(m, "okl4vttyinfo:1.0 driver:1.0\n");
+ for (i = 0; i < sizeof(ports)/sizeof(ports[0]); i++) {
+ struct vtty_port *port = &ports[i];
+
+ if (!port->exists)
+ continue;
+ seq_printf(m, "%d: tx_kcap: %d tx_irq: %d rx_kcap: %d rx_irq: %d\n",
+ i, port->pipe_tx_kcap, port->tx_irq, port->pipe_rx_kcap, port->rx_irq);
+ }
+
+ return 0;
+}
+
+static int vtty_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vtty_proc_show, NULL);
+}
+
+static const struct file_operations vtty_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = vtty_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct tty_operations vtty_ops = {
+ .install = vtty_install,
+ .open = vtty_open,
+ .close = vtty_close,
+ .write = vtty_write,
+ .put_char = vtty_put_char,
+ .flush_chars = vtty_flush_chars,
+ .write_room = vtty_write_room,
+ .chars_in_buffer = vtty_chars_in_buffer,
+ .throttle = vtty_throttle,
+ .unthrottle = vtty_unthrottle,
+ .proc_fops = &vtty_proc_fops,
+};
+
+static void
+vtty_read_task(struct work_struct *work)
+{
+ struct vtty_port *port = container_of(work, struct vtty_port,
+ read_work);
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ bool pushed = false;
+
+ if (!tty)
+ return;
+
+ while (true) {
+ struct _okl4_sys_pipe_recv_return ret_recv;
+ int space, len;
+
+ /* Stop reading if we are throttled. */
+ if (port->read_throttled)
+ break;
+
+ /* Find out how much space we have in the tty buffer. */
+ space = tty_buffer_request_room(&port->port,
+ port->max_msg_size);
+
+ if (space == 0) {
+ BUG_ON(pushed);
+ tty_flip_buffer_push(&port->port);
+ pushed = true;
+ continue;
+ } else {
+ pushed = false;
+ }
+
+ if (port->read_buf_pos == port->read_buf_len) {
+ /*
+ * We have run out of chars in our message buffer.
+ * Check whether there are any more messages in the
+ * queue.
+ */
+
+ ret_recv = _okl4_sys_pipe_recv(port->pipe_rx_kcap,
+ port->max_msg_size + sizeof(u32),
+ (void *)port->read_buf);
+ if (ret_recv.error == OKL4_ERROR_PIPE_NOT_READY) {
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x,
+ OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+ _okl4_sys_pipe_control(port->pipe_rx_kcap, x);
+
+ ret_recv = _okl4_sys_pipe_recv(port->pipe_rx_kcap,
+ port->max_msg_size + sizeof(u32),
+ (void *)port->read_buf);
+ }
+ if (ret_recv.error == OKL4_ERROR_PIPE_EMPTY ||
+ ret_recv.error == OKL4_ERROR_PIPE_NOT_READY) {
+ port->read_buf_pos = 0;
+ port->read_buf_len = 0;
+ break;
+ }
+
+ if (ret_recv.error != OKL4_OK) {
+ dev_err(port->dev,
+ "pipe receive returned error %d in vtty driver !\n",
+ (int)ret_recv.error);
+ port->read_buf_pos = 0;
+ port->read_buf_len = 0;
+ break;
+ }
+
+ port->read_buf_pos = sizeof(uint32_t);
+ port->read_buf_len = sizeof(uint32_t) +
+ *(uint32_t *)port->read_buf;
+ }
+
+ /* Send chars to tty layer. */
+ len = port->read_buf_len - port->read_buf_pos;
+ if (len > space)
+ len = space;
+
+ tty_insert_flip_string(&port->port, port->read_buf +
+ port->read_buf_pos, len);
+ port->read_buf_pos += len;
+ }
+
+ tty_flip_buffer_push(&port->port);
+
+ tty_kref_put(tty);
+}
+
+static struct tty_driver *vtty_driver;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+static int vconsole_setup(struct console *co, char *options);
+static void vconsole_write(struct console *co, const char *p, unsigned count);
+static struct tty_driver *vconsole_device(struct console *co, int *index);
+#endif
+
+static int
+vtty_probe(struct platform_device *pdev)
+{
+ struct vtty_port *vtty_port;
+ struct device *tty_dev;
+ u32 reg[2];
+ int vtty_id, irq, err;
+
+ vtty_id = of_alias_get_id(pdev->dev.of_node, "vserial");
+ if (vtty_id < 0)
+ vtty_id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (vtty_id < 0 || vtty_id >= MAX_VTTYS) {
+ err = -ENXIO;
+ goto fail_vtty_id;
+ }
+
+ vtty_port = &ports[vtty_id];
+ if (vtty_port->exists) {
+ dev_err(&pdev->dev, "vtty port already exists\n");
+ err = -ENODEV;
+ goto fail_vtty_id;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+ dev_err(&pdev->dev, "need 2 reg resources\n");
+ err = -ENODEV;
+ goto fail_vtty_id;
+ }
+
+ dev_set_drvdata(&pdev->dev, vtty_port);
+
+ /* Set up and register the tty port */
+ vtty_port->dev = &pdev->dev;
+ vtty_port->vtty_id = vtty_id;
+ tty_port_init(&vtty_port->port);
+ vtty_port->port.ops = &vtty_port_ops;
+
+ vtty_port->pipe_tx_kcap = reg[0];
+ vtty_port->pipe_rx_kcap = reg[1];
+ vtty_port->max_msg_size = 32;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no tx irq resource?\n");
+ err = -ENODEV;
+ goto fail_of;
+ }
+ vtty_port->tx_irq = irq;
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no rx irq resource?\n");
+ err = -ENODEV;
+ goto fail_of;
+ }
+ vtty_port->rx_irq = irq;
+
+ vtty_port->exists = true;
+
+ spin_lock_init(&vtty_port->write_lock);
+ INIT_WORK(&vtty_port->read_work, vtty_read_task);
+
+ vtty_port->read_buf = kmalloc(vtty_port->max_msg_size + sizeof(u32),
+ GFP_KERNEL);
+ if (!vtty_port->read_buf) {
+ dev_err(&pdev->dev, "%s: bad kmalloc\n", __func__);
+ err = -ENOMEM;
+ goto fail_malloc_read;
+ }
+ vtty_port->read_buf_pos = 0;
+ vtty_port->read_buf_len = 0;
+
+ vtty_port->write_buf = kmalloc(vtty_port->max_msg_size + sizeof(u32),
+ GFP_KERNEL);
+ if (!vtty_port->write_buf) {
+ dev_err(&pdev->dev, "%s: bad kmalloc\n", __func__);
+ err = -ENOMEM;
+ goto fail_malloc_write;
+ }
+
+ tty_dev = tty_register_device(vtty_driver, vtty_id, &pdev->dev);
+ if (IS_ERR(tty_dev)) {
+ dev_err(&pdev->dev, "%s: can't register "DEVICE_NAME"%d: %ld",
+ __func__, vtty_id, PTR_ERR(tty_dev));
+ err = PTR_ERR(tty_dev);
+ goto fail_tty_register;
+ }
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ /* Set up and register the port's console device */
+ strlcpy(vtty_port->console.name, DEVICE_PREFIX,
+ sizeof(vtty_port->console.name));
+ vtty_port->console.write = vconsole_write;
+ vtty_port->console.flags = CON_PRINTBUFFER;
+ vtty_port->console.device = vconsole_device;
+ vtty_port->console.setup = vconsole_setup;
+ vtty_port->console.index = vtty_id;
+
+ register_console(&vtty_port->console);
+#endif
+
+ return 0;
+
+fail_tty_register:
+ kfree(vtty_port->write_buf);
+fail_malloc_write:
+ kfree(vtty_port->read_buf);
+ vtty_port->exists = false;
+fail_of:
+fail_vtty_id:
+fail_malloc_read:
+ dev_set_drvdata(&pdev->dev, NULL);
+ return err;
+}
+
+static int
+vtty_remove(struct platform_device *pdev)
+{
+ struct vtty_port *vtty_port = dev_get_drvdata(&pdev->dev);
+
+ if (!vtty_port->exists)
+ return -ENOENT;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ unregister_console(&vtty_port->console);
+#endif
+ tty_unregister_device(vtty_driver, vtty_port->vtty_id);
+ vtty_port->exists = false;
+ kfree(vtty_port->write_buf);
+ kfree(vtty_port->read_buf);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ devm_kfree(&pdev->dev, vtty_port);
+
+ return 0;
+}
+
+static const struct of_device_id vtty_match[] = {
+ {
+ .compatible = "okl,pipe-tty",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vtty_match);
+
+static struct platform_driver driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vtty_match,
+ },
+ .probe = vtty_probe,
+ .remove = vtty_remove,
+};
+
+static int __init vtty_init(void)
+{
+ int err;
+
+ /* Allocate workqueue */
+ read_workqueue = create_workqueue("okl4vtty");
+ if (read_workqueue == NULL) {
+ err = -ENOMEM;
+ goto fail_create_workqueue;
+ }
+
+ /* Set up the tty driver. */
+ vtty_driver = alloc_tty_driver(MAX_VTTYS);
+ if (vtty_driver == NULL) {
+ err = -ENOMEM;
+ goto fail_alloc_tty_driver;
+ }
+
+ vtty_driver->owner = THIS_MODULE;
+ vtty_driver->driver_name = DRIVER_NAME;
+ vtty_driver->name = DEVICE_PREFIX;
+ vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+ vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ vtty_driver->init_termios = tty_std_termios;
+
+ /* These flags don't really matter; just use sensible defaults. */
+ vtty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ vtty_driver->init_termios.c_ispeed = 9600;
+ vtty_driver->init_termios.c_ospeed = 9600;
+
+ tty_set_operations(vtty_driver, &vtty_ops);
+
+ err = tty_register_driver(vtty_driver);
+ if (err)
+ goto fail_tty_driver_register;
+
+ err = platform_driver_register(&driver);
+ if (err)
+ goto fail_mv_driver_register;
+
+ return 0;
+
+fail_mv_driver_register:
+ tty_unregister_driver(vtty_driver);
+fail_tty_driver_register:
+ put_tty_driver(vtty_driver);
+ vtty_driver = NULL;
+fail_alloc_tty_driver:
+ destroy_workqueue(read_workqueue);
+ read_workqueue = NULL;
+fail_create_workqueue:
+ return err;
+}
+
+static void __exit vtty_exit(void)
+{
+ platform_driver_unregister(&driver);
+
+ tty_unregister_driver(vtty_driver);
+ put_tty_driver(vtty_driver);
+ vtty_driver = NULL;
+ destroy_workqueue(read_workqueue);
+ read_workqueue = NULL;
+}
+
+module_init(vtty_init);
+module_exit(vtty_exit);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+
+static u32 cycle_limit = 0;
+
+static int
+vconsole_setup(struct console *co, char *options)
+{
+ struct vtty_port *port;
+
+ if (co->index < 0 || co->index >= MAX_VTTYS)
+ co->index = 0;
+
+ port = &ports[co->index];
+ if (!port->exists)
+ return -ENODEV;
+
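+ /* Allow console writes to retry for at most ~20ms while the pipe is full. */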
+ cycle_limit = arch_timer_get_rate() * 20 / MSEC_PER_SEC;
+ if (cycle_limit == 0) {
+ cycle_limit = -1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static void
+vconsole_write(struct console *co, const char *p, unsigned count)
+{
+ struct vtty_port *port = &ports[co->index];
+ size_t bytes_remaining = count;
+ char buf[port->max_msg_size + sizeof(u32)];
+ cycles_t last_sent_start = get_cycles();
+ static int pipe_full = 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ while (bytes_remaining > 0) {
+ unsigned to_send = min(port->max_msg_size, bytes_remaining);
+ unsigned send = roundup(to_send + sizeof(u32), sizeof(u32));
+ okl4_error_t ret;
+
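+ /*
+ * Pack the message: a u32 byte count followed by the payload,
+ * padded up to a whole number of u32 words for the pipe send.
+ */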
+ *(u32 *)buf = to_send;
+ memcpy(&buf[sizeof(u32)], p, to_send);
+
+ ret = _okl4_sys_pipe_send(port->pipe_tx_kcap, send,
+ (void *)buf);
+
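+ /* The pipe's TX side is not yet ready; mark it ready and retry. */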
+ if (ret == OKL4_ERROR_PIPE_NOT_READY) {
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x,
+ OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ _okl4_sys_pipe_control(port->pipe_tx_kcap, x);
+ continue;
+ }
+
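+ /*
+ * The pipe is full: waive our priority so the reader can drain
+ * it, but give up once we have waited longer than cycle_limit.
+ */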
+ if (ret == OKL4_ERROR_PIPE_FULL) {
+ cycles_t last_sent_cycles = get_cycles() -
+ last_sent_start;
+ if (last_sent_cycles > cycle_limit || pipe_full) {
+ pipe_full = 1;
+ return;
+ }
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+ _okl4_sys_priority_waive(vcpu_prio_normal);
+#else
+ _okl4_sys_priority_waive(0);
+#endif
+ continue;
+ }
+
+ if (ret != OKL4_OK) {
+ /*
+ * We cannot call printk here since that will end up
+ * calling back here and make things worse. We just
+ * have to return and hope that the problem corrects
+ * itself.
+ */
+ return;
+ }
+
+ p += to_send;
+ bytes_remaining -= to_send;
+ last_sent_start = get_cycles();
+ pipe_full = 0;
+ }
+}
+
+struct tty_driver *
+vconsole_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return vtty_driver;
+}
+
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+MODULE_DESCRIPTION("OKL4 virtual TTY driver");
+MODULE_AUTHOR("Philip Derrin <philipd@ok-labs.com>");
diff --git a/drivers/vservices/Kconfig b/drivers/vservices/Kconfig
new file mode 100644
index 000000000000..16b3bda86a9f
--- /dev/null
+++ b/drivers/vservices/Kconfig
@@ -0,0 +1,81 @@
+#
+# OKL4 Virtual Services framework
+#
+
+menuconfig VSERVICES_SUPPORT
+ tristate "OKL4 Virtual Services support"
+ default OKL4_GUEST || OKL4_VIRTUALISATION
+ select HOTPLUG
+ help
+ This option adds core support for OKL4 Virtual Services. The Virtual
+ Services framework is an inter-OS device/service sharing
+ protocol which is supported on OKL4 Microvisor virtualization
+ platforms. You will also need drivers from the following menu in
+ order to make use of it.
+
+if VSERVICES_SUPPORT
+
+config VSERVICES_CHAR_DEV
+ bool "Virtual Services user-space service API"
+ default y
+ help
+ Select this if you want to use user-space service drivers. You will
+ also need udev rules that create device nodes, and protocol code
+ generated by the OK Mill tool.
+
+config VSERVICES_DEBUG
+ bool "Virtual Services debugging support"
+ help
+ Select this if you want to enable Virtual Services core framework
+ debugging. The debug messages for various components of the Virtual
+ Services core framework can be toggled at runtime on a per-session
+ basis via sysfs. When Virtual Services debugging is enabled here
+ but disabled at runtime, it has minimal performance impact.
+
+config VSERVICES_LOCK_DEBUG
+ bool "Debug Virtual Services state locks"
+ default DEBUG_KERNEL
+ help
+ This option enables some runtime checks that Virtual Services
+ state lock functions are used correctly in service drivers.
+
+config VSERVICES_SERVER
+ tristate "Virtual Services server support"
+ depends on SYSFS
+ default y
+ help
+ This option adds support for Virtual Services servers, which allows
+ services to be exported from this Linux system to other
+ environments. Servers are created at runtime by writing to files in
+ /sys/bus/vservices-server.
+
+config VSERVICES_CLIENT
+ tristate "Virtual Services client support"
+ default y
+ help
+ This option adds support for Virtual Services clients, which allows
+ connecting to services exported from other environments.
+
+config VSERVICES_SKELETON_DRIVER
+ tristate "Virtual Services skeleton driver"
+ depends on VSERVICES_SERVER || VSERVICES_CLIENT
+ default n
+ help
+ This option adds support for a skeleton virtual service driver. The
+ driver can be used as a template for new virtual service drivers, or
+ for testing them. If unsure, say N.
+
+config VSERVICES_NAMED_DEVICE
+ bool "Virtual Services use named device node in /dev"
+ default n
+ help
+ Select this if you want the device node in /dev to use a symbolic
+ name rather than a numeric one.
+
+source "drivers/vservices/transport/Kconfig"
+
+source "drivers/vservices/protocol/Kconfig"
+
+source "drivers/vservices/Kconfig.stacks"
+
+endif # VSERVICES_SUPPORT
diff --git a/drivers/vservices/Kconfig.stacks b/drivers/vservices/Kconfig.stacks
new file mode 100644
index 000000000000..97eba53df5dd
--- /dev/null
+++ b/drivers/vservices/Kconfig.stacks
@@ -0,0 +1,7 @@
+#
+# vServices drivers configuration
+#
+
+menu "Client and Server drivers"
+
+endmenu
diff --git a/drivers/vservices/Makefile b/drivers/vservices/Makefile
new file mode 100644
index 000000000000..685ba0a961af
--- /dev/null
+++ b/drivers/vservices/Makefile
@@ -0,0 +1,16 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += vservices.o
+vservices-objs-$(CONFIG_VSERVICES_CHAR_DEV) += devio.o
+vservices-objs = session.o $(vservices-objs-y)
+
+obj-$(CONFIG_VSERVICES_CLIENT) += core_client.o
+obj-$(CONFIG_VSERVICES_SERVER) += core_server.o
+
+obj-$(CONFIG_VSERVICES_SKELETON_DRIVER) += vservices_skeleton_driver.o
+vservices_skeleton_driver-objs = skeleton_driver.o
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += transport/
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += protocol/
diff --git a/drivers/vservices/compat.h b/drivers/vservices/compat.h
new file mode 100644
index 000000000000..5f6926dc9f78
--- /dev/null
+++ b/drivers/vservices/compat.h
@@ -0,0 +1,59 @@
+/*
+ * drivers/vservices/compat.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Wrapper functions/definitions for compatibility between different kernel
+ * versions.
+ */
+
+#ifndef _VSERVICES_COMPAT_H
+#define _VSERVICES_COMPAT_H
+
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+/* The INIT_WORK_ONSTACK macro has a slightly different name in older kernels */
+#ifndef INIT_WORK_ONSTACK
+#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK_ON_STACK(_work, _func)
+#endif
+
+/*
+ * We require a workqueue with no concurrency. This is provided by
+ * create_singlethread_workqueue() in kernels prior to 2.6.36.
+ * In later versions, create_singlethread_workqueue() enables WQ_MEM_RECLAIM and
+ * thus WQ_RESCUER, which allows work items to be grabbed by a rescuer thread
+ * and run concurrently if the queue is running too slowly. We must use
+ * alloc_ordered_workqueue() instead, to disable the rescuer.
+ */
+static inline struct workqueue_struct *
+vs_create_workqueue(const char *name)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ return create_singlethread_workqueue(name);
+#else
+ return alloc_ordered_workqueue(name, 0);
+#endif
+}
+
+/*
+ * The max3 macro has only been present from 2.6.37
+ * (commit: f27c85c56b32c42bcc54a43189c1e00fdceb23ec)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+#define max3(x, y, z) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ typeof(z) _max3 = (z); \
+ (void) (&_max1 == &_max2); \
+ (void) (&_max1 == &_max3); \
+ _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+ (_max2 > _max3 ? _max2 : _max3); })
+#endif
+
+#endif /* _VSERVICES_COMPAT_H */
diff --git a/drivers/vservices/core_client.c b/drivers/vservices/core_client.c
new file mode 100644
index 000000000000..4cc78ac13e49
--- /dev/null
+++ b/drivers/vservices/core_client.c
@@ -0,0 +1,733 @@
+/*
+ * drivers/vservices/core_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Client side core service application driver. This is responsible for:
+ *
+ * - automatically connecting to the server when it becomes ready;
+ * - sending a reset command to the server if something has gone wrong; and
+ * - enumerating all the available services.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+struct core_client {
+ struct vs_client_core_state state;
+ struct vs_service_device *service;
+
+ struct list_head message_queue;
+ struct mutex message_queue_lock;
+ struct work_struct message_queue_work;
+};
+
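+/*
+ * A service reset message that has been deferred until the core client
+ * has send quota available.
+ */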
+struct pending_reset {
+ struct vs_service_device *service;
+ struct list_head list;
+};
+
+#define to_core_client(x) container_of(x, struct core_client, state)
+#define dev_to_core_client(x) to_core_client(dev_get_drvdata(x))
+
+static int vs_client_core_fatal_error(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+
+ /* Force a transport level reset */
+ dev_err(&client->service->dev, "Fatal error - resetting session\n");
+ return -EPROTO;
+}
+
+static struct core_client *
+vs_client_session_core_client(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service = session->core_service;
+
+ if (!core_service)
+ return NULL;
+
+ return dev_to_core_client(&core_service->dev);
+}
+
+static ssize_t client_core_reset_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *core_service = to_vs_service_device(dev);
+ struct vs_session_device *session =
+ vs_service_get_session(core_service);
+ struct vs_service_device *target;
+ vs_service_id_t service_id;
+ unsigned long val;
+ int err;
+
+ /* Writing a valid service id to this file resets that service */
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ service_id = val;
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -ENODEV;
+
+ err = vs_service_reset(target, core_service);
+
+ vs_put_service(target);
+ return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(reset_service, S_IWUSR, NULL,
+ client_core_reset_service_store);
+
+static struct attribute *client_core_dev_attrs[] = {
+ &dev_attr_reset_service.attr,
+ NULL,
+};
+
+static const struct attribute_group client_core_attr_group = {
+ .attrs = client_core_dev_attrs,
+};
+
+/*
+ * Protocol callbacks
+ */
+static int
+vs_client_core_handle_service_removed(struct vs_client_core_state *state,
+ u32 service_id)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ struct vs_service_device *service;
+ int ret;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service)
+ return -EINVAL;
+
+ ret = vs_service_handle_delete(service);
+ vs_put_service(service);
+ return ret;
+}
+
+static int vs_client_core_create_service(struct core_client *client,
+ struct vs_session_device *session, vs_service_id_t service_id,
+ struct vs_string *protocol_name_string,
+ struct vs_string *service_name_string)
+{
+ char *protocol_name, *service_name;
+ struct vs_service_device *service;
+ int ret = 0;
+
+ protocol_name = vs_string_dup(protocol_name_string, GFP_KERNEL);
+ if (!protocol_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ service_name = vs_string_dup(service_name_string, GFP_KERNEL);
+ if (!service_name) {
+ ret = -ENOMEM;
+ goto out_free_protocol_name;
+ }
+
+ service = vs_service_register(session, client->service, service_id,
+ protocol_name, service_name, NULL);
+ if (IS_ERR(service)) {
+ ret = PTR_ERR(service);
+ goto out_free_service_name;
+ }
+
+ vs_service_start(service);
+
+out_free_service_name:
+ kfree(service_name);
+out_free_protocol_name:
+ kfree(protocol_name);
+out:
+ return ret;
+}
+
+static int
+vs_client_core_handle_service_created(struct vs_client_core_state *state,
+ u32 service_id, struct vs_string service_name,
+ struct vs_string protocol_name, struct vs_mbuf *mbuf)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ int err;
+
+ vs_dev_debug(VS_DEBUG_CLIENT_CORE,
+ vs_service_get_session(client->service),
+ &client->service->dev, "Service info for %d received\n",
+ service_id);
+
+ err = vs_client_core_create_service(client, session, service_id,
+ &protocol_name, &service_name);
+ if (err)
+ dev_err(&session->dev,
+ "Failed to create service with id %d: %d\n",
+ service_id, err);
+
+ vs_client_core_core_free_service_created(state, &service_name,
+ &protocol_name, mbuf);
+
+ return err;
+}
+
+static int
+vs_client_core_send_service_reset(struct core_client *client,
+ struct vs_service_device *service)
+{
+ return vs_client_core_core_send_service_reset(&client->state,
+ service->id, GFP_KERNEL);
+}
+
+static int
+vs_client_core_queue_service_reset(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_client *client =
+ vs_client_session_core_client(session);
+ struct pending_reset *msg;
+
+ if (!client)
+ return -ENODEV;
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+ "Sending reset for service %d\n", service->id);
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&client->message_queue_lock);
+
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+ list_add_tail(&msg->list, &client->message_queue);
+
+ mutex_unlock(&client->message_queue_lock);
+ queue_work(client->service->work_queue, &client->message_queue_work);
+
+ return 0;
+}
+
+static int vs_core_client_tx_ready(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+
+ queue_work(client->service->work_queue, &client->message_queue_work);
+
+ return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+ struct core_client *client = container_of(work, struct core_client,
+ message_queue_work);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ struct pending_reset *msg;
+ int err;
+
+ vs_service_state_lock(client->service);
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(client->state.state.core)) {
+ vs_service_state_unlock(client->service);
+ return;
+ }
+
+ vs_dev_debug(VS_DEBUG_CLIENT, session, &session->dev, "tx_ready\n");
+
+ mutex_lock(&client->message_queue_lock);
+ while (!list_empty(&client->message_queue)) {
+ msg = list_first_entry(&client->message_queue,
+ struct pending_reset, list);
+
+ err = vs_client_core_send_service_reset(client, msg->service);
+
+ /* If we're out of quota there's no point continuing */
+ if (err == -ENOBUFS)
+ break;
+
+ /* Any other error is fatal */
+ if (err < 0) {
+ dev_err(&client->service->dev,
+ "Failed to send pending reset for %d (%d) - resetting session",
+ msg->service->id, err);
+ vs_service_reset_nosync(client->service);
+ break;
+ }
+
+ /*
+ * The message sent successfully - remove it from the queue.
+ * The corresponding vs_get_service() was done when the pending
+ * message was enqueued.
+ */
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ mutex_unlock(&client->message_queue_lock);
+ vs_service_state_unlock(client->service);
+}
+
+static int
+vs_client_core_handle_server_ready(struct vs_client_core_state *state,
+ u32 service_id, u32 in_quota, u32 out_quota, u32 in_bit_offset,
+ u32 in_num_bits, u32 out_bit_offset, u32 out_num_bits)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session;
+ struct vs_service_device *service;
+ int ret;
+
+ if (service_id == 0)
+ return -EPROTO;
+
+ if (!in_quota || !out_quota)
+ return -EINVAL;
+
+ session = vs_service_get_session(client->service);
+ service = vs_session_get_service(session, service_id);
+ if (!service)
+ return -EINVAL;
+
+ service->send_quota = in_quota;
+ service->recv_quota = out_quota;
+ service->notify_send_offset = in_bit_offset;
+ service->notify_send_bits = in_num_bits;
+ service->notify_recv_offset = out_bit_offset;
+ service->notify_recv_bits = out_num_bits;
+
+ ret = vs_service_enable(service);
+ vs_put_service(service);
+ return ret;
+}
+
+static int
+vs_client_core_handle_service_reset(struct vs_client_core_state *state,
+ u32 service_id)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session;
+
+ if (service_id == 0)
+ return -EPROTO;
+
+ session = vs_service_get_session(client->service);
+
+ return vs_service_handle_reset(session, service_id, true);
+}
+
+static void vs_core_client_start(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+
+ /* FIXME - start callback should return int */
+ vs_dev_debug(VS_DEBUG_CLIENT_CORE, session, &client->service->dev,
+ "Core client start\n");
+}
+
+static void vs_core_client_reset(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ struct pending_reset *msg;
+
+ /* Flush the pending resets - we're about to delete everything */
+ while (!list_empty(&client->message_queue)) {
+ msg = list_first_entry(&client->message_queue,
+ struct pending_reset, list);
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+
+ vs_session_delete_noncore(session);
+
+ /* Return to the initial quotas, until the next startup message */
+ client->service->send_quota = 0;
+ client->service->recv_quota = 1;
+}
+
+static int vs_core_client_startup(struct vs_client_core_state *state,
+ u32 core_in_quota, u32 core_out_quota)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_service_device *service = state->service;
+ struct vs_session_device *session = vs_service_get_session(service);
+ int ret;
+
+ if (!core_in_quota || !core_out_quota)
+ return -EINVAL;
+
+ /*
+ * Update the service struct with our real quotas and tell the
+ * transport about the change
+ */
+
+ service->send_quota = core_in_quota;
+ service->recv_quota = core_out_quota;
+ ret = session->transport->vt->service_start(session->transport, service);
+ if (ret < 0)
+ return ret;
+
+ WARN_ON(!list_empty(&client->message_queue));
+
+ return vs_client_core_core_req_connect(state, GFP_KERNEL);
+}
+
+static struct vs_client_core_state *
+vs_core_client_alloc(struct vs_service_device *service)
+{
+ struct core_client *client;
+ int err;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ goto fail;
+
+ client->service = service;
+ INIT_LIST_HEAD(&client->message_queue);
+ INIT_WORK(&client->message_queue_work, message_queue_work);
+ mutex_init(&client->message_queue_lock);
+
+ err = sysfs_create_group(&service->dev.kobj, &client_core_attr_group);
+ if (err)
+ goto fail_free_client;
+
+ /*
+ * Default transport resources for the core service client. The
+ * server will inform us of the real quotas in the startup message.
+ * Note that it is important that the quotas never decrease, so these
+ * numbers are as small as possible.
+ */
+ service->send_quota = 0;
+ service->recv_quota = 1;
+ service->notify_send_bits = 0;
+ service->notify_send_offset = 0;
+ service->notify_recv_bits = 0;
+ service->notify_recv_offset = 0;
+
+ return &client->state;
+
+fail_free_client:
+ kfree(client);
+fail:
+ return NULL;
+}
+
+static void vs_core_client_release(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+
+ sysfs_remove_group(&client->service->dev.kobj, &client_core_attr_group);
+ kfree(client);
+}
+
+static struct vs_client_core vs_core_client_driver = {
+ .alloc = vs_core_client_alloc,
+ .release = vs_core_client_release,
+ .start = vs_core_client_start,
+ .reset = vs_core_client_reset,
+ .tx_ready = vs_core_client_tx_ready,
+
+ .core = {
+ .nack_connect = vs_client_core_fatal_error,
+
+ /* FIXME: Jira ticket SDK-3074 - ryanm. */
+ .ack_disconnect = vs_client_core_fatal_error,
+ .nack_disconnect = vs_client_core_fatal_error,
+
+ .msg_service_created = vs_client_core_handle_service_created,
+ .msg_service_removed = vs_client_core_handle_service_removed,
+
+ .msg_startup = vs_core_client_startup,
+ /* FIXME: Jira ticket SDK-3074 - philipd. */
+ .msg_shutdown = vs_client_core_fatal_error,
+ .msg_server_ready = vs_client_core_handle_server_ready,
+ .msg_service_reset = vs_client_core_handle_service_reset,
+ },
+};
+
+/*
+ * Client bus driver
+ */
+static int vs_client_bus_match(struct device *dev, struct device_driver *driver)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+ /* Don't match anything to the devio driver; it's bound manually */
+ if (!vsdrv->protocol)
+ return 0;
+
+ WARN_ON_ONCE(service->is_server || vsdrv->is_server);
+
+ /* Match if the protocol strings are the same */
+ if (strcmp(service->protocol, vsdrv->protocol) == 0)
+ return 1;
+
+ return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+static ssize_t service_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->send_quota);
+}
+
+static ssize_t quota_out_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->recv_quota);
+}
+
+static struct device_attribute vs_client_dev_attrs[] = {
+ __ATTR_RO(id),
+ __ATTR_RO(is_server),
+ __ATTR(protocol, S_IRUGO, dev_protocol_show, NULL),
+ __ATTR_RO(service_name),
+ __ATTR_RO(quota_in),
+ __ATTR_RO(quota_out),
+ __ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+ struct vs_service_driver *driver = to_vs_service_driver(drv);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", driver->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_client_drv_attrs[] = {
+ __ATTR_RO(protocol),
+ __ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_client_drv_attrs[] = {
+ &driver_attr_protocol.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_drv);
+#endif
+
+struct bus_type vs_client_bus_type = {
+ .name = "vservices-client",
+ .dev_attrs = vs_client_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ .drv_attrs = vs_client_drv_attrs,
+#else
+ .drv_groups = vs_client_drv_groups,
+#endif
+ .match = vs_client_bus_match,
+ .probe = vs_service_bus_probe,
+ .remove = vs_service_bus_remove,
+ .uevent = vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_client_bus_type);
+
+/*
+ * Client session driver
+ */
+static int vs_client_session_probe(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_service_device *service;
+ char *protocol, *name;
+ int ret = 0;
+
+ if (session->is_server) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* create a service for the core protocol client */
+ protocol = kstrdup(VSERVICE_CORE_PROTOCOL_NAME, GFP_KERNEL);
+ if (!protocol) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ name = kstrdup("core", GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto fail_free_protocol;
+ }
+
+ service = vs_service_register(session, NULL, 0, protocol, name, NULL);
+ if (IS_ERR(service)) {
+ ret = PTR_ERR(service);
+ goto fail_free_name;
+ }
+
+fail_free_name:
+ kfree(name);
+fail_free_protocol:
+ kfree(protocol);
+fail:
+ return ret;
+}
+
+static int
+vs_client_session_send_service_reset(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ if (WARN_ON(service->id == 0))
+ return -EINVAL;
+
+ return vs_client_core_queue_service_reset(session, service);
+}
+
+static struct vs_session_driver vs_client_session_driver = {
+ .driver = {
+ .name = "vservices-client-session",
+ .owner = THIS_MODULE,
+ .bus = &vs_session_bus_type,
+ .probe = vs_client_session_probe,
+ .suppress_bind_attrs = true,
+ },
+ .is_server = false,
+ .service_bus = &vs_client_bus_type,
+ .service_local_reset = vs_client_session_send_service_reset,
+};
+
+static int __init vs_core_client_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vs_client_bus_type);
+ if (ret)
+ goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ vs_devio_client_driver.driver.bus = &vs_client_bus_type;
+ vs_devio_client_driver.driver.owner = THIS_MODULE;
+ ret = driver_register(&vs_devio_client_driver.driver);
+ if (ret)
+ goto fail_devio_register;
+#endif
+
+ ret = driver_register(&vs_client_session_driver.driver);
+ if (ret)
+ goto fail_driver_register;
+
+ ret = vservice_core_client_register(&vs_core_client_driver,
+ "vs_core_client");
+ if (ret)
+ goto fail_core_register;
+
+ vservices_client_root = kobject_create_and_add("client-sessions",
+ vservices_root);
+ if (!vservices_client_root) {
+ ret = -ENOMEM;
+ goto fail_create_root;
+ }
+
+ return 0;
+
+fail_create_root:
+ vservice_core_client_unregister(&vs_core_client_driver);
+fail_core_register:
+ driver_unregister(&vs_client_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_client_driver.driver);
+ vs_devio_client_driver.driver.bus = NULL;
+ vs_devio_client_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+ bus_unregister(&vs_client_bus_type);
+fail_bus_register:
+ return ret;
+}
+
+static void __exit vs_core_client_exit(void)
+{
+ kobject_put(vservices_client_root);
+ vservice_core_client_unregister(&vs_core_client_driver);
+ driver_unregister(&vs_client_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_client_driver.driver);
+ vs_devio_client_driver.driver.bus = NULL;
+ vs_devio_client_driver.driver.owner = NULL;
+#endif
+ bus_unregister(&vs_client_bus_type);
+}
+
+subsys_initcall(vs_core_client_init);
+module_exit(vs_core_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/core_server.c b/drivers/vservices/core_server.c
new file mode 100644
index 000000000000..76ca83c79123
--- /dev/null
+++ b/drivers/vservices/core_server.c
@@ -0,0 +1,1651 @@
+/*
+ * drivers/vservices/core_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Server side core service application driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+
+#include "transport.h"
+#include "session.h"
+#include "compat.h"
+
+#define VSERVICE_CORE_SERVICE_NAME "core"
+
+struct core_server {
+ struct vs_server_core_state state;
+ struct vs_service_device *service;
+
+ /*
+ * A list of messages to send, a mutex protecting it, and a
+ * work item to process the list.
+ */
+ struct list_head message_queue;
+ struct mutex message_queue_lock;
+ struct work_struct message_queue_work;
+
+ struct mutex alloc_lock;
+
+ /* The following are all protected by alloc_lock. */
+ unsigned long *in_notify_map;
+ int in_notify_map_bits;
+
+ unsigned long *out_notify_map;
+ int out_notify_map_bits;
+
+ unsigned in_quota_remaining;
+ unsigned out_quota_remaining;
+};
+
+/*
+ * Used for message deferral when the core service is over quota.
+ */
+struct pending_message {
+ vservice_core_message_id_t type;
+ struct vs_service_device *service;
+ struct list_head list;
+};
+
+#define to_core_server(x) container_of(x, struct core_server, state)
+#define dev_to_core_server(x) to_core_server(dev_get_drvdata(x))
+
+static struct vs_session_device *
+vs_core_server_session(struct core_server *server)
+{
+ return vs_service_get_session(server->service);
+}
+
+static struct core_server *
+vs_server_session_core_server(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service = session->core_service;
+
+ if (!core_service)
+ return NULL;
+
+ return dev_to_core_server(&core_service->dev);
+}
+
+static int vs_server_core_send_service_removed(struct core_server *server,
+ struct vs_service_device *service)
+{
+ return vs_server_core_core_send_service_removed(&server->state,
+ service->id, GFP_KERNEL);
+}
+
+static bool
+cancel_pending_created(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ list_for_each_entry(msg, &server->message_queue, list) {
+ if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
+ msg->service == service) {
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+
+ /* there can only be one */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int vs_server_core_queue_service_removed(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ mutex_lock(&server->message_queue_lock);
+
+ /*
+ * If we haven't sent the notification that the service was created,
+ * nuke it and do nothing else.
+ *
+ * This is not just an optimisation; see below.
+ */
+ if (cancel_pending_created(server, service)) {
+ mutex_unlock(&server->message_queue_lock);
+ return 0;
+ }
+
+ /*
+ * Do nothing if the core state is not connected. We must avoid
+ * queueing service_removed messages on a reset service.
+ *
+ * Note that we cannot take the core server state lock here, because
+ * we may (or may not) have been called from a core service message
+ * handler. Thus, we must beware of races with changes to this
+ * condition:
+ *
+ * - It becomes true when the req_connect handler sends an
+ * ack_connect, *after* it queues service_created for each existing
+ * service (while holding the service ready lock). The handler sends
+ * ack_connect with the message queue lock held.
+ *
+ * - If we see the service as connected, then the req_connect
+ * handler has already queued and sent a service_created for this
+ * service, so it's ok for us to send a service_removed.
+ *
+ * - If we see it as disconnected, the req_connect handler hasn't
+ * taken the message queue lock to send ack_connect yet, and thus
+ * has not released the service state lock; so if it queued a
+ * service_created we caught it in the flush above before it was
+ * sent.
+ *
+ * - It becomes false before the reset / disconnect handlers are
+ * called and those will both flush the message queue afterwards.
+ *
+ * - If we see the service as connected, then the reset / disconnect
+ * handler is going to flush the message.
+ *
+ * - If we see it disconnected, the state change has occurred and
+ * implicitly had the same effect as this message, so doing
+ * nothing is correct.
+ *
+ * Note that ordering in all of the above cases is guaranteed by the
+ * message queue lock.
+ */
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+ mutex_unlock(&server->message_queue_lock);
+ return 0;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&server->message_queue_lock);
+ return -ENOMEM;
+ }
+
+ msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+
+ list_add_tail(&msg->list, &server->message_queue);
+
+ mutex_unlock(&server->message_queue_lock);
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static int vs_server_core_send_service_created(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct vs_session_device *session =
+ vs_service_get_session(server->service);
+
+ struct vs_mbuf *mbuf;
+ struct vs_string service_name, protocol_name;
+ size_t service_name_len, protocol_name_len;
+
+ int err;
+
+ mbuf = vs_server_core_core_alloc_service_created(&server->state,
+ &service_name, &protocol_name, GFP_KERNEL);
+
+ if (IS_ERR(mbuf))
+ return PTR_ERR(mbuf);
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+ "Sending service created message for %d (%s:%s)\n",
+ service->id, service->name, service->protocol);
+
+ service_name_len = strlen(service->name);
+ protocol_name_len = strlen(service->protocol);
+
+ if (service_name_len > vs_string_max_size(&service_name) ||
+ protocol_name_len > vs_string_max_size(&protocol_name)) {
+ dev_err(&session->dev,
+ "Invalid name/protocol for service %d (%s:%s)\n",
+ service->id, service->name,
+ service->protocol);
+ err = -EINVAL;
+ goto fail;
+ }
+
+ vs_string_copyin(&service_name, service->name);
+ vs_string_copyin(&protocol_name, service->protocol);
+
+ err = vs_server_core_core_send_service_created(&server->state,
+ service->id, service_name, protocol_name, mbuf);
+ if (err) {
+ dev_err(&session->dev,
+ "Fatal error sending service creation message for %d (%s:%s): %d\n",
+ service->id, service->name,
+ service->protocol, err);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ vs_server_core_core_free_service_created(&server->state,
+ &service_name, &protocol_name, mbuf);
+
+ return err;
+}
+
+static int vs_server_core_queue_service_created(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ lockdep_assert_held(&service->ready_lock);
+ lockdep_assert_held(&server->service->state_mutex);
+
+ mutex_lock(&server->message_queue_lock);
+
+ /* Do nothing if the core state is disconnected. */
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+ mutex_unlock(&server->message_queue_lock);
+ return 0;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&server->message_queue_lock);
+ return -ENOMEM;
+ }
+
+ msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+
+ list_add_tail(&msg->list, &server->message_queue);
+
+ mutex_unlock(&server->message_queue_lock);
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static struct vs_service_device *
+__vs_server_core_register_service(struct vs_session_device *session,
+ vs_service_id_t service_id, struct vs_service_device *owner,
+ const char *name, const char *protocol, const void *plat_data)
+{
+ if (!session->is_server)
+ return ERR_PTR(-ENODEV);
+
+ if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
+ VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
+ return ERR_PTR(-EINVAL);
+
+ /* The server core must only be registered as service_id zero */
+ if (service_id == 0 && (owner != NULL ||
+ strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
+ strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
+ return ERR_PTR(-EINVAL);
+
+ return vs_service_register(session, owner, service_id, protocol, name,
+ plat_data);
+}
+
+static struct vs_service_device *
+vs_server_core_create_service(struct core_server *server,
+ struct vs_session_device *session,
+ struct vs_service_device *owner, vs_service_id_t service_id,
+ const char *name, const char *protocol, const void *plat_data)
+{
+ struct vs_service_device *service;
+
+ service = __vs_server_core_register_service(session, service_id,
+ owner, name, protocol, plat_data);
+ if (IS_ERR(service))
+ return service;
+
+ if (protocol) {
+ vs_service_state_lock(server->service);
+ vs_service_start(service);
+ if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+ vs_service_enable(service);
+ vs_service_state_unlock(server->service);
+ }
+
+ return service;
+}
+
+static int
+vs_server_core_send_service_reset_ready(struct core_server *server,
+ vservice_core_message_id_t type,
+ struct vs_service_device *service)
+{
+ bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(server->service);
+ int err;
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+ "Sending %s for service %d\n",
+ is_reset ? "reset" : "ready", service->id);
+
+ if (is_reset)
+ err = vs_server_core_core_send_service_reset(&server->state,
+ service->id, GFP_KERNEL);
+ else
+ err = vs_server_core_core_send_server_ready(&server->state,
+ service->id, service->recv_quota,
+ service->send_quota,
+ service->notify_recv_offset,
+ service->notify_recv_bits,
+ service->notify_send_offset,
+ service->notify_send_bits,
+ GFP_KERNEL);
+
+ return err;
+}
+
+static bool
+cancel_pending_ready(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ list_for_each_entry(msg, &server->message_queue, list) {
+ if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
+ msg->service == service) {
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+
+ /* there can only be one */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int
+vs_server_core_queue_service_reset_ready(struct core_server *server,
+ vservice_core_message_id_t type,
+ struct vs_service_device *service)
+{
+ bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+ struct pending_message *msg;
+
+ mutex_lock(&server->message_queue_lock);
+
+ /*
+ * If this is a reset, and there is an outgoing ready in the
+ * queue, we must cancel it so it can't be sent with invalid
+ * transport resources, and then return immediately so we
+ * don't send a redundant reset.
+ */
+ if (is_reset && cancel_pending_ready(server, service)) {
+ mutex_unlock(&server->message_queue_lock);
+ return VS_SERVICE_ALREADY_RESET;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&server->message_queue_lock);
+ return -ENOMEM;
+ }
+
+ msg->type = type;
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+ list_add_tail(&msg->list, &server->message_queue);
+
+ mutex_unlock(&server->message_queue_lock);
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static int vs_core_server_tx_ready(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(server->service);
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
+
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+ struct core_server *server = container_of(work, struct core_server,
+ message_queue_work);
+ struct pending_message *msg;
+ int err;
+
+ vs_service_state_lock(server->service);
+
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+ vs_service_state_unlock(server->service);
+ return;
+ }
+
+ /*
+ * If any pending message fails we exit the loop immediately so that
+ * we preserve the message order.
+ */
+ mutex_lock(&server->message_queue_lock);
+ while (!list_empty(&server->message_queue)) {
+ msg = list_first_entry(&server->message_queue,
+ struct pending_message, list);
+
+ switch (msg->type) {
+ case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+ err = vs_server_core_send_service_created(server,
+ msg->service);
+ break;
+
+ case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+ err = vs_server_core_send_service_removed(server,
+ msg->service);
+ break;
+
+ case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+ case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+ err = vs_server_core_send_service_reset_ready(
+ server, msg->type, msg->service);
+ break;
+
+ default:
+ dev_warn(&server->service->dev,
+ "Don't know how to handle pending message type %d\n",
+ msg->type);
+ err = 0;
+ break;
+ }
+
+ /*
+ * If we're out of quota we exit and wait for tx_ready to
+ * queue us again.
+ */
+ if (err == -ENOBUFS)
+ break;
+
+ /* Any other error is fatal */
+ if (err < 0) {
+ dev_err(&server->service->dev,
+ "Failed to send pending message type %d: %d - resetting session",
+ msg->type, err);
+ vs_service_reset_nosync(server->service);
+ break;
+ }
+
+ /*
+ * The message sent successfully - remove it from the
+ * queue. The corresponding vs_get_service() was done
+ * when the pending message was created.
+ */
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ mutex_unlock(&server->message_queue_lock);
+
+ vs_service_state_unlock(server->service);
+
+ return;
+}
+
+/*
+ * Core server sysfs interface
+ */
+static ssize_t server_core_create_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = to_vs_session_device(dev->parent);
+ struct core_server *server = dev_to_core_server(&service->dev);
+ struct vs_service_device *new_service;
+ char *p;
+ ssize_t ret = count;
+
+ /* FIXME - Buffer sizes are not defined in generated headers */
+ /* discard leading whitespace */
+ while (count && isspace(*buf)) {
+ buf++;
+ count--;
+ }
+ if (!count) {
+ dev_info(dev, "empty service name\n");
+ return -EINVAL;
+ }
+ /* discard trailing whitespace */
+ while (count && isspace(buf[count - 1]))
+ count--;
+
+ if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
+ dev_info(dev, "service name too long (max %d)\n",
+ VSERVICE_CORE_SERVICE_NAME_SIZE);
+ return -EINVAL;
+ }
+
+ p = kstrndup(buf, count, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ /*
+ * Writing a service name to this file creates a new service. The
+ * service is created without a protocol. It will appear in sysfs
+ * but will not be bound to a driver until a valid protocol name
+ * has been written to the created device's protocol sysfs attribute.
+ */
+ new_service = vs_server_core_create_service(server, session, service,
+ VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
+ if (IS_ERR(new_service))
+ ret = PTR_ERR(new_service);
+
+ kfree(p);
+
+ return ret;
+}
+
+static ssize_t server_core_reset_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *core_service = to_vs_service_device(dev);
+ struct vs_session_device *session =
+ vs_service_get_session(core_service);
+ struct vs_service_device *target;
+ vs_service_id_t service_id;
+ unsigned long val;
+ int err;
+
+ /*
+ * Writing a valid service_id to this file does a reset of that service
+ */
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ service_id = val;
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -EINVAL;
+
+ err = vs_service_reset(target, core_service);
+
+ vs_put_service(target);
+ return err < 0 ? err : count;
+}
+
+static ssize_t server_core_remove_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_service_device *target;
+ vs_service_id_t service_id;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ service_id = val;
+ if (service_id == 0) {
+ /*
+ * We don't allow removing the core service this way. The
+ * core service will be removed when the session is removed.
+ */
+ return -EINVAL;
+ }
+
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -EINVAL;
+
+ err = vs_service_delete(target, service);
+
+ vs_put_service(target);
+ return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(create_service, S_IWUSR,
+ NULL, server_core_create_service_store);
+static DEVICE_ATTR(reset_service, S_IWUSR,
+ NULL, server_core_reset_service_store);
+static DEVICE_ATTR(remove_service, S_IWUSR,
+ NULL, server_core_remove_service_store);
+
+static struct attribute *server_core_dev_attrs[] = {
+ &dev_attr_create_service.attr,
+ &dev_attr_reset_service.attr,
+ &dev_attr_remove_service.attr,
+ NULL,
+};
+
+static const struct attribute_group server_core_attr_group = {
+ .attrs = server_core_dev_attrs,
+};
+
+static int init_transport_resource_allocation(struct core_server *server)
+{
+ struct vs_session_device *session = vs_core_server_session(server);
+ struct vs_transport *transport = session->transport;
+ size_t size;
+ int err;
+
+ mutex_init(&server->alloc_lock);
+ mutex_lock(&server->alloc_lock);
+
+ transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
+ &server->in_quota_remaining);
+
+ transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
+ &server->in_notify_map_bits);
+
+ size = BITS_TO_LONGS(server->in_notify_map_bits) *
+ sizeof(unsigned long);
+ server->in_notify_map = kzalloc(size, GFP_KERNEL);
+ if (server->in_notify_map_bits && !server->in_notify_map) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ size = BITS_TO_LONGS(server->out_notify_map_bits) *
+ sizeof(unsigned long);
+ server->out_notify_map = kzalloc(size, GFP_KERNEL);
+ if (server->out_notify_map_bits && !server->out_notify_map) {
+ err = -ENOMEM;
+ goto fail_free_in_bits;
+ }
+
+ mutex_unlock(&server->alloc_lock);
+
+ return 0;
+
+fail_free_in_bits:
+ kfree(server->in_notify_map);
+fail:
+ mutex_unlock(&server->alloc_lock);
+ return err;
+}
+
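+/*
+ * Pick a quota for one direction. An explicitly set quota must fit within
+ * the remaining pool; otherwise the driver's best-effort value is used,
+ * capped by what remains, falling back to the driver's minimum.
+ */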
+static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
+ unsigned *remaining)
+{
+ unsigned quota;
+
+ if (set) {
+ quota = set;
+
+ if (quota > *remaining)
+ return -ENOSPC;
+ } else if (best) {
+ quota = min(best, *remaining);
+ } else {
+ quota = minimum;
+ }
+
+ if (quota < minimum)
+ return -ENOSPC;
+
+ *remaining -= quota;
+
+ return min_t(unsigned, quota, INT_MAX);
+}
+
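+/*
+ * Reserve a contiguous range of notification bits in the given bitmap and
+ * return its offset. A request for zero bits returns offset 0.
+ */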
+static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
+ unsigned nr_bits)
+{
+ unsigned offset;
+
+ if (notify_count) {
+ offset = bitmap_find_next_zero_area(map, nr_bits, 0,
+ notify_count, 0);
+
+ if (offset >= nr_bits || offset > (unsigned)INT_MAX)
+ return -ENOSPC;
+
+ bitmap_set(map, offset, notify_count);
+ } else {
+ offset = 0;
+ }
+
+ return offset;
+}
+
+/*
+ * alloc_transport_resources - Allocates the quotas and notification bits for
+ * a service.
+ * @server: the core service state.
+ * @service: the service device to allocate resources for.
+ *
+ * This function allocates message quotas and notification bits. It is called
+ * for the core service in alloc(), and for every other service by the server
+ * bus probe() function.
+ */
+static int alloc_transport_resources(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(service);
+ unsigned in_bit_offset, out_bit_offset;
+ unsigned in_quota, out_quota;
+ int ret;
+ struct vs_service_driver *driver;
+
+ if (WARN_ON(!service->dev.driver))
+ return -ENODEV;
+
+ mutex_lock(&server->alloc_lock);
+
+ driver = to_vs_service_driver(service->dev.driver);
+
+ /* Quota allocations */
+ ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
+ service->in_quota_set, &server->in_quota_remaining);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate in quota\n");
+ goto fail_in_quota;
+ }
+ in_quota = ret;
+
+ ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
+ service->out_quota_set, &server->out_quota_remaining);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate out quota\n");
+ goto fail_out_quota;
+ }
+ out_quota = ret;
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "%d: quota in: %u out: %u; remaining in: %u out: %u\n",
+ service->id, in_quota, out_quota,
+ server->in_quota_remaining,
+ server->out_quota_remaining);
+
+ /* Notification bit allocations */
+ ret = alloc_notify_bits(service->notify_recv_bits,
+ server->in_notify_map, server->in_notify_map_bits);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate in notify bits\n");
+ goto fail_in_notify;
+ }
+ in_bit_offset = ret;
+
+ ret = alloc_notify_bits(service->notify_send_bits,
+ server->out_notify_map, server->out_notify_map_bits);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate out notify bits\n");
+ goto fail_out_notify;
+ }
+ out_bit_offset = ret;
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "notify bits in: %u/%u out: %u/%u\n",
+ in_bit_offset, service->notify_recv_bits,
+ out_bit_offset, service->notify_send_bits);
+
+ /* Fill in the device's allocations */
+ service->recv_quota = in_quota;
+ service->send_quota = out_quota;
+ service->notify_recv_offset = in_bit_offset;
+ service->notify_send_offset = out_bit_offset;
+
+ mutex_unlock(&server->alloc_lock);
+
+ return 0;
+
+fail_out_notify:
+ if (service->notify_recv_bits)
+ bitmap_clear(server->in_notify_map,
+ in_bit_offset, service->notify_recv_bits);
+fail_in_notify:
+ server->out_quota_remaining += out_quota;
+fail_out_quota:
+ server->in_quota_remaining += in_quota;
+fail_in_quota:
+
+ mutex_unlock(&server->alloc_lock);
+
+ service->recv_quota = 0;
+ service->send_quota = 0;
+ service->notify_recv_bits = 0;
+ service->notify_recv_offset = 0;
+ service->notify_send_bits = 0;
+ service->notify_send_offset = 0;
+
+ return ret;
+}
+
+/*
+ * free_transport_resources - Frees the quotas and notification bits for
+ * a non-core service.
+ * @server: the core service state.
+ * @service: the service device to free resources for.
+ *
+ * This function is called by the server to free message quotas and
+ * notification bits that were allocated by alloc_transport_resources. It must
+ * only be called when the target service is in reset, and must be called with
+ * the core service's state lock held.
+ */
+static int free_transport_resources(struct core_server *server,
+ struct vs_service_device *service)
+{
+ mutex_lock(&server->alloc_lock);
+
+ if (service->notify_recv_bits)
+ bitmap_clear(server->in_notify_map,
+ service->notify_recv_offset,
+ service->notify_recv_bits);
+
+ if (service->notify_send_bits)
+ bitmap_clear(server->out_notify_map,
+ service->notify_send_offset,
+ service->notify_send_bits);
+
+ server->in_quota_remaining += service->recv_quota;
+ server->out_quota_remaining += service->send_quota;
+
+ mutex_unlock(&server->alloc_lock);
+
+ service->recv_quota = 0;
+ service->send_quota = 0;
+ service->notify_recv_bits = 0;
+ service->notify_recv_offset = 0;
+ service->notify_send_bits = 0;
+ service->notify_send_offset = 0;
+
+ return 0;
+}
+
+static struct vs_server_core_state *
+vs_core_server_alloc(struct vs_service_device *service)
+{
+ struct core_server *server;
+ int err;
+
+ if (WARN_ON(service->id != 0))
+ goto fail;
+
+ server = kzalloc(sizeof(*server), GFP_KERNEL);
+ if (!server)
+ goto fail;
+
+ server->service = service;
+ INIT_LIST_HEAD(&server->message_queue);
+ INIT_WORK(&server->message_queue_work, message_queue_work);
+ mutex_init(&server->message_queue_lock);
+
+ err = init_transport_resource_allocation(server);
+ if (err)
+ goto fail_init_alloc;
+
+ err = alloc_transport_resources(server, service);
+ if (err)
+ goto fail_alloc_transport;
+
+ err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
+ if (err)
+ goto fail_sysfs;
+
+ return &server->state;
+
+fail_sysfs:
+ free_transport_resources(server, service);
+fail_alloc_transport:
+ kfree(server->out_notify_map);
+ kfree(server->in_notify_map);
+fail_init_alloc:
+ kfree(server);
+fail:
+ return NULL;
+}
+
+static void vs_core_server_release(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+
+ /* Delete all the other services */
+ vs_session_delete_noncore(session);
+
+ sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
+ kfree(server->out_notify_map);
+ kfree(server->in_notify_map);
+ kfree(server);
+}
+
+/**
+ * vs_server_create_service - create and register a new vService server
+ * @session: the session to create the vService server on
+ * @parent: an existing server that is managing the new server
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ */
+struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+ struct vs_service_device *parent, const char *name,
+ const char *protocol, const void *plat_data)
+{
+ struct vs_service_device *core_service, *new_service;
+ struct core_server *server;
+
+ if (!session->is_server || !name || !protocol)
+ return NULL;
+
+ core_service = session->core_service;
+ if (!core_service)
+ return NULL;
+
+ device_lock(&core_service->dev);
+ if (!core_service->dev.driver) {
+ device_unlock(&core_service->dev);
+ return NULL;
+ }
+
+ server = dev_to_core_server(&core_service->dev);
+
+ if (!parent)
+ parent = core_service;
+
+ new_service = vs_server_core_create_service(server, session, parent,
+ VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
+
+ device_unlock(&core_service->dev);
+
+ if (IS_ERR(new_service))
+ return NULL;
+
+ return new_service;
+}
+EXPORT_SYMBOL(vs_server_create_service);
+
+/**
+ * vs_server_destroy_service - destroy and unregister a vService server
+ * @service: the service to destroy
+ * @parent: the service that manages @service; if NULL, the session's core
+ *          service is used
+ *
+ * This function must _not_ be called from the target service's own
+ * workqueue.
+ */
+int vs_server_destroy_service(struct vs_service_device *service,
+ struct vs_service_device *parent)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ if (!session->is_server || service->id == 0)
+ return -EINVAL;
+
+ if (!parent)
+ parent = session->core_service;
+
+ return vs_service_delete(service, parent);
+}
+EXPORT_SYMBOL(vs_server_destroy_service);
+
+static void __queue_service_created(struct vs_service_device *service,
+ void *data)
+{
+ struct core_server *server = (struct core_server *)data;
+
+ vs_server_core_queue_service_created(server, service);
+}
+
+static int vs_server_core_handle_connect(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+ int err;
+
+ /* Tell the other end that we've finished connecting. */
+ err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
+ if (err)
+ return err;
+
+ /* Queue a service-created message for each existing service. */
+ vs_session_for_each_service(session, __queue_service_created, server);
+
+ /* Re-enable all the services. */
+ vs_session_enable_noncore(session);
+
+ return 0;
+}
+
+static void vs_core_server_disable_services(struct core_server *server)
+{
+ struct vs_session_device *session = vs_core_server_session(server);
+ struct pending_message *msg;
+
+ /* Disable all the other services */
+ vs_session_disable_noncore(session);
+
+ /* Flush all the pending service-readiness messages */
+ mutex_lock(&server->message_queue_lock);
+ while (!list_empty(&server->message_queue)) {
+ msg = list_first_entry(&server->message_queue,
+ struct pending_message, list);
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ mutex_unlock(&server->message_queue_lock);
+}
+
+static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+
+ vs_core_server_disable_services(server);
+
+ return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
+}
+
+static int
+vs_server_core_handle_service_reset(struct vs_server_core_state *state,
+ unsigned service_id)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+
+ if (service_id == 0)
+ return -EPROTO;
+
+ return vs_service_handle_reset(session, service_id, false);
+}
+
+static void vs_core_server_start(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+ int err;
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+ "Core server start\n");
+
+ err = vs_server_core_core_send_startup(&server->state,
+ server->service->recv_quota,
+ server->service->send_quota, GFP_KERNEL);
+
+ if (err)
+ dev_err(&session->dev, "Failed to start core protocol: %d\n",
+ err);
+}
+
+static void vs_core_server_reset(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+ "Core server reset\n");
+
+ vs_core_server_disable_services(server);
+}
+
+static struct vs_server_core vs_core_server_driver = {
+ .alloc = vs_core_server_alloc,
+ .release = vs_core_server_release,
+ .start = vs_core_server_start,
+ .reset = vs_core_server_reset,
+ .tx_ready = vs_core_server_tx_ready,
+ .core = {
+ .req_connect = vs_server_core_handle_connect,
+ .req_disconnect = vs_server_core_handle_disconnect,
+ .msg_service_reset = vs_server_core_handle_service_reset,
+ },
+};
+
+/*
+ * Server bus driver
+ */
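+
+/* Match a service device to a server-side driver by protocol name. */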
+static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+ /* Don't match anything to the devio driver; it's bound manually */
+ if (!vsdrv->protocol)
+ return 0;
+
+ WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
+
+ /* Don't match anything that doesn't have a protocol set yet */
+ if (!service->protocol)
+ return 0;
+
+ if (strcmp(service->protocol, vsdrv->protocol) == 0)
+ return 1;
+
+ return 0;
+}
+
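+/*
+ * vs_server_bus_probe - bind a driver to a server-side service device.
+ *
+ * For non-core services, transport resources (quotas and notification
+ * bits) are allocated from the core server's pools before the generic
+ * vs_service_bus_probe() is called; for the core service this is done in
+ * the alloc() callback instead.
+ */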
+static int vs_server_bus_probe(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ int ret;
+
+ /*
+ * Set the notify counts for the service, unless the driver is the
+ * devio driver, in which case they have already been set by the devio
+ * bind ioctl. The devio driver cannot be bound automatically.
+ */
+ struct vs_service_driver *driver =
+ to_vs_service_driver(service->dev.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ if (driver != &vs_devio_server_driver)
+#endif
+ {
+ service->notify_recv_bits = driver->in_notify_count;
+ service->notify_send_bits = driver->out_notify_count;
+ }
+
+ /*
+ * We can't allocate transport resources here for the core service
+ * because the resource pool doesn't exist yet. It's done in alloc()
+ * instead (which is called, indirectly, by vs_service_bus_probe()).
+ */
+ if (service->id == 0)
+ return vs_service_bus_probe(dev);
+
+ if (!server)
+ return -ENODEV;
+ ret = alloc_transport_resources(server, service);
+ if (ret < 0)
+ goto fail;
+
+ ret = vs_service_bus_probe(dev);
+ if (ret < 0)
+ goto fail_free_resources;
+
+ return 0;
+
+fail_free_resources:
+ free_transport_resources(server, service);
+fail:
+ return ret;
+}
+
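+/*
+ * vs_server_bus_remove - unbind a driver and return the service's transport
+ * resources to the core server's pools. This is skipped for the core
+ * service, whose resource pool is freed separately.
+ */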
+static int vs_server_bus_remove(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+
+ vs_service_bus_remove(dev);
+
+ /*
+ * We skip free_transport_resources for the core service because the
+ * resource pool has already been freed at this point. It's also
+ * possible that the core service has disappeared, in which case
+ * there's no work to do here.
+ */
+ if (server != NULL && service->id != 0)
+ free_transport_resources(server, service);
+
+ return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+struct service_enable_work_struct {
+ struct vs_service_device *service;
+ struct work_struct work;
+};
+
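+/*
+ * service_enable_work - deferred start and enable of a new service.
+ *
+ * Scheduled from dev_protocol_store() once a protocol has been assigned.
+ * Starts the service, enables it if the core protocol is connected, and
+ * then asks the bus to attach a driver for the protocol.
+ */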
+static void service_enable_work(struct work_struct *work)
+{
+ struct service_enable_work_struct *enable_work = container_of(work,
+ struct service_enable_work_struct, work);
+ struct vs_service_device *service = enable_work->service;
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ bool started;
+ int ret;
+
+ kfree(enable_work);
+
+ if (!server)
+ return;
+ /* Start and enable the service */
+ vs_service_state_lock(server->service);
+ started = vs_service_start(service);
+ if (!started) {
+ vs_service_state_unlock(server->service);
+ vs_put_service(service);
+ return;
+ }
+
+ if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+ vs_service_enable(service);
+ vs_service_state_unlock(server->service);
+
+ /* Tell the bus to search for a driver that supports the protocol */
+ ret = device_attach(&service->dev);
+ if (ret == 0)
+ dev_warn(&service->dev, "No driver found for protocol: %s\n",
+ service->protocol);
+ kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
+
+ /* The corresponding vs_get_service was done when the work was queued */
+ vs_put_service(service);
+}
+
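+/*
+ * Writing the "protocol" sysfs attribute assigns a protocol to a service
+ * that does not have one yet, and schedules service_enable_work() to start
+ * it. For example (illustrative path and protocol name only):
+ *
+ *   echo -n com.example.serial > \
+ *       /sys/bus/vservices-server/devices/<service>/protocol
+ */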
+static ssize_t dev_protocol_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct service_enable_work_struct *enable_work;
+
+ /* The protocol can only be set once */
+ if (service->protocol)
+ return -EPERM;
+
+ /* Registering additional core servers is not allowed */
+ if (strcmp(buf, VSERVICE_CORE_PROTOCOL_NAME) == 0)
+ return -EINVAL;
+
+ if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE)
+ return -E2BIG;
+
+ enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
+ if (!enable_work)
+ return -ENOMEM;
+
+ /* Set the protocol and tell the client about it */
+ service->protocol = kstrdup(buf, GFP_KERNEL);
+ if (!service->protocol) {
+ kfree(enable_work);
+ return -ENOMEM;
+ }
+ strim(service->protocol);
+
+ /*
+ * Schedule work to enable the service. We can't do it here because
+ * we would need to take the core service lock, and this sysfs
+ * attribute can be deleted while that lock is held, so taking the
+ * lock here could deadlock against the attribute's removal.
+ *
+ * The corresponding vs_put_service is called in the enable_work
+ * function.
+ */
+ INIT_WORK(&enable_work->work, service_enable_work);
+ enable_work->service = vs_get_service(service);
+ schedule_work(&enable_work->work);
+
+ return count;
+}
+
+static ssize_t service_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
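+/*
+ * quota_in_store - set the requested incoming message quota via sysfs.
+ *
+ * The value is only recorded in in_quota_set; it takes effect when
+ * alloc_transport_resources() next runs, i.e. when a driver is bound to
+ * the service. Writes are rejected with -EPERM while a driver is bound.
+ */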
+static ssize_t quota_in_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ int ret;
+ unsigned long in_quota;
+
+ if (!server)
+ return -ENODEV;
+ /*
+ * Don't allow quota to be changed for services that have a driver
+ * bound. We take the alloc lock here because the device lock is held
+ * while creating and destroying this sysfs item. This means we can
+ * race with driver binding, but that doesn't matter: we actually just
+ * want to know that alloc_transport_resources() hasn't run yet, and
+ * that takes the alloc lock.
+ */
+ mutex_lock(&server->alloc_lock);
+ if (service->dev.driver) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = kstrtoul(buf, 0, &in_quota);
+ if (ret < 0)
+ goto out;
+
+ service->in_quota_set = in_quota;
+ ret = count;
+
+out:
+ mutex_unlock(&server->alloc_lock);
+
+ return ret;
+}
+
+static ssize_t quota_in_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
+}
+
+static ssize_t quota_out_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ int ret;
+ unsigned long out_quota;
+
+ if (!server)
+ return -ENODEV;
+ /* See comment in quota_in_store. */
+ mutex_lock(&server->alloc_lock);
+ if (service->dev.driver) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = kstrtoul(buf, 0, &out_quota);
+ if (ret < 0)
+ goto out;
+
+ service->out_quota_set = out_quota;
+ ret = count;
+
+out:
+ mutex_unlock(&server->alloc_lock);
+
+ return ret;
+}
+
+static ssize_t quota_out_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
+}
+
+static struct device_attribute vs_server_dev_attrs[] = {
+ __ATTR_RO(id),
+ __ATTR_RO(is_server),
+ __ATTR(protocol, S_IRUGO | S_IWUSR,
+ dev_protocol_show, dev_protocol_store),
+ __ATTR_RO(service_name),
+ __ATTR(quota_in, S_IRUGO | S_IWUSR,
+ quota_in_show, quota_in_store),
+ __ATTR(quota_out, S_IRUGO | S_IWUSR,
+ quota_out_show, quota_out_store),
+ __ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+ struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_server_drv_attrs[] = {
+ __ATTR_RO(protocol),
+ __ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_server_drv_attrs[] = {
+ &driver_attr_protocol.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_drv);
+#endif
+
+struct bus_type vs_server_bus_type = {
+ .name = "vservices-server",
+ .dev_attrs = vs_server_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ .drv_attrs = vs_server_drv_attrs,
+#else
+ .drv_groups = vs_server_drv_groups,
+#endif
+ .match = vs_server_bus_match,
+ .probe = vs_server_bus_probe,
+ .remove = vs_server_bus_remove,
+ .uevent = vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_server_bus_type);
+
+/*
+ * Server session driver
+ */
+static int vs_server_session_probe(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_service_device *service;
+
+ service = __vs_server_core_register_service(session, 0, NULL,
+ VSERVICE_CORE_SERVICE_NAME,
+ VSERVICE_CORE_PROTOCOL_NAME, NULL);
+ if (IS_ERR(service))
+ return PTR_ERR(service);
+
+ return 0;
+}
+
+static int
+vs_server_session_service_added(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ if (WARN_ON(!server || !service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_created(server, service);
+
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send service_created: %d\n", err);
+
+ return err;
+}
+
+static int
+vs_server_session_service_start(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ if (WARN_ON(!server || !service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_reset_ready(server,
+ VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
+
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send server_ready: %d\n", err);
+
+ return err;
+}
+
+static int
+vs_server_session_service_local_reset(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ if (WARN_ON(!server || !service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_reset_ready(server,
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
+
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send service_reset: %d\n", err);
+
+ return err;
+}
+
+static int
+vs_server_session_service_removed(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ /*
+ * It's possible for the core server to be forcibly removed before
+ * the other services, for example when the underlying transport
+ * vanishes. If that happens, we can end up here with a NULL core
+ * server pointer.
+ */
+ if (!server)
+ return 0;
+
+ if (WARN_ON(!service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_removed(server, service);
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send service_removed: %d\n", err);
+
+ return err;
+}
+
+static struct vs_session_driver vs_server_session_driver = {
+ .driver = {
+ .name = "vservices-server-session",
+ .owner = THIS_MODULE,
+ .bus = &vs_session_bus_type,
+ .probe = vs_server_session_probe,
+ .suppress_bind_attrs = true,
+ },
+ .is_server = true,
+ .service_bus = &vs_server_bus_type,
+ .service_added = vs_server_session_service_added,
+ .service_start = vs_server_session_service_start,
+ .service_local_reset = vs_server_session_service_local_reset,
+ .service_removed = vs_server_session_service_removed,
+};
+
+static int __init vs_core_server_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vs_server_bus_type);
+ if (ret)
+ goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ vs_devio_server_driver.driver.bus = &vs_server_bus_type;
+ vs_devio_server_driver.driver.owner = THIS_MODULE;
+ ret = driver_register(&vs_devio_server_driver.driver);
+ if (ret)
+ goto fail_devio_register;
+#endif
+
+ ret = driver_register(&vs_server_session_driver.driver);
+ if (ret)
+ goto fail_driver_register;
+
+ ret = vservice_core_server_register(&vs_core_server_driver,
+ "vs_core_server");
+ if (ret)
+ goto fail_core_register;
+
+ vservices_server_root = kobject_create_and_add("server-sessions",
+ vservices_root);
+ if (!vservices_server_root) {
+ ret = -ENOMEM;
+ goto fail_create_root;
+ }
+
+ return 0;
+
+fail_create_root:
+ vservice_core_server_unregister(&vs_core_server_driver);
+fail_core_register:
+ driver_unregister(&vs_server_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_server_driver.driver);
+ vs_devio_server_driver.driver.bus = NULL;
+ vs_devio_server_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+ bus_unregister(&vs_server_bus_type);
+fail_bus_register:
+ return ret;
+}
+
+static void __exit vs_core_server_exit(void)
+{
+ kobject_put(vservices_server_root);
+ vservice_core_server_unregister(&vs_core_server_driver);
+ driver_unregister(&vs_server_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_server_driver.driver);
+ vs_devio_server_driver.driver.bus = NULL;
+ vs_devio_server_driver.driver.owner = NULL;
+#endif
+ bus_unregister(&vs_server_bus_type);
+}
+
+subsys_initcall(vs_core_server_init);
+module_exit(vs_core_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/debug.h b/drivers/vservices/debug.h
new file mode 100644
index 000000000000..b379b04942d3
--- /dev/null
+++ b/drivers/vservices/debug.h
@@ -0,0 +1,74 @@
+/*
+ * drivers/vservices/debug.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Debugging macros and support functions for Virtual Services.
+ */
+#ifndef _VSERVICES_DEBUG_H
+#define _VSERVICES_DEBUG_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+#include <linux/printk.h>
+#else
+#ifndef no_printk
+#define no_printk(format, args...) do { } while (0)
+#endif
+#endif
+
+#include <vservices/session.h>
+#include "transport.h"
+
+#define VS_DEBUG_TRANSPORT (1 << 0)
+#define VS_DEBUG_TRANSPORT_MESSAGES (1 << 1)
+#define VS_DEBUG_SESSION (1 << 2)
+#define VS_DEBUG_CLIENT (1 << 3)
+#define VS_DEBUG_CLIENT_CORE (1 << 4)
+#define VS_DEBUG_SERVER (1 << 5)
+#define VS_DEBUG_SERVER_CORE (1 << 6)
+#define VS_DEBUG_PROTOCOL (1 << 7)
+#define VS_DEBUG_ALL 0xff
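+
+/*
+ * These values are bit flags that are ORed into a session's debug_mask to
+ * select which subsystems emit vs_debug()/vs_dev_debug() output, e.g.
+ * (illustrative):
+ *
+ *   session->debug_mask = VS_DEBUG_SESSION | VS_DEBUG_SERVER_CORE;
+ */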
+
+#ifdef CONFIG_VSERVICES_DEBUG
+
+#define vs_debug(type, session, format, args...) \
+ do { \
+ if ((session)->debug_mask & (type)) \
+ dev_dbg(&(session)->dev, format, ##args); \
+ } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+ do { \
+ if ((session)->debug_mask & (type)) \
+ dev_dbg(dev, format, ##args); \
+ } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+ struct vs_mbuf *mbuf)
+{
+ if (session->debug_mask & VS_DEBUG_TRANSPORT_MESSAGES)
+ print_hex_dump_bytes("msg:", DUMP_PREFIX_OFFSET,
+ mbuf->data, mbuf->size);
+}
+
+#else
+
+/* Dummy versions: Use no_printk to retain type/format string checking */
+#define vs_debug(type, session, format, args...) \
+ do { (void)session; no_printk(format, ##args); } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+ do { (void)session; (void)dev; no_printk(format, ##args); } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+ struct vs_mbuf *mbuf) {}
+
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+#endif /* _VSERVICES_DEBUG_H */
diff --git a/drivers/vservices/devio.c b/drivers/vservices/devio.c
new file mode 100644
index 000000000000..b3ed4ab7d1d6
--- /dev/null
+++ b/drivers/vservices/devio.c
@@ -0,0 +1,1059 @@
+/*
+ * devio.c - cdev I/O for service devices
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd
+ * Author: Philip Derrin <philip@cog.systems>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+#include <vservices/ioctl.h>
+#include "session.h"
+
+#define VSERVICES_DEVICE_MAX (VS_MAX_SERVICES * VS_MAX_SESSIONS)
+
+struct vs_devio_priv {
+ struct kref kref;
+ bool running, reset;
+
+ /* Receive queue */
+ wait_queue_head_t recv_wq;
+ atomic_t notify_pending;
+ struct list_head recv_queue;
+};
+
+static void
+vs_devio_priv_free(struct kref *kref)
+{
+ struct vs_devio_priv *priv = container_of(kref, struct vs_devio_priv,
+ kref);
+
+ WARN_ON(priv->running);
+ WARN_ON(!list_empty_careful(&priv->recv_queue));
+ WARN_ON(waitqueue_active(&priv->recv_wq));
+
+ kfree(priv);
+}
+
+static void vs_devio_priv_put(struct vs_devio_priv *priv)
+{
+ kref_put(&priv->kref, vs_devio_priv_free);
+}
+
+static int
+vs_devio_service_probe(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ kref_init(&priv->kref);
+ priv->running = false;
+ priv->reset = false;
+ init_waitqueue_head(&priv->recv_wq);
+ atomic_set(&priv->notify_pending, 0);
+ INIT_LIST_HEAD(&priv->recv_queue);
+
+ dev_set_drvdata(&service->dev, priv);
+
+ wake_up(&service->quota_wq);
+
+ return 0;
+}
+
+static int
+vs_devio_service_remove(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+ WARN_ON(priv->running);
+ WARN_ON(!list_empty_careful(&priv->recv_queue));
+ WARN_ON(waitqueue_active(&priv->recv_wq));
+
+ vs_devio_priv_put(priv);
+
+ return 0;
+}
+
+static int
+vs_devio_service_receive(struct vs_service_device *service,
+ struct vs_mbuf *mbuf)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+ WARN_ON(!priv->running);
+
+ spin_lock(&priv->recv_wq.lock);
+ list_add_tail(&mbuf->queue, &priv->recv_queue);
+ wake_up_locked(&priv->recv_wq);
+ spin_unlock(&priv->recv_wq.lock);
+
+ return 0;
+}
+
+static void
+vs_devio_service_notify(struct vs_service_device *service, u32 flags)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+ int old, cur;
+
+ WARN_ON(!priv->running);
+
+ if (!flags)
+ return;
+
+ /* open-coded atomic_or() */
+ cur = atomic_read(&priv->notify_pending);
+ while ((old = atomic_cmpxchg(&priv->notify_pending,
+ cur, cur | flags)) != cur)
+ cur = old;
+
+ wake_up(&priv->recv_wq);
+}
+
+static void
+vs_devio_service_start(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+ if (!priv->reset) {
+ WARN_ON(priv->running);
+ priv->running = true;
+ wake_up(&service->quota_wq);
+ }
+}
+
+static void
+vs_devio_service_reset(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+ struct vs_mbuf *mbuf, *tmp;
+
+ WARN_ON(!priv->running && !priv->reset);
+
+ /*
+ * Mark the service as being in reset. This flag can never be cleared
+ * on an open device; the user must acknowledge the reset by closing
+ * and reopening the device.
+ */
+ priv->reset = true;
+ priv->running = false;
+
+ spin_lock_irq(&priv->recv_wq.lock);
+ list_for_each_entry_safe(mbuf, tmp, &priv->recv_queue, queue)
+ vs_service_free_mbuf(service, mbuf);
+ INIT_LIST_HEAD(&priv->recv_queue);
+ spin_unlock_irq(&priv->recv_wq.lock);
+ wake_up_all(&priv->recv_wq);
+}
+
+/*
+ * This driver will be registered by the core server module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_server_driver = {
+ /* No protocol, so the normal bus match will never bind this. */
+ .protocol = NULL,
+ .is_server = true,
+ .rx_atomic = true,
+
+ .probe = vs_devio_service_probe,
+ .remove = vs_devio_service_remove,
+ .receive = vs_devio_service_receive,
+ .notify = vs_devio_service_notify,
+ .start = vs_devio_service_start,
+ .reset = vs_devio_service_reset,
+
+ /*
+ * Set reasonable default quotas. These can be overridden by passing
+ * nonzero values to IOCTL_VS_BIND_SERVER, which will set the
+ * service's *_quota_set fields.
+ */
+ .in_quota_min = 1,
+ .in_quota_best = 8,
+ .out_quota_min = 1,
+ .out_quota_best = 8,
+
+ /*
+ * Mark the notify counts as invalid; the counts set on the service
+ * itself will be used instead.
+ */
+ .in_notify_count = (unsigned)-1,
+ .out_notify_count = (unsigned)-1,
+
+ .driver = {
+ .name = "vservices-server-devio",
+ .owner = NULL, /* set by core server */
+ .bus = NULL, /* set by core server */
+ .suppress_bind_attrs = true, /* see vs_devio_poll */
+ },
+};
+EXPORT_SYMBOL_GPL(vs_devio_server_driver);
+
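+/*
+ * vs_devio_bind_server - handle IOCTL_VS_BIND_SERVER.
+ *
+ * Records the quotas and notify counts requested by userspace on the
+ * service, manually binds the devio server driver to it, and returns the
+ * quotas and maximum message size that were actually allocated.
+ */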
+static int
+vs_devio_bind_server(struct vs_service_device *service,
+ struct vs_ioctl_bind *bind)
+{
+ int ret = -ENODEV;
+
+ /* Ensure the server module is loaded and the driver is registered. */
+ if (!try_module_get(vs_devio_server_driver.driver.owner))
+ goto fail_module_get;
+
+ device_lock(&service->dev);
+ ret = -EBUSY;
+ if (service->dev.driver != NULL)
+ goto fail_device_unbound;
+
+ /* Set up the quota and notify counts. */
+ service->in_quota_set = bind->recv_quota;
+ service->out_quota_set = bind->send_quota;
+ service->notify_send_bits = bind->send_notify_bits;
+ service->notify_recv_bits = bind->recv_notify_bits;
+
+ /* Manually probe the driver. */
+ service->dev.driver = &vs_devio_server_driver.driver;
+ ret = service->dev.bus->probe(&service->dev);
+ if (ret < 0)
+ goto fail_probe_driver;
+
+ ret = device_bind_driver(&service->dev);
+ if (ret < 0)
+ goto fail_bind_driver;
+
+ /* Pass the allocated quotas back to the user. */
+ bind->recv_quota = service->recv_quota;
+ bind->send_quota = service->send_quota;
+ bind->msg_size = vs_service_max_mbuf_size(service);
+
+ device_unlock(&service->dev);
+ module_put(vs_devio_server_driver.driver.owner);
+
+ return 0;
+
+fail_bind_driver:
+ ret = service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+ service->dev.driver = NULL;
+fail_device_unbound:
+ device_unlock(&service->dev);
+ module_put(vs_devio_server_driver.driver.owner);
+fail_module_get:
+ return ret;
+}
+
+/*
+ * This driver will be registered by the core client module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_client_driver = {
+ /* No protocol, so the normal bus match will never bind this. */
+ .protocol = NULL,
+ .is_server = false,
+ .rx_atomic = true,
+
+ .probe = vs_devio_service_probe,
+ .remove = vs_devio_service_remove,
+ .receive = vs_devio_service_receive,
+ .notify = vs_devio_service_notify,
+ .start = vs_devio_service_start,
+ .reset = vs_devio_service_reset,
+
+ .driver = {
+ .name = "vservices-client-devio",
+ .owner = NULL, /* set by core client */
+ .bus = NULL, /* set by core client */
+ .suppress_bind_attrs = true, /* see vs_devio_poll */
+ },
+};
+EXPORT_SYMBOL_GPL(vs_devio_client_driver);
+
+static int
+vs_devio_bind_client(struct vs_service_device *service,
+ struct vs_ioctl_bind *bind)
+{
+ int ret = -ENODEV;
+
+ /* Ensure the client module is loaded and the driver is registered. */
+ if (!try_module_get(vs_devio_client_driver.driver.owner))
+ goto fail_module_get;
+
+ device_lock(&service->dev);
+ ret = -EBUSY;
+ if (service->dev.driver != NULL)
+ goto fail_device_unbound;
+
+ /* Manually probe the driver. */
+ service->dev.driver = &vs_devio_client_driver.driver;
+ ret = service->dev.bus->probe(&service->dev);
+ if (ret < 0)
+ goto fail_probe_driver;
+
+ ret = device_bind_driver(&service->dev);
+ if (ret < 0)
+ goto fail_bind_driver;
+
+ /* Pass the allocated quotas back to the user. */
+ bind->recv_quota = service->recv_quota;
+ bind->send_quota = service->send_quota;
+ bind->msg_size = vs_service_max_mbuf_size(service);
+ bind->send_notify_bits = service->notify_send_bits;
+ bind->recv_notify_bits = service->notify_recv_bits;
+
+ device_unlock(&service->dev);
+ module_put(vs_devio_client_driver.driver.owner);
+
+ return 0;
+
+fail_bind_driver:
+ ret = service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+ service->dev.driver = NULL;
+fail_device_unbound:
+ device_unlock(&service->dev);
+ module_put(vs_devio_client_driver.driver.owner);
+fail_module_get:
+ return ret;
+}
+
+static struct vs_devio_priv *
+vs_devio_priv_get_from_service(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = NULL;
+ struct device_driver *drv;
+
+ if (!service)
+ return NULL;
+
+ device_lock(&service->dev);
+ drv = service->dev.driver;
+
+ if ((drv == &vs_devio_client_driver.driver) ||
+ (drv == &vs_devio_server_driver.driver)) {
+ vs_service_state_lock(service);
+ priv = dev_get_drvdata(&service->dev);
+ if (priv)
+ kref_get(&priv->kref);
+ vs_service_state_unlock(service);
+ }
+
+ device_unlock(&service->dev);
+
+ return priv;
+}
+
+static int
+vs_devio_open(struct inode *inode, struct file *file)
+{
+ struct vs_service_device *service;
+
+ if (imajor(inode) != vservices_cdev_major)
+ return -ENODEV;
+
+ service = vs_service_lookup_by_devt(inode->i_rdev);
+ if (!service)
+ return -ENODEV;
+
+ file->private_data = service;
+
+ return 0;
+}
+
+static int
+vs_devio_release(struct inode *inode, struct file *file)
+{
+ struct vs_service_device *service = file->private_data;
+
+ if (service) {
+ struct vs_devio_priv *priv =
+ vs_devio_priv_get_from_service(service);
+
+ if (priv) {
+ device_release_driver(&service->dev);
+ vs_devio_priv_put(priv);
+ }
+
+ file->private_data = NULL;
+ vs_put_service(service);
+ }
+
+ return 0;
+}
+
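+/*
+ * vs_devio_check_iov - copy and validate a user iovec array.
+ *
+ * Copies the array described by @io from userspace, checks that the total
+ * length does not exceed MAX_RW_COUNT and that each buffer is accessible
+ * in the required direction, and returns the total length in @total. The
+ * caller must kfree() the returned array.
+ */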
+static struct iovec *
+vs_devio_check_iov(struct vs_ioctl_iovec *io, bool is_send, ssize_t *total)
+{
+ struct iovec *iov;
+ unsigned i;
+ int ret;
+
+ if (io->iovcnt > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+
+ iov = kmalloc(sizeof(*iov) * io->iovcnt, GFP_KERNEL);
+ if (!iov)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(iov, io->iov, sizeof(*iov) * io->iovcnt)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total = 0;
+ for (i = 0; i < io->iovcnt; i++) {
+ ssize_t iov_len = (ssize_t)iov[i].iov_len;
+
+ if (iov_len > MAX_RW_COUNT - *total) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+ iov[i].iov_base, iov_len)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total += iov_len;
+ }
+
+ return iov;
+
+fail:
+ kfree(iov);
+ return ERR_PTR(ret);
+}
+
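+/*
+ * vs_devio_send - send one message gathered from a user iovec.
+ *
+ * Waits for send quota unless @nonblocking is set, copies the user
+ * buffers into a newly allocated mbuf and sends it on the service.
+ * Returns the number of bytes sent or a negative error code.
+ */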
+static ssize_t
+vs_devio_send(struct vs_service_device *service, struct iovec *iov,
+ size_t iovcnt, ssize_t to_send, bool nonblocking)
+{
+ struct vs_mbuf *mbuf = NULL;
+ struct vs_devio_priv *priv;
+ unsigned i;
+ ssize_t offset = 0;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+ priv = vs_devio_priv_get_from_service(service);
+ ret = -ENODEV;
+ if (!priv)
+ goto fail_priv_get;
+
+ vs_service_state_lock(service);
+
+ /*
+ * Waiting alloc. We must open-code this because there is no real
+ * state structure or base state.
+ */
+ ret = 0;
+ while (!vs_service_send_mbufs_available(service)) {
+ if (nonblocking) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ prepare_to_wait_exclusive(&service->quota_wq, &wait,
+ TASK_INTERRUPTIBLE);
+
+ vs_service_state_unlock(service);
+ schedule();
+ vs_service_state_lock(service);
+
+ if (priv->reset) {
+ ret = -ECONNRESET;
+ break;
+ }
+
+ if (!priv->running) {
+ ret = -ENOTCONN;
+ break;
+ }
+ }
+ finish_wait(&service->quota_wq, &wait);
+
+ if (ret)
+ goto fail_alloc;
+
+ mbuf = vs_service_alloc_mbuf(service, to_send, GFP_KERNEL);
+ if (IS_ERR(mbuf)) {
+ ret = PTR_ERR(mbuf);
+ goto fail_alloc;
+ }
+
+ /* Ready to send; copy data into the mbuf. */
+ ret = -EFAULT;
+ for (i = 0; i < iovcnt; i++) {
+ if (copy_from_user(mbuf->data + offset, iov[i].iov_base,
+ iov[i].iov_len))
+ goto fail_copy;
+ offset += iov[i].iov_len;
+ }
+ mbuf->size = to_send;
+
+ /* Send the message. */
+ ret = vs_service_send(service, mbuf);
+ if (ret < 0)
+ goto fail_send;
+
+ /* Wake the next waiter, if there's more quota available. */
+ if (waitqueue_active(&service->quota_wq) &&
+ vs_service_send_mbufs_available(service) > 0)
+ wake_up(&service->quota_wq);
+
+ vs_service_state_unlock(service);
+ vs_devio_priv_put(priv);
+
+ return to_send;
+
+fail_send:
+fail_copy:
+ vs_service_free_mbuf(service, mbuf);
+ wake_up(&service->quota_wq);
+fail_alloc:
+ vs_service_state_unlock(service);
+ vs_devio_priv_put(priv);
+fail_priv_get:
+ return ret;
+}
+
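+/*
+ * vs_devio_recv - receive one message and/or pending notification bits.
+ *
+ * Waits for a message, notification or reset unless @nonblocking is set.
+ * A received message is scattered into the user iovec; any pending
+ * notification bits are returned through @notify_bits even if no message
+ * could be returned. Returns the number of bytes copied or a negative
+ * error code.
+ */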
+static ssize_t
+vs_devio_recv(struct vs_service_device *service, struct iovec *iov,
+ size_t iovcnt, u32 *notify_bits, ssize_t recv_space,
+ bool nonblocking)
+{
+ struct vs_mbuf *mbuf = NULL;
+ struct vs_devio_priv *priv;
+ unsigned i;
+ ssize_t offset = 0;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+ priv = vs_devio_priv_get_from_service(service);
+ ret = -ENODEV;
+ if (!priv)
+ goto fail_priv_get;
+
+ /* Take the recv_wq lock, which also protects recv_queue. */
+ spin_lock_irq(&priv->recv_wq.lock);
+
+ /* Wait for a message, notification, or reset. */
+ ret = wait_event_interruptible_exclusive_locked_irq(priv->recv_wq,
+ !list_empty(&priv->recv_queue) || priv->reset ||
+ atomic_read(&priv->notify_pending) || nonblocking);
+
+ if (priv->reset)
+ ret = -ECONNRESET; /* Service reset */
+ else if (!ret && list_empty(&priv->recv_queue))
+ ret = -EAGAIN; /* Nonblocking, or notification */
+
+ if (ret < 0) {
+ spin_unlock_irq(&priv->recv_wq.lock);
+ goto no_mbuf;
+ }
+
+ /* Take the first mbuf from the list, and check its size. */
+ mbuf = list_first_entry(&priv->recv_queue, struct vs_mbuf, queue);
+ if (mbuf->size > recv_space) {
+ spin_unlock_irq(&priv->recv_wq.lock);
+ ret = -EMSGSIZE;
+ goto fail_msg_size;
+ }
+ list_del_init(&mbuf->queue);
+
+ spin_unlock_irq(&priv->recv_wq.lock);
+
+ /* Copy to user. */
+ ret = -EFAULT;
+ for (i = 0; (mbuf->size > offset) && (i < iovcnt); i++) {
+ size_t len = min(mbuf->size - offset, iov[i].iov_len);
+ if (copy_to_user(iov[i].iov_base, mbuf->data + offset, len))
+ goto fail_copy;
+ offset += len;
+ }
+ ret = offset;
+
+no_mbuf:
+ /*
+ * Read and clear the pending notification bits. If any notifications
+ * are received, don't return an error, even if we failed to receive a
+ * message.
+ */
+ *notify_bits = atomic_xchg(&priv->notify_pending, 0);
+ if ((ret < 0) && *notify_bits)
+ ret = 0;
+
+fail_copy:
+ if (mbuf)
+ vs_service_free_mbuf(service, mbuf);
+fail_msg_size:
+ vs_devio_priv_put(priv);
+fail_priv_get:
+ return ret;
+}
+
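+/*
+ * Check that the file was opened with the access mode required for this
+ * operation, then apply the LSM file_permission hook.
+ */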
+static int
+vs_devio_check_perms(struct file *file, unsigned flags)
+{
+ if ((flags & MAY_READ) && !(file->f_mode & FMODE_READ))
+ return -EBADF;
+
+ if ((flags & MAY_WRITE) && !(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ return security_file_permission(file, flags);
+}
+
+static long
+vs_devio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *ptr = (void __user *)arg;
+ struct vs_service_device *service = file->private_data;
+ struct vs_ioctl_bind bind;
+ struct vs_ioctl_iovec io;
+ u32 flags;
+ long ret;
+ ssize_t iov_total;
+ struct iovec *iov;
+
+ if (!service)
+ return -ENODEV;
+
+ switch (cmd) {
+ case IOCTL_VS_RESET_SERVICE:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ ret = vs_service_reset(service, service);
+ break;
+ case IOCTL_VS_GET_NAME:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (service->name != NULL) {
+ size_t len = strnlen(service->name,
+ _IOC_SIZE(IOCTL_VS_GET_NAME) - 1);
+ if (copy_to_user(ptr, service->name, len + 1))
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case IOCTL_VS_GET_PROTOCOL:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (service->protocol != NULL) {
+ size_t len = strnlen(service->protocol,
+ _IOC_SIZE(IOCTL_VS_GET_PROTOCOL) - 1);
+ if (copy_to_user(ptr, service->protocol, len + 1))
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case IOCTL_VS_BIND_CLIENT:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ ret = vs_devio_bind_client(service, &bind);
+ if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+ ret = -EFAULT;
+ break;
+ case IOCTL_VS_BIND_SERVER:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&bind, ptr, sizeof(bind))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = vs_devio_bind_server(service, &bind);
+ if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+ ret = -EFAULT;
+ break;
+ case IOCTL_VS_NOTIFY:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&flags, ptr, sizeof(flags))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = vs_service_notify(service, flags);
+ break;
+ case IOCTL_VS_SEND:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&io, ptr, sizeof(io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ iov = vs_devio_check_iov(&io, true, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_send(service, iov, io.iovcnt, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+ break;
+ case IOCTL_VS_RECV:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&io, ptr, sizeof(io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ iov = vs_devio_check_iov(&io, false, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_recv(service, iov, io.iovcnt,
+ &io.notify_bits, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+
+ if (ret >= 0) {
+ u32 __user *notify_bits_ptr = ptr + offsetof(
+ struct vs_ioctl_iovec, notify_bits);
+ if (copy_to_user(notify_bits_ptr, &io.notify_bits,
+ sizeof(io.notify_bits)))
+ ret = -EFAULT;
+ }
+ break;
+ default:
+ dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+ arg);
+ ret = -ENOSYS;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct vs_compat_ioctl_bind {
+ __u32 send_quota;
+ __u32 recv_quota;
+ __u32 send_notify_bits;
+ __u32 recv_notify_bits;
+ compat_size_t msg_size;
+};
+
+#define compat_ioctl_bind_conv(dest, src) ({ \
+ dest.send_quota = src.send_quota; \
+ dest.recv_quota = src.recv_quota; \
+ dest.send_notify_bits = src.send_notify_bits; \
+ dest.recv_notify_bits = src.recv_notify_bits; \
+ dest.msg_size = (compat_size_t)src.msg_size; \
+})
+
+#define COMPAT_IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_compat_ioctl_bind)
+#define COMPAT_IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_compat_ioctl_bind)
+
+struct vs_compat_ioctl_iovec {
+ union {
+ __u32 iovcnt; /* input */
+ __u32 notify_bits; /* output (recv only) */
+ };
+ compat_uptr_t iov;
+};
+
+#define COMPAT_IOCTL_VS_SEND \
+ _IOW('4', 6, struct vs_compat_ioctl_iovec)
+#define COMPAT_IOCTL_VS_RECV \
+ _IOWR('4', 7, struct vs_compat_ioctl_iovec)
+
+static struct iovec *
+vs_devio_check_compat_iov(struct vs_compat_ioctl_iovec *c_io,
+ bool is_send, ssize_t *total)
+{
+ struct iovec *iov;
+ struct compat_iovec *c_iov;
+
+ unsigned i;
+ int ret;
+
+ if (c_io->iovcnt > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+
+ c_iov = kzalloc(sizeof(*c_iov) * c_io->iovcnt, GFP_KERNEL);
+ if (!c_iov)
+ return ERR_PTR(-ENOMEM);
+
+ iov = kzalloc(sizeof(*iov) * c_io->iovcnt, GFP_KERNEL);
+ if (!iov) {
+ kfree(c_iov);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (copy_from_user(c_iov, (struct compat_iovec __user *)
+ compat_ptr(c_io->iov), sizeof(*c_iov) * c_io->iovcnt)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total = 0;
+ for (i = 0; i < c_io->iovcnt; i++) {
+ ssize_t iov_len;
+ iov[i].iov_base = compat_ptr(c_iov[i].iov_base);
+ iov[i].iov_len = (compat_size_t)c_iov[i].iov_len;
+
+ iov_len = (ssize_t)iov[i].iov_len;
+
+ if (iov_len > MAX_RW_COUNT - *total) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+ iov[i].iov_base, iov_len)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total += iov_len;
+ }
+
+ kfree(c_iov);
+ return iov;
+
+fail:
+ kfree(c_iov);
+ kfree(iov);
+ return ERR_PTR(ret);
+}
+
+static long
+vs_devio_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *ptr = (void __user *)arg;
+ struct vs_service_device *service = file->private_data;
+ struct vs_ioctl_bind bind;
+ struct vs_compat_ioctl_bind compat_bind;
+ struct vs_compat_ioctl_iovec compat_io;
+ long ret;
+ ssize_t iov_total;
+ struct iovec *iov;
+
+ if (!service)
+ return -ENODEV;
+
+ switch (cmd) {
+ case IOCTL_VS_RESET_SERVICE:
+ case IOCTL_VS_GET_NAME:
+ case IOCTL_VS_GET_PROTOCOL:
+ return vs_devio_ioctl(file, cmd, arg);
+ case COMPAT_IOCTL_VS_SEND:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ iov = vs_devio_check_compat_iov(&compat_io, true, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_send(service, iov, compat_io.iovcnt, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+
+ break;
+ case COMPAT_IOCTL_VS_RECV:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ iov = vs_devio_check_compat_iov(&compat_io, false, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_recv(service, iov, compat_io.iovcnt,
+ &compat_io.notify_bits, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+
+ if (ret >= 0) {
+ u32 __user *notify_bits_ptr = ptr + offsetof(
+ struct vs_compat_ioctl_iovec, notify_bits);
+ if (copy_to_user(notify_bits_ptr, &compat_io.notify_bits,
+ sizeof(compat_io.notify_bits)))
+ ret = -EFAULT;
+ }
+ break;
+ case COMPAT_IOCTL_VS_BIND_CLIENT:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ ret = vs_devio_bind_client(service, &bind);
+ compat_ioctl_bind_conv(compat_bind, bind);
+ if (!ret && copy_to_user(ptr, &compat_bind,
+ sizeof(compat_bind)))
+ ret = -EFAULT;
+ break;
+ case COMPAT_IOCTL_VS_BIND_SERVER:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&compat_bind, ptr, sizeof(compat_bind))) {
+ ret = -EFAULT;
+ break;
+ }
+ compat_ioctl_bind_conv(bind, compat_bind);
+ ret = vs_devio_bind_server(service, &bind);
+ compat_ioctl_bind_conv(compat_bind, bind);
+ if (!ret && copy_to_user(ptr, &compat_bind,
+ sizeof(compat_bind)))
+ ret = -EFAULT;
+ break;
+ default:
+ dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+ arg);
+ ret = -ENOSYS;
+ break;
+ }
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static unsigned int
+vs_devio_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct vs_service_device *service = file->private_data;
+ struct vs_devio_priv *priv = vs_devio_priv_get_from_service(service);
+ unsigned int flags = 0;
+
+ poll_wait(file, &service->quota_wq, wait);
+
+ if (priv) {
+ /*
+ * Note: there is no way for us to ensure that all poll
+ * waiters on a given workqueue have gone away, other than to
+ * actually close the file. So, this poll_wait() is only safe
+ * if we never release our claim on the service before the
+ * file is closed.
+ *
+ * We try to guarantee this by only unbinding the devio driver
+ * on close, and setting suppress_bind_attrs in the driver so
+ * root can't unbind us with sysfs.
+ */
+ poll_wait(file, &priv->recv_wq, wait);
+
+ if (priv->reset) {
+ /* Service reset; raise poll error. */
+ flags |= POLLERR | POLLHUP;
+ } else if (priv->running) {
+ if (!list_empty_careful(&priv->recv_queue))
+ flags |= POLLRDNORM | POLLIN;
+ if (atomic_read(&priv->notify_pending))
+ flags |= POLLRDNORM | POLLIN;
+ if (vs_service_send_mbufs_available(service) > 0)
+ flags |= POLLWRNORM | POLLOUT;
+ }
+
+ vs_devio_priv_put(priv);
+ } else {
+ /* No driver attached. Return error flags. */
+ flags |= POLLERR | POLLHUP;
+ }
+
+ return flags;
+}
+
+static const struct file_operations vs_fops = {
+ .owner = THIS_MODULE,
+ .open = vs_devio_open,
+ .release = vs_devio_release,
+ .unlocked_ioctl = vs_devio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vs_devio_compat_ioctl,
+#endif
+ .poll = vs_devio_poll,
+};
+
+int vservices_cdev_major;
+static struct cdev vs_cdev;
+
+int __init
+vs_devio_init(void)
+{
+ dev_t dev;
+ int r;
+
+ r = alloc_chrdev_region(&dev, 0, VSERVICES_DEVICE_MAX,
+ "vs_service");
+ if (r < 0)
+ goto fail_alloc_chrdev;
+ vservices_cdev_major = MAJOR(dev);
+
+ cdev_init(&vs_cdev, &vs_fops);
+ r = cdev_add(&vs_cdev, dev, VSERVICES_DEVICE_MAX);
+ if (r < 0)
+ goto fail_cdev_add;
+
+ return 0;
+
+fail_cdev_add:
+ unregister_chrdev_region(dev, VSERVICES_DEVICE_MAX);
+fail_alloc_chrdev:
+ return r;
+}
+
+void __exit
+vs_devio_exit(void)
+{
+ cdev_del(&vs_cdev);
+ unregister_chrdev_region(MKDEV(vservices_cdev_major, 0),
+ VSERVICES_DEVICE_MAX);
+}
diff --git a/drivers/vservices/protocol/Kconfig b/drivers/vservices/protocol/Kconfig
new file mode 100644
index 000000000000..e0f2798c8a0d
--- /dev/null
+++ b/drivers/vservices/protocol/Kconfig
@@ -0,0 +1,44 @@
+#
+# vServices protocol drivers configuration
+#
+
+if VSERVICES_SERVER || VSERVICES_CLIENT
+
+menu "Protocol drivers"
+config VSERVICES_PROTOCOL_BLOCK
+ bool
+
+config VSERVICES_PROTOCOL_BLOCK_SERVER
+ tristate "Block server protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+ select VSERVICES_PROTOCOL_BLOCK
+ help
+ This option adds support for the Virtual Services block protocol server.
+
+config VSERVICES_PROTOCOL_BLOCK_CLIENT
+ tristate "Block client protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+ select VSERVICES_PROTOCOL_BLOCK
+ help
+ This option adds support for the Virtual Services block protocol client.
+
+config VSERVICES_PROTOCOL_SERIAL
+ bool
+
+config VSERVICES_PROTOCOL_SERIAL_SERVER
+ tristate "Serial server protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+ select VSERVICES_PROTOCOL_SERIAL
+ help
+ This option adds support for the Virtual Services serial protocol server.
+
+config VSERVICES_PROTOCOL_SERIAL_CLIENT
+ tristate "Serial client protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+ select VSERVICES_PROTOCOL_SERIAL
+ help
+ This option adds support for the Virtual Services serial protocol client.
+
+endmenu
+
+endif # VSERVICES_SERVER || VSERVICES_CLIENT
diff --git a/drivers/vservices/protocol/Makefile b/drivers/vservices/protocol/Makefile
new file mode 100644
index 000000000000..0c714e05e4c7
--- /dev/null
+++ b/drivers/vservices/protocol/Makefile
@@ -0,0 +1,5 @@
+# This is an autogenerated Makefile for vservice-linux-stacks
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += core/
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK) += block/
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL) += serial/
diff --git a/drivers/vservices/protocol/block/Makefile b/drivers/vservices/protocol/block/Makefile
new file mode 100644
index 000000000000..325b57e390e2
--- /dev/null
+++ b/drivers/vservices/protocol/block/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_SERVER) += vservices_protocol_block_server.o
+vservices_protocol_block_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_CLIENT) += vservices_protocol_block_client.o
+vservices_protocol_block_client-objs = client.o
diff --git a/drivers/vservices/protocol/block/client.c b/drivers/vservices/protocol/block/client.c
new file mode 100644
index 000000000000..702a30a82a9d
--- /dev/null
+++ b/drivers/vservices/protocol/block/client.c
@@ -0,0 +1,1186 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This is the generated code for the block client protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_block_client_driver {
+ struct vs_client_block *client;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+ container_of(d, struct vs_block_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ int i __maybe_unused;
+
+ /* Clear out pending read commands */
+ for_each_set_bit(i, state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING) {
+ void *tag = state->state.io.read_tags[i];
+
+ if (client->io.nack_read)
+ client->io.nack_read(state, tag,
+ VSERVICE_BLOCK_SERVICE_RESET);
+
+ __clear_bit(i, state->state.io.read_bitmask);
+ }
+
+ /* Clear out pending write commands */
+ for_each_set_bit(i, state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING) {
+ void *tag = state->state.io.write_tags[i];
+
+ if (client->io.nack_write)
+ client->io.nack_write(state, tag,
+ VSERVICE_BLOCK_SERVICE_RESET);
+
+ __clear_bit(i, state->state.io.write_bitmask);
+ }
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ _vs_client_block_req_open(state);
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ _vs_client_block_req_open(state);
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int block_client_probe(struct vs_service_device *service);
+static int block_client_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_client_register(struct vs_client_block *client,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_block_client_driver *driver;
+
+ if (client->tx_atomic && !client->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ client->driver = &driver->vsdrv;
+ driver->client = client;
+
+ driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = false;
+ driver->vsdrv.rx_atomic = client->rx_atomic;
+ driver->vsdrv.tx_atomic = client->tx_atomic;
+
+ driver->vsdrv.probe = block_client_probe;
+ driver->vsdrv.remove = block_client_remove;
+ driver->vsdrv.receive = block_handle_message;
+ driver->vsdrv.notify = block_handle_notify;
+ driver->vsdrv.start = client->tx_atomic ?
+ block_handle_start_bh : block_handle_start;
+ driver->vsdrv.reset = client->tx_atomic ?
+ block_handle_reset_bh : block_handle_reset;
+ driver->vsdrv.tx_ready = block_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ client->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_client_register);
+
+int vservice_block_client_unregister(struct vs_client_block *client)
+{
+ struct vs_block_client_driver *driver;
+
+ if (!client->driver)
+ return 0;
+
+ driver = to_client_driver(client->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ client->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_client_unregister);
+
+static int block_client_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client = to_client_driver(vsdrv)->client;
+ struct vs_client_block_state *state;
+
+ state = client->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int block_client_remove(struct vs_service_device *service)
+{
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client = to_client_driver(vsdrv)->client;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ client->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client = to_client_driver(vsdrv)->client;
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (client->tx_ready)
+ client->tx_ready(state);
+
+ return 0;
+}
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_REQ_OPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_open);
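+
+/*
+ * _vs_client_block_req_open() above and _vs_client_block_req_close() and
+ * _vs_client_block_req_reopen() below all follow the same pattern: check
+ * that the base state machine is in the expected state, allocate an mbuf
+ * just large enough for the message id, write the id, advance to the
+ * corresponding transitional state (CLOSED__OPEN, RUNNING__CLOSE or
+ * RUNNING__REOPEN) and send the buffer on the transport.
+ */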
+static int _vs_client_block_req_close(struct vs_client_block_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_REQ_CLOSE;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_close);
+static int _vs_client_block_req_reopen(struct vs_client_block_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_REQ_REOPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_reopen);
+static int
+block_base_handle_ack_open(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ _state->io.sector_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ _state->io.segment_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+ _state->readonly =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ _state->sector_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ _state->segment_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+ _state->device_sectors =
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ _state->flushable =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+ _state->committable =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ _client->opened(_state);
+ return 0;
+
+}
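+
+/*
+ * The ack_open payload decoded above is laid out, relative to the end of
+ * the message id: readonly (bool, offset 0), sector_size (u32, offset 4),
+ * segment_size (u32, offset 8), device_sectors (u64, offset 12),
+ * flushable (bool, offset 20) and committable (bool, offset 24).  The
+ * sector and segment sizes are also mirrored into state->io before the
+ * client's opened() callback is invoked.
+ */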
+
+static int
+block_base_handle_nack_open(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+ "Open operation failed for device %s\n",
+ VS_STATE_SERVICE_PTR(_state)->name);
+
+ return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_open);
+static int
+block_base_handle_ack_close(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+static int
+block_base_handle_nack_close(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_close);
+static int
+block_base_handle_ack_reopen(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->reopened) {
+ _client->reopened(_state);
+ return 0;
+ }
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return _vs_client_block_req_open(_state);
+
+}
+
+static int
+block_base_handle_nack_reopen(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_reopen);
+int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ data->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+ sizeof(uint32_t));
+ data->max_size = data->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_getbufs_ack_read);
+int vs_client_block_io_free_ack_read(struct vs_client_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_ack_read);
+struct vs_mbuf *vs_client_block_io_alloc_req_write(struct vs_client_block_state
+ *_state,
+ struct vs_pbuf *data,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!data)
+ goto fail;
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL + sizeof(uint32_t));
+ data->size = _state->io.segment_size;
+ data->max_size = data->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_alloc_req_write);
+int vs_client_block_io_free_req_write(struct vs_client_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_req_write);
+int
+vs_client_block_io_req_read(struct vs_client_block_state *_state, void *_opaque,
+ uint64_t sector_index, uint32_t num_sects,
+ bool nodelay, bool flush, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 24UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+ uint32_t _opaque_tmp;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ _opaque_tmp =
+ find_first_zero_bit(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = VSERVICE_BLOCK_IO_REQ_READ;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque_tmp;
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ sector_index;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ num_sects;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+ nodelay;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ flush;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.io.read_tags[_opaque_tmp] = _opaque;
+ __set_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_read);
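+
+/*
+ * Read requests are tagged rather than tracked by the caller: req_read
+ * picks the first free slot in state->state.io.read_bitmask (at most
+ * VSERVICE_BLOCK_IO_READ_MAX_PENDING reads outstanding, -EPROTO when no
+ * slot is free), stores the caller's opaque cookie in read_tags[] and
+ * sends the slot number on the wire.  The ack/nack handlers look the
+ * cookie up again and clear the bit before calling io.ack_read()/
+ * io.nack_read().  Illustrative use (sketch only, hypothetical names):
+ *
+ *	err = vs_client_block_io_req_read(state, my_cookie, sector, nsects,
+ *			false, false, GFP_KERNEL);
+ *
+ * my_cookie later comes back as the _opaque argument of io.ack_read or
+ * io.nack_read; on ack_read the callback also receives the data pbuf and
+ * the mbuf, which it can release with vs_client_block_io_free_ack_read().
+ */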
+int
+vs_client_block_io_req_write(struct vs_client_block_state *_state,
+ void *_opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay, bool flush,
+ bool commit, struct vs_pbuf data,
+ struct vs_mbuf *_mbuf)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+ uint32_t _opaque_tmp;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ _opaque_tmp =
+ find_first_zero_bit(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_BLOCK_IO_REQ_WRITE)
+
+ return -EINVAL;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque_tmp;
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ sector_index;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ num_sects;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+ nodelay;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ flush;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+ commit;
+ if ((data.size + sizeof(vs_message_id_t) + 28UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (data.size < data.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 28UL) =
+ data.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.io.write_tags[_opaque_tmp] = _opaque;
+ __set_bit(_opaque_tmp, _state->state.io.write_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_write);
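+
+/*
+ * Writes use a caller-supplied mbuf.  The usual sequence (sketch only,
+ * hypothetical names) is:
+ *
+ *	mbuf = vs_client_block_io_alloc_req_write(state, &pbuf, GFP_KERNEL);
+ *	memcpy(pbuf.data, my_data, my_len);
+ *	pbuf.size = my_len;
+ *	err = vs_client_block_io_req_write(state, my_cookie, sector, nsects,
+ *			false, false, false, pbuf, mbuf);
+ *
+ * alloc_req_write() sizes the pbuf for a full segment; req_write() trims
+ * the mbuf when pbuf.size is smaller than pbuf.max_size, tags the request
+ * in write_bitmask/write_tags[] exactly as req_read() does, and the cookie
+ * comes back through io.ack_write() or io.nack_write().
+ */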
+static int
+block_io_handle_ack_read(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+ void *_opaque;
+ struct vs_pbuf data;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+ uint32_t _opaque_tmp;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.read_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+ data.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ data.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+ sizeof(uint32_t));
+ data.max_size = data.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_client->io.ack_read)
+ return _client->io.ack_read(_state, _opaque, data, _mbuf);
+ return 0;
+}
+
+static int
+block_io_handle_nack_read(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+ void *_opaque;
+ vservice_block_block_io_error_t err;
+ uint32_t _opaque_tmp;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.read_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+ err =
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) +
+ 4UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->io.nack_read)
+ return _client->io.nack_read(_state, _opaque, err);
+ return 0;
+}
+
+EXPORT_SYMBOL(block_io_handle_ack_read);
+static int
+block_io_handle_ack_write(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ void *_opaque;
+ uint32_t _opaque_tmp;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.write_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->io.ack_write)
+ return _client->io.ack_write(_state, _opaque);
+ return 0;
+}
+
+static int
+block_io_handle_nack_write(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+ void *_opaque;
+ vservice_block_block_io_error_t err;
+ uint32_t _opaque_tmp;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.write_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+ err =
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) +
+ 4UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->io.nack_write)
+ return _client->io.nack_write(_state, _opaque, err);
+ return 0;
+}
+
+EXPORT_SYMBOL(block_io_handle_ack_write);
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_client_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_block *client =
+ to_client_driver(vsdrv)->client;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_BLOCK_BASE_ACK_OPEN:
+ ret = block_base_handle_ack_open(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_BASE_NACK_OPEN:
+ ret = block_base_handle_nack_open(client, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_BLOCK_BASE_ACK_CLOSE:
+ ret = block_base_handle_ack_close(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_BASE_NACK_CLOSE:
+ ret = block_base_handle_nack_close(client, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_BLOCK_BASE_ACK_REOPEN:
+ ret = block_base_handle_ack_reopen(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_BASE_NACK_REOPEN:
+ ret = block_base_handle_nack_reopen(client, state, _mbuf);
+ break;
+
+/** interface block_io **/
+/* command in parallel read */
+ case VSERVICE_BLOCK_IO_ACK_READ:
+ ret = block_io_handle_ack_read(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_IO_NACK_READ:
+ ret = block_io_handle_nack_read(client, state, _mbuf);
+ break;
+
+/* command in parallel write */
+ case VSERVICE_BLOCK_IO_ACK_WRITE:
+ ret = block_io_handle_ack_write(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_IO_NACK_WRITE:
+ ret = block_io_handle_nack_write(client, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_client_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_block *client =
+ to_client_driver(vsdrv)->client;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface block_io **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+int vs_client_block_reopen(struct vs_client_block_state *_state)
+{
+ return _vs_client_block_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_reopen);
+
+int vs_client_block_close(struct vs_client_block_state *_state)
+{
+ return _vs_client_block_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/block/server.c b/drivers/vservices/protocol/block/server.c
new file mode 100644
index 000000000000..a4a7d1a0c214
--- /dev/null
+++ b/drivers/vservices/protocol/block/server.c
@@ -0,0 +1,1371 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the block server protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_block_server_driver {
+ struct vs_server_block *server;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+ container_of(d, struct vs_block_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int block_server_probe(struct vs_service_device *service);
+static int block_server_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_server_register(struct vs_server_block *server,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_block_server_driver *driver;
+
+ if (server->tx_atomic && !server->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ server->driver = &driver->vsdrv;
+ driver->server = server;
+
+ driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = true;
+ driver->vsdrv.rx_atomic = server->rx_atomic;
+ driver->vsdrv.tx_atomic = server->tx_atomic;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.in_quota_min = 1;
+ driver->vsdrv.in_quota_best = server->in_quota_best ?
+ server->in_quota_best : driver->vsdrv.in_quota_min;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.out_quota_min = 1;
+ driver->vsdrv.out_quota_best = server->out_quota_best ?
+ server->out_quota_best : driver->vsdrv.out_quota_min;
+ driver->vsdrv.in_notify_count = VSERVICE_BLOCK_NBIT_IN__COUNT;
+ driver->vsdrv.out_notify_count = VSERVICE_BLOCK_NBIT_OUT__COUNT;
+
+ driver->vsdrv.probe = block_server_probe;
+ driver->vsdrv.remove = block_server_remove;
+ driver->vsdrv.receive = block_handle_message;
+ driver->vsdrv.notify = block_handle_notify;
+ driver->vsdrv.start = server->tx_atomic ?
+ block_handle_start_bh : block_handle_start;
+ driver->vsdrv.reset = server->tx_atomic ?
+ block_handle_reset_bh : block_handle_reset;
+ driver->vsdrv.tx_ready = block_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ server->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_server_register);
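+
+/*
+ * Server registration mirrors the client side: a backend module fills in a
+ * struct vs_server_block (alloc/release, open/close/reopen, io.req_read,
+ * io.req_write and optionally tx_ready), and the driver registered here is
+ * bound on vs_server_bus_type so the handlers below can dispatch incoming
+ * requests to those callbacks.  Illustrative call (hypothetical names):
+ *
+ *	err = __vservice_block_server_register(&my_block_server,
+ *			"my_block_server", THIS_MODULE);
+ */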
+
+int vservice_block_server_unregister(struct vs_server_block *server)
+{
+ struct vs_block_server_driver *driver;
+
+ if (!server->driver)
+ return 0;
+
+ driver = to_server_driver(server->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ server->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_server_unregister);
+
+static int block_server_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server = to_server_driver(vsdrv)->server;
+ struct vs_server_block_state *state;
+
+ state = server->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int block_server_remove(struct vs_service_device *service)
+{
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server = to_server_driver(vsdrv)->server;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ server->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server = to_server_driver(vsdrv)->server;
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (server->tx_ready)
+ server->tx_ready(state);
+
+ return 0;
+}
+
+static int
+vs_server_block_send_ack_open(struct vs_server_block_state *_state, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_ACK_OPEN;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _state->readonly;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ _state->sector_size;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+ _state->segment_size;
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ _state->device_sectors;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ _state->flushable;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+ _state->committable;
+ _state->io.sector_size = _state->sector_size;
+ _state->io.segment_size = _state->segment_size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_open);
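+
+/*
+ * send_ack_open() above packs the device parameters from the server state
+ * (readonly, sector_size, segment_size, device_sectors, flushable,
+ * committable) into the 28-byte ack payload at the same offsets the client
+ * decodes in its ack_open handler, copies sector_size/segment_size into
+ * state->io, and only moves the base state to RUNNING once the send has
+ * succeeded.
+ */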
+static int
+vs_server_block_send_nack_open(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_NACK_OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_open);
+static int
+vs_server_block_send_ack_close(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_ACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_close);
+static int
+vs_server_block_send_nack_close(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_NACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_close);
+static int
+vs_server_block_send_ack_reopen(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_ACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_reopen);
+static int
+vs_server_block_send_nack_reopen(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_NACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_reopen);
+static int
+vs_server_block_handle_req_open(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->open)
+ return vs_server_block_open_complete(_state,
+ _server->open(_state));
+ return vs_server_block_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_open_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS)
+ ret =
+ vs_server_block_send_ack_open(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR(_state))
+ ? GFP_ATOMIC : GFP_KERNEL);
+ else if (resp == VS_SERVER_RESP_FAILURE)
+ ret =
+ vs_server_block_send_nack_open(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_open_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_open);
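+
+/*
+ * The open handshake on the server side: handle_req_open() moves the base
+ * state to CLOSED__OPEN and calls the backend's open() callback if one is
+ * set; the resulting vs_server_response_type_t is passed to
+ * vs_server_block_open_complete(), which replies with ack_open on
+ * VS_SERVER_RESP_SUCCESS or nack_open on VS_SERVER_RESP_FAILURE (any other
+ * value sends no reply from this path).
+ */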
+static int
+vs_server_block_handle_req_close(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->close)
+ return vs_server_block_close_complete(_state,
+ _server->close(_state));
+ return vs_server_block_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_close_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS)
+ ret =
+ vs_server_block_send_ack_close(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ else if (resp == VS_SERVER_RESP_FAILURE)
+ ret =
+ vs_server_block_send_nack_close(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+ wake_up_all(&_state->service->quota_wq);
+ }
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_close_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_close);
+static int
+vs_server_block_handle_req_reopen(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->reopen)
+ return vs_server_block_reopen_complete(_state,
+ _server->reopen(_state));
+ else
+ return vs_server_block_send_nack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC
+ : GFP_KERNEL);
+
+}
+
+int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS) {
+ _state->io.sector_size = _state->sector_size;
+ _state->io.segment_size = _state->segment_size;
+ ret =
+ vs_server_block_send_ack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ } else if (resp == VS_SERVER_RESP_FAILURE) {
+ ret =
+ vs_server_block_send_nack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ }
+
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_reopen);
+struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!data)
+ goto fail;
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+ sizeof(uint32_t));
+ data->size = _state->io.segment_size;
+ data->max_size = data->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_alloc_ack_read);
+int vs_server_block_io_free_ack_read(struct vs_server_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_ack_read);
+int vs_server_block_io_getbufs_req_write(struct vs_server_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ data->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL);
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL + sizeof(uint32_t));
+ data->max_size = data->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_getbufs_req_write);
+int vs_server_block_io_free_req_write(struct vs_server_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_req_write);
+int
+vs_server_block_io_send_ack_read(struct vs_server_block_state *_state,
+ uint32_t _opaque, struct vs_pbuf data,
+ struct vs_mbuf *_mbuf)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.read_bitmask))
+ return -EPROTO;
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_BLOCK_IO_ACK_READ)
+
+ return -EINVAL;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+ if ((data.size + sizeof(vs_message_id_t) + 4UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (data.size < data.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ data.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.read_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_read);
+int
+vs_server_block_io_send_nack_read(struct vs_server_block_state *_state,
+ uint32_t _opaque,
+ vservice_block_block_io_error_t err,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.read_bitmask))
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_IO_NACK_READ;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) + 4UL) =
+ err;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.read_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_read);
+int
+vs_server_block_io_send_ack_write(struct vs_server_block_state *_state,
+ uint32_t _opaque, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.write_bitmask))
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_IO_ACK_WRITE;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.write_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_write);
+int
+vs_server_block_io_send_nack_write(struct vs_server_block_state *_state,
+ uint32_t _opaque,
+ vservice_block_block_io_error_t err,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.write_bitmask))
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_IO_NACK_WRITE;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) + 4UL) =
+ err;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.write_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_write);
+static int
+vs_server_block_io_handle_req_read(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 24UL;
+ uint32_t _opaque;
+ uint64_t sector_index;
+ uint32_t num_sects;
+ bool nodelay;
+ bool flush;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ if (test_bit(_opaque, _state->state.io.read_bitmask))
+ return -EPROTO;
+ __set_bit(_opaque, _state->state.io.read_bitmask);
+ _opaque =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ sector_index =
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ num_sects =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ nodelay =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+ flush =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->io.req_read)
+ return _server->io.req_read(_state, _opaque, sector_index,
+ num_sects, nodelay, flush);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->io.req_read, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_handle_req_read);
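+/*
+ * Incoming write request: the payload ends in a variable-length data
+ * segment, so the message size is first bracketed between _min_size and
+ * _max_size and then, once data.size is known, checked against the exact
+ * expected length.  The mbuf is not freed here; it is passed through to
+ * the driver's io.req_write handler together with the pbuf describing
+ * the data.
+ */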
+static int
+vs_server_block_io_handle_req_write(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+ uint32_t _opaque;
+ uint64_t sector_index;
+ uint32_t num_sects;
+ bool nodelay;
+ bool flush;
+ bool commit;
+ struct vs_pbuf data;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+ _opaque =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ if (test_bit(_opaque, _state->state.io.write_bitmask))
+ return -EPROTO;
+ __set_bit(_opaque, _state->state.io.write_bitmask);
+ _opaque =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ sector_index =
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ num_sects =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ nodelay =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+ flush =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+ commit =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+ data.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL);
+ data.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL + sizeof(uint32_t));
+ data.max_size = data.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_server->io.req_write)
+ return _server->io.req_write(_state, _opaque, sector_index,
+ num_sects, nodelay, flush, commit,
+ data, _mbuf);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->io.req_write, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_handle_req_write);
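+/*
+ * Receive-path demultiplexer: the leading vs_message_id_t word of each
+ * mbuf selects the generated handler for that message type.
+ */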
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_server_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_block *server =
+ to_server_driver(vsdrv)->server;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_BLOCK_BASE_REQ_OPEN:
+ ret = vs_server_block_handle_req_open(server, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_BLOCK_BASE_REQ_CLOSE:
+ ret = vs_server_block_handle_req_close(server, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_BLOCK_BASE_REQ_REOPEN:
+ ret = vs_server_block_handle_req_reopen(server, state, _mbuf);
+ break;
+
+/** interface block_io **/
+/* command in parallel read */
+ case VSERVICE_BLOCK_IO_REQ_READ:
+ ret = vs_server_block_io_handle_req_read(server, state, _mbuf);
+ break;
+
+/* command in parallel write */
+ case VSERVICE_BLOCK_IO_REQ_WRITE:
+ ret = vs_server_block_io_handle_req_write(server, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_server_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_block *server =
+ to_server_driver(vsdrv)->server;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
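+	/*
+	 * Handle asserted notification bits lowest-first.  The block
+	 * protocol defines no incoming notifications, so any set bit is
+	 * reported as a protocol error.
+	 */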
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface block_io **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/Makefile b/drivers/vservices/protocol/core/Makefile
new file mode 100644
index 000000000000..6bef7f5f3cdd
--- /dev/null
+++ b/drivers/vservices/protocol/core/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_SERVER) += vservices_protocol_core_server.o
+vservices_protocol_core_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_CLIENT) += vservices_protocol_core_client.o
+vservices_protocol_core_client-objs = client.o
diff --git a/drivers/vservices/protocol/core/client.c b/drivers/vservices/protocol/core/client.c
new file mode 100644
index 000000000000..2dd213662fc2
--- /dev/null
+++ b/drivers/vservices/protocol/core/client.c
@@ -0,0 +1,1069 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the core client protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
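+/*
+ * Convenience accessors used throughout the generated code below: raw data
+ * pointer and length of a message buffer, and the service that owns a
+ * protocol state structure.
+ */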
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_client_driver {
+ struct vs_client_core *client;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+ container_of(d, struct vs_core_client_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->start)
+ client->start(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->reset)
+ client->reset(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->start)
+ client->start(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->reset)
+ client->reset(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static int core_client_probe(struct vs_service_device *service);
+static int core_client_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_client_register(struct vs_client_core *client,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_core_client_driver *driver;
+
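+	/*
+	 * A client that must transmit from atomic context has to be able
+	 * to receive in atomic context as well; reject inconsistent
+	 * settings.
+	 */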
+ if (client->tx_atomic && !client->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ client->driver = &driver->vsdrv;
+ driver->client = client;
+
+ driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = false;
+ driver->vsdrv.rx_atomic = client->rx_atomic;
+ driver->vsdrv.tx_atomic = client->tx_atomic;
+
+ driver->vsdrv.probe = core_client_probe;
+ driver->vsdrv.remove = core_client_remove;
+ driver->vsdrv.receive = core_handle_message;
+ driver->vsdrv.notify = core_handle_notify;
+ driver->vsdrv.start = client->tx_atomic ?
+ core_handle_start_bh : core_handle_start;
+ driver->vsdrv.reset = client->tx_atomic ?
+ core_handle_reset_bh : core_handle_reset;
+ driver->vsdrv.tx_ready = core_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ client->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_client_register);
+
+int vservice_core_client_unregister(struct vs_client_core *client)
+{
+ struct vs_core_client_driver *driver;
+
+ if (!client->driver)
+ return 0;
+
+ driver = to_client_driver(client->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ client->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_client_unregister);
+
+static int core_client_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client = to_client_driver(vsdrv)->client;
+ struct vs_client_core_state *state;
+
+ state = client->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int core_client_remove(struct vs_service_device *service)
+{
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ client->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client = to_client_driver(vsdrv)->client;
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+
+ if (client->tx_ready)
+ client->tx_ready(state);
+
+ return 0;
+}
+
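+/*
+ * Map the service_name and protocol_name string fields of a received
+ * service_created message directly onto the mbuf payload (no copy), and
+ * verify that the buffer length matches the variable-length protocol name.
+ */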
+int vs_client_core_core_getbufs_service_created(struct vs_client_core_state
+ *_state,
+ struct vs_string *service_name,
+ struct vs_string *protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+ const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ service_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+ protocol_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+ protocol_name->max_size =
+ VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+ /* Now check the size received is the exact size expected */
+ _exact_size =
+ _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+ protocol_name->max_size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_getbufs_service_created);
+int vs_client_core_core_free_service_created(struct vs_client_core_state
+ *_state,
+ struct vs_string *service_name,
+ struct vs_string *protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_free_service_created);
+int
+vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_core *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_REQ_CONNECT;
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_connect);
+int
+vs_client_core_core_req_disconnect(struct vs_client_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_core *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_REQ_DISCONNECT;
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_disconnect);
+static int
+core_core_handle_ack_connect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.ack_connect)
+ return _client->core.ack_connect(_state);
+ return 0;
+}
+
+static int
+core_core_handle_nack_connect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.nack_connect)
+ return _client->core.nack_connect(_state);
+ return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_connect);
+static int
+core_core_handle_ack_disconnect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.ack_disconnect)
+ return _client->core.ack_disconnect(_state);
+ return 0;
+}
+
+static int
+core_core_handle_nack_disconnect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.nack_disconnect)
+ return _client->core.nack_disconnect(_state);
+ return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_disconnect);
+static int
+vs_client_core_core_handle_startup(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+ uint32_t core_in_quota;
+ uint32_t core_out_quota;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_OFFLINE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+ core_in_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ core_out_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_startup)
+ return _client->core.msg_startup(_state, core_in_quota,
+ core_out_quota);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_startup);
+static int
+vs_client_core_core_handle_shutdown(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+ case VSERVICE_CORE_STATE_CONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+
+ default:
+ break;
+ }
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_shutdown)
+ return _client->core.msg_shutdown(_state);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_shutdown);
+static int
+vs_client_core_core_handle_service_created(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+ uint32_t service_id;
+ struct vs_string service_name;
+ struct vs_string protocol_name;
+ const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+ size_t _exact_size;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ service_name.ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ service_name.max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+ protocol_name.ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+ protocol_name.max_size =
+ VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+ /* Now check the size received is the exact size expected */
+ _exact_size =
+ _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+ protocol_name.max_size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_client->core.msg_service_created)
+ return _client->core.msg_service_created(_state, service_id,
+ service_name,
+ protocol_name, _mbuf);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_created);
+static int
+vs_client_core_core_handle_service_removed(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ uint32_t service_id;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_service_removed)
+ return _client->core.msg_service_removed(_state, service_id);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_removed);
+static int
+vs_client_core_core_handle_server_ready(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+ uint32_t service_id;
+ uint32_t in_quota;
+ uint32_t out_quota;
+ uint32_t in_bit_offset;
+ uint32_t in_num_bits;
+ uint32_t out_bit_offset;
+ uint32_t out_num_bits;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ in_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ out_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+ in_bit_offset =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ in_num_bits =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 16UL);
+ out_bit_offset =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 20UL);
+ out_num_bits =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 24UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_server_ready)
+ return _client->core.msg_server_ready(_state, service_id,
+ in_quota, out_quota,
+ in_bit_offset,
+ in_num_bits,
+ out_bit_offset,
+ out_num_bits);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_server_ready);
+static int
+vs_client_core_core_handle_service_reset(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ uint32_t service_id;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_service_reset)
+ return _client->core.msg_service_reset(_state, service_id);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_reset);
+int
+vs_client_core_core_send_service_reset(struct vs_client_core_state *_state,
+ uint32_t service_id, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_core *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_send_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_client_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_core *client =
+ to_client_driver(vsdrv)->client;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+ case VSERVICE_CORE_CORE_ACK_CONNECT:
+ ret = core_core_handle_ack_connect(client, state, _mbuf);
+ break;
+ case VSERVICE_CORE_CORE_NACK_CONNECT:
+ ret = core_core_handle_nack_connect(client, state, _mbuf);
+ break;
+
+/* command in sync disconnect */
+ case VSERVICE_CORE_CORE_ACK_DISCONNECT:
+ ret = core_core_handle_ack_disconnect(client, state, _mbuf);
+ break;
+ case VSERVICE_CORE_CORE_NACK_DISCONNECT:
+ ret = core_core_handle_nack_disconnect(client, state, _mbuf);
+ break;
+
+/* message startup */
+ case VSERVICE_CORE_CORE_MSG_STARTUP:
+ ret = vs_client_core_core_handle_startup(client, state, _mbuf);
+ break;
+
+/* message shutdown */
+ case VSERVICE_CORE_CORE_MSG_SHUTDOWN:
+ ret = vs_client_core_core_handle_shutdown(client, state, _mbuf);
+ break;
+
+/* message service_created */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+ ret =
+ vs_client_core_core_handle_service_created(client, state,
+ _mbuf);
+ break;
+
+/* message service_removed */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+ ret =
+ vs_client_core_core_handle_service_removed(client, state,
+ _mbuf);
+ break;
+
+/* message server_ready */
+ case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+ ret =
+ vs_client_core_core_handle_server_ready(client, state,
+ _mbuf);
+ break;
+
+/* message service_reset */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+ ret =
+ vs_client_core_core_handle_service_reset(client, state,
+ _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_client_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_core *client =
+ to_client_driver(vsdrv)->client;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface core **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/server.c b/drivers/vservices/protocol/core/server.c
new file mode 100644
index 000000000000..c3f36866a7f7
--- /dev/null
+++ b/drivers/vservices/protocol/core/server.c
@@ -0,0 +1,1226 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the core server protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_server_driver {
+ struct vs_server_core *server;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+ container_of(d, struct vs_core_server_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->start)
+ server->start(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->reset)
+ server->reset(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->start)
+ server->start(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->reset)
+ server->reset(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static int core_server_probe(struct vs_service_device *service);
+static int core_server_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_server_register(struct vs_server_core *server,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_core_server_driver *driver;
+
+ if (server->tx_atomic && !server->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ server->driver = &driver->vsdrv;
+ driver->server = server;
+
+ driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = true;
+ driver->vsdrv.rx_atomic = server->rx_atomic;
+ driver->vsdrv.tx_atomic = server->tx_atomic;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.in_quota_min = 1;
+ driver->vsdrv.in_quota_best = server->in_quota_best ?
+ server->in_quota_best : driver->vsdrv.in_quota_min;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.out_quota_min = 1;
+ driver->vsdrv.out_quota_best = server->out_quota_best ?
+ server->out_quota_best : driver->vsdrv.out_quota_min;
+ driver->vsdrv.in_notify_count = VSERVICE_CORE_NBIT_IN__COUNT;
+ driver->vsdrv.out_notify_count = VSERVICE_CORE_NBIT_OUT__COUNT;
+
+ driver->vsdrv.probe = core_server_probe;
+ driver->vsdrv.remove = core_server_remove;
+ driver->vsdrv.receive = core_handle_message;
+ driver->vsdrv.notify = core_handle_notify;
+ driver->vsdrv.start = server->tx_atomic ?
+ core_handle_start_bh : core_handle_start;
+ driver->vsdrv.reset = server->tx_atomic ?
+ core_handle_reset_bh : core_handle_reset;
+ driver->vsdrv.tx_ready = core_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ server->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_server_register);
+
+int vservice_core_server_unregister(struct vs_server_core *server)
+{
+ struct vs_core_server_driver *driver;
+
+ if (!server->driver)
+ return 0;
+
+ driver = to_server_driver(server->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ server->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_server_unregister);
+
+static int core_server_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server = to_server_driver(vsdrv)->server;
+ struct vs_server_core_state *state;
+
+ state = server->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int core_server_remove(struct vs_service_device *service)
+{
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server = to_server_driver(vsdrv)->server;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ server->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server = to_server_driver(vsdrv)->server;
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+
+ if (server->tx_ready)
+ server->tx_ready(state);
+
+ return 0;
+}
+
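+/*
+ * Allocate an mbuf large enough for a service_created message with
+ * maximum-length service and protocol names, and point the caller's
+ * vs_string cursors at the space reserved for each field.
+ */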
+struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+ vs_server_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!service_name)
+ goto fail;
+ service_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+ if (!protocol_name)
+ goto fail;
+ protocol_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+ protocol_name->max_size = VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_alloc_service_created);
+int vs_server_core_core_free_service_created(struct vs_server_core_state
+ *_state,
+ struct vs_string *service_name,
+ struct vs_string *protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_free_service_created);
+int
+vs_server_core_core_send_ack_connect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_ACK_CONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_connect);
+int
+vs_server_core_core_send_nack_connect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_NACK_CONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_connect);
+int
+vs_server_core_core_send_ack_disconnect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_ACK_DISCONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_disconnect);
+int
+vs_server_core_core_send_nack_disconnect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_NACK_DISCONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_disconnect);
+static int
+vs_server_core_core_handle_req_connect(const struct vs_server_core *_server,
+ struct vs_server_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->core.req_connect)
+ return _server->core.req_connect(_state);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->core.req_connect, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_connect);
+static int
+vs_server_core_core_handle_req_disconnect(const struct vs_server_core *_server,
+ struct vs_server_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->core.req_disconnect)
+ return _server->core.req_disconnect(_state);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->core.req_disconnect, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_disconnect);
+int
+vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+ uint32_t core_in_quota,
+ uint32_t core_out_quota, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_OFFLINE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_STARTUP;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ core_in_quota;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ core_out_quota;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_startup);
+int
+vs_server_core_core_send_shutdown(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SHUTDOWN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
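+	/*
+	 * Shutdown may be sent from any active state; after a successful
+	 * send, collapse DISCONNECTED or CONNECTED back to OFFLINE and
+	 * notify the driver of the state change.
+	 */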
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+ case VSERVICE_CORE_STATE_CONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_shutdown);
+int
+vs_server_core_core_send_service_created(struct vs_server_core_state *_state,
+ uint32_t service_id,
+ struct vs_string service_name,
+ struct vs_string protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_CORE_CORE_MSG_SERVICE_CREATED)
+
+ return -EINVAL;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+ {
+ size_t _size = strnlen(service_name.ptr, service_name.max_size);
+ if ((_size + sizeof(vs_message_id_t) + 4UL) >
+ VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ memset(service_name.ptr + _size, 0,
+ service_name.max_size - _size);
+ }
+ {
+ size_t _size =
+ strnlen(protocol_name.ptr, protocol_name.max_size);
+ if ((_size + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL) >
+ VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (_size < protocol_name.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (protocol_name.max_size - _size);
+
+ }
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_created);
+int
+vs_server_core_core_send_service_removed(struct vs_server_core_state *_state,
+ uint32_t service_id, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_removed);
+int
+vs_server_core_core_send_server_ready(struct vs_server_core_state *_state,
+ uint32_t service_id, uint32_t in_quota,
+ uint32_t out_quota,
+ uint32_t in_bit_offset,
+ uint32_t in_num_bits,
+ uint32_t out_bit_offset,
+ uint32_t out_num_bits, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVER_READY;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ in_quota;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+ out_quota;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ in_bit_offset;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+ in_num_bits;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ out_bit_offset;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+ out_num_bits;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_server_ready);
+int
+vs_server_core_core_send_service_reset(struct vs_server_core_state *_state,
+ uint32_t service_id, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_reset);
+static int
+vs_server_core_core_handle_service_reset(const struct vs_server_core *_server,
+ struct vs_server_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ uint32_t service_id;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->core.msg_service_reset)
+ return _server->core.msg_service_reset(_state, service_id);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_service_reset);
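+
+/*
+ * Dispatch an incoming message for the core service: the leading
+ * vs_message_id_t selects between the connect request, the disconnect
+ * request and the service_reset message; anything else is reported as a
+ * protocol error.
+ */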
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_server_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_core *server =
+ to_server_driver(vsdrv)->server;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+ case VSERVICE_CORE_CORE_REQ_CONNECT:
+ ret =
+ vs_server_core_core_handle_req_connect(server, state,
+ _mbuf);
+ break;
+
+/* command in sync disconnect */
+ case VSERVICE_CORE_CORE_REQ_DISCONNECT:
+ ret =
+ vs_server_core_core_handle_req_disconnect(server, state,
+ _mbuf);
+ break;
+
+/* message service_reset */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+ ret =
+ vs_server_core_core_handle_service_reset(server, state,
+ _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_server_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_core *server =
+ to_server_driver(vsdrv)->server;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface core **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services coreServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/Makefile b/drivers/vservices/protocol/serial/Makefile
new file mode 100644
index 000000000000..f5f29ed15921
--- /dev/null
+++ b/drivers/vservices/protocol/serial/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT) += vservices_protocol_serial_client.o
+vservices_protocol_serial_client-objs = client.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER) += vservices_protocol_serial_server.o
+vservices_protocol_serial_server-objs = server.o
diff --git a/drivers/vservices/protocol/serial/client.c b/drivers/vservices/protocol/serial/client.c
new file mode 100644
index 000000000000..1c37e722ce24
--- /dev/null
+++ b/drivers/vservices/protocol/serial/client.c
@@ -0,0 +1,925 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the serial client protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
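+
+/*
+ * On the wire, every message below starts with a vs_message_id_t header
+ * followed by its fixed-offset fields; the variable-length serial
+ * payload is prefixed by a uint32_t length word, which is what
+ * vs_pbuf.size reflects on both the send and receive paths.
+ */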
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_serial_client_driver {
+ struct vs_client_serial *client;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+ container_of(d, struct vs_serial_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ _vs_client_serial_req_open(state);
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ _vs_client_serial_req_open(state);
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int serial_client_probe(struct vs_service_device *service);
+static int serial_client_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_client_register(struct vs_client_serial *client,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_serial_client_driver *driver;
+
+ if (client->tx_atomic && !client->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ client->driver = &driver->vsdrv;
+ driver->client = client;
+
+ driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = false;
+ driver->vsdrv.rx_atomic = client->rx_atomic;
+ driver->vsdrv.tx_atomic = client->tx_atomic;
+
+ driver->vsdrv.probe = serial_client_probe;
+ driver->vsdrv.remove = serial_client_remove;
+ driver->vsdrv.receive = serial_handle_message;
+ driver->vsdrv.notify = serial_handle_notify;
+ driver->vsdrv.start = client->tx_atomic ?
+ serial_handle_start_bh : serial_handle_start;
+ driver->vsdrv.reset = client->tx_atomic ?
+ serial_handle_reset_bh : serial_handle_reset;
+ driver->vsdrv.tx_ready = serial_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ client->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_client_register);
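+
+/*
+ * Minimal registration sketch (hypothetical driver; the callback names
+ * below are placeholders, but the fields are the ones this file uses):
+ *
+ *	static struct vs_client_serial my_serial_client = {
+ *		.rx_atomic = false,
+ *		.tx_atomic = false,
+ *		.alloc     = my_alloc,
+ *		.release   = my_release,
+ *		.opened    = my_opened,
+ *		.closed    = my_closed,
+ *		.tx_ready  = my_tx_ready,
+ *		.serial    = { .msg_msg = my_msg },
+ *	};
+ *
+ *	err = __vservice_serial_client_register(&my_serial_client,
+ *						 "my_serial", THIS_MODULE);
+ */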
+
+int vservice_serial_client_unregister(struct vs_client_serial *client)
+{
+ struct vs_serial_client_driver *driver;
+
+ if (!client->driver)
+ return 0;
+
+ driver = to_client_driver(client->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ client->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_client_unregister);
+
+static int serial_client_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+ struct vs_client_serial_state *state;
+
+ state = client->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int serial_client_remove(struct vs_service_device *service)
+{
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ client->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (client->tx_ready)
+ client->tx_ready(state);
+
+ return 0;
+}
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_REQ_OPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_open);
+static int _vs_client_serial_req_close(struct vs_client_serial_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_REQ_CLOSE;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_close);
+static int _vs_client_serial_req_reopen(struct vs_client_serial_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_REQ_REOPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_reopen);
+static int
+serial_base_handle_ack_open(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ _state->serial.packet_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ _state->packet_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ _client->opened(_state);
+ return 0;
+
+}
+
+static int
+serial_base_handle_nack_open(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+ "Open operation failed for device %s\n",
+ VS_STATE_SERVICE_PTR(_state)->name);
+
+ return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_open);
+static int
+serial_base_handle_ack_close(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+static int
+serial_base_handle_nack_close(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_close);
+static int
+serial_base_handle_ack_reopen(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->reopened) {
+ _client->reopened(_state);
+ return 0;
+ }
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return _vs_client_serial_req_open(_state);
+
+}
+
+static int
+serial_base_handle_nack_reopen(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_reopen);
+struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf *b,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!b)
+ goto fail;
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->size = _state->serial.packet_size;
+ b->max_size = b->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_alloc_msg);
+int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state *_state,
+ struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->max_size = b->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_getbufs_msg);
+int vs_client_serial_serial_free_msg(struct vs_client_serial_state *_state,
+ struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_free_msg);
+static int
+vs_client_serial_serial_handle_msg(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ struct vs_pbuf b;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b.max_size = b.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_client->serial.msg_msg)
+ return _client->serial.msg_msg(_state, b, _mbuf);
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_handle_msg);
+int
+vs_client_serial_serial_send_msg(struct vs_client_serial_state *_state,
+ struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+ return -EINVAL;
+
+ if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (b.size < b.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ b.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_send_msg);
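+
+/*
+ * Typical transmit sequence for the serial "msg" message (sketch only;
+ * "data"/"count" are caller-supplied, and count must not exceed
+ * pbuf.size as returned by the allocator):
+ *
+ *	struct vs_pbuf pbuf;
+ *	struct vs_mbuf *mbuf;
+ *
+ *	mbuf = vs_client_serial_serial_alloc_msg(state, &pbuf, GFP_KERNEL);
+ *	if (IS_ERR(mbuf))
+ *		return PTR_ERR(mbuf);
+ *	memcpy(pbuf.data, data, count);
+ *	pbuf.size = count;
+ *	err = vs_client_serial_serial_send_msg(state, pbuf, mbuf);
+ *	if (err)
+ *		vs_client_serial_serial_free_msg(state, &pbuf, mbuf);
+ */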
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_client_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_serial *client =
+ to_client_driver(vsdrv)->client;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_SERIAL_BASE_ACK_OPEN:
+ ret = serial_base_handle_ack_open(client, state, _mbuf);
+ break;
+ case VSERVICE_SERIAL_BASE_NACK_OPEN:
+ ret = serial_base_handle_nack_open(client, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_SERIAL_BASE_ACK_CLOSE:
+ ret = serial_base_handle_ack_close(client, state, _mbuf);
+ break;
+ case VSERVICE_SERIAL_BASE_NACK_CLOSE:
+ ret = serial_base_handle_nack_close(client, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_SERIAL_BASE_ACK_REOPEN:
+ ret = serial_base_handle_ack_reopen(client, state, _mbuf);
+ break;
+ case VSERVICE_SERIAL_BASE_NACK_REOPEN:
+ ret = serial_base_handle_nack_reopen(client, state, _mbuf);
+ break;
+
+/** interface serial **/
+/* message msg */
+ case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+ ret = vs_client_serial_serial_handle_msg(client, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_client_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_serial *client =
+ to_client_driver(vsdrv)->client;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface serial **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+int vs_client_serial_reopen(struct vs_client_serial_state *_state)
+{
+ return _vs_client_serial_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_reopen);
+
+int vs_client_serial_close(struct vs_client_serial_state *_state)
+{
+ return _vs_client_serial_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serialClient Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/server.c b/drivers/vservices/protocol/serial/server.c
new file mode 100644
index 000000000000..e5d10340618d
--- /dev/null
+++ b/drivers/vservices/protocol/serial/server.c
@@ -0,0 +1,1086 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the serial server protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_serial_server_driver {
+ struct vs_server_serial *server;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+ container_of(d, struct vs_serial_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int serial_server_probe(struct vs_service_device *service);
+static int serial_server_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_server_register(struct vs_server_serial *server,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_serial_server_driver *driver;
+
+ if (server->tx_atomic && !server->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ server->driver = &driver->vsdrv;
+ driver->server = server;
+
+ driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = true;
+ driver->vsdrv.rx_atomic = server->rx_atomic;
+ driver->vsdrv.tx_atomic = server->tx_atomic;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.in_quota_min = 1;
+ driver->vsdrv.in_quota_best = server->in_quota_best ?
+ server->in_quota_best : driver->vsdrv.in_quota_min;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.out_quota_min = 1;
+ driver->vsdrv.out_quota_best = server->out_quota_best ?
+ server->out_quota_best : driver->vsdrv.out_quota_min;
+ driver->vsdrv.in_notify_count = VSERVICE_SERIAL_NBIT_IN__COUNT;
+ driver->vsdrv.out_notify_count = VSERVICE_SERIAL_NBIT_OUT__COUNT;
+
+ driver->vsdrv.probe = serial_server_probe;
+ driver->vsdrv.remove = serial_server_remove;
+ driver->vsdrv.receive = serial_handle_message;
+ driver->vsdrv.notify = serial_handle_notify;
+ driver->vsdrv.start = server->tx_atomic ?
+ serial_handle_start_bh : serial_handle_start;
+ driver->vsdrv.reset = server->tx_atomic ?
+ serial_handle_reset_bh : serial_handle_reset;
+ driver->vsdrv.tx_ready = serial_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ server->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_server_register);
+
+int vservice_serial_server_unregister(struct vs_server_serial *server)
+{
+ struct vs_serial_server_driver *driver;
+
+ if (!server->driver)
+ return 0;
+
+ driver = to_server_driver(server->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ server->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_server_unregister);
+
+static int serial_server_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+ struct vs_server_serial_state *state;
+
+ state = server->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int serial_server_remove(struct vs_service_device *service)
+{
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ server->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (server->tx_ready)
+ server->tx_ready(state);
+
+ return 0;
+}
+
+static int
+vs_server_serial_send_ack_open(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ /* Check the protocol state before allocating the reply so that an
+ * error return cannot leak the mbuf. */
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_ACK_OPEN;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _state->packet_size;
+ _state->serial.packet_size = _state->packet_size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_open);
+static int
+vs_server_serial_send_nack_open(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_NACK_OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_open);
+static int
+vs_server_serial_send_ack_close(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_ACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_close);
+static int
+vs_server_serial_send_nack_close(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_NACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_close);
+static int
+vs_server_serial_send_ack_reopen(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_ACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_reopen);
+static int
+vs_server_serial_send_nack_reopen(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_NACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_reopen);
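+
+/*
+ * Incoming "open" request: the state machine moves to CLOSED__OPEN and
+ * the driver's open() callback (if any) decides the outcome; the
+ * completion path then answers with ack_open, which also publishes the
+ * negotiated packet_size, or with nack_open on failure.
+ */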
+static int
+vs_server_serial_handle_req_open(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->open)
+ return vs_server_serial_open_complete(_state,
+ _server->open(_state));
+ return vs_server_serial_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS)
+ ret =
+ vs_server_serial_send_ack_open(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ else if (resp == VS_SERVER_RESP_FAILURE)
+ ret =
+ vs_server_serial_send_nack_open(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_open_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_open);
+static int
+vs_server_serial_handle_req_close(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->close)
+ return vs_server_serial_close_complete(_state,
+ _server->close(_state));
+ return vs_server_serial_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_close_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS)
+ ret =
+ vs_server_serial_send_ack_close(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ else if (resp == VS_SERVER_RESP_FAILURE)
+ ret =
+ vs_server_serial_send_nack_close(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+ wake_up_all(&_state->service->quota_wq);
+ }
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_close_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_close);
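+
+/*
+ * Reopen requests are only acknowledged when the driver implements a
+ * reopen() callback; otherwise the request is nacked and the connection
+ * stays in the RUNNING state.
+ */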
+static int
+vs_server_serial_handle_req_reopen(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->reopen)
+ return vs_server_serial_reopen_complete(_state,
+ _server->
+ reopen(_state));
+ else
+ return vs_server_serial_send_nack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC
+ : GFP_KERNEL);
+
+}
+
+int vs_server_serial_reopen_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp)
+{
+	int ret = 0;
+
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		ret = vs_server_serial_send_ack_reopen(_state,
+				vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR(_state)) ?
+				GFP_ATOMIC : GFP_KERNEL);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret = vs_server_serial_send_nack_reopen(_state,
+				vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR(_state)) ?
+				GFP_ATOMIC : GFP_KERNEL);
+	}
+
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_reopen);
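+/*
+ * On-the-wire layout of the serial "msg" message, as implied by the offsets
+ * used in the alloc/getbufs/send helpers below (a descriptive sketch, not a
+ * separate specification):
+ *
+ *	[ vs_message_id_t id ][ uint32_t size ][ up to packet_size payload bytes ]
+ *
+ * The size field records how much of the payload area is actually used;
+ * vs_server_serial_serial_send_msg() trims any unused space from the mbuf
+ * before sending.
+ */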
+struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf *b,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!b)
+ goto fail;
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->size = _state->serial.packet_size;
+ b->max_size = b->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_alloc_msg);
+int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state *_state,
+ struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->max_size = b->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_getbufs_msg);
+int vs_server_serial_serial_free_msg(struct vs_server_serial_state *_state,
+ struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_free_msg);
+int
+vs_server_serial_serial_send_msg(struct vs_server_serial_state *_state,
+ struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+ return -EINVAL;
+
+ if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (b.size < b.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ b.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_send_msg);
+static int
+vs_server_serial_serial_handle_msg(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ struct vs_pbuf b;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b.max_size = b.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_server->serial.msg_msg)
+ return _server->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_handle_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_server_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_serial *server =
+ to_server_driver(vsdrv)->server;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_SERIAL_BASE_REQ_OPEN:
+ ret = vs_server_serial_handle_req_open(server, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_SERIAL_BASE_REQ_CLOSE:
+ ret = vs_server_serial_handle_req_close(server, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_SERIAL_BASE_REQ_REOPEN:
+ ret = vs_server_serial_handle_req_reopen(server, state, _mbuf);
+ break;
+
+/** interface serial **/
+/* message msg */
+ case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+ ret = vs_server_serial_serial_handle_msg(server, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_server_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_serial *server =
+ to_server_driver(vsdrv)->server;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface serial **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serial Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.c b/drivers/vservices/session.c
new file mode 100644
index 000000000000..d695184b0f68
--- /dev/null
+++ b/drivers/vservices/session.c
@@ -0,0 +1,2913 @@
+/*
+ * drivers/vservices/session.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the generic session-management code for the vServices framework.
+ * It creates service and session devices on request from session and
+ * transport drivers, respectively; it also queues incoming messages from the
+ * transport and distributes them to the session's services.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+/* Minimum required time between resets to avoid throttling */
+#define RESET_THROTTLE_TIME msecs_to_jiffies(1000)
+
+/*
+ * Minimum/maximum reset throttling time. The reset throttle will start at
+ * the minimum and increase exponentially to the maximum.
+ */
+#define RESET_THROTTLE_MIN RESET_THROTTLE_TIME
+#define RESET_THROTTLE_MAX msecs_to_jiffies(8 * 1000)
+
+/*
+ * If the reset is being throttled and a sane reset (one that does not need
+ * throttling) is requested, throttling is disabled once the service's reset
+ * delay multiplied by this value has elapsed.
+ */
+#define RESET_THROTTLE_COOL_OFF_MULT 2
+
+/* IDR of session ids to sessions */
+static DEFINE_IDR(session_idr);
+DEFINE_MUTEX(vs_session_lock);
+EXPORT_SYMBOL_GPL(vs_session_lock);
+
+/* Notifier list for vService session events */
+static BLOCKING_NOTIFIER_HEAD(vs_session_notifier_list);
+
+static unsigned long default_debug_mask;
+module_param(default_debug_mask, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_debug_mask, "Default vServices debug mask");
+
+/* vServices root in sysfs at /sys/vservices */
+struct kobject *vservices_root;
+EXPORT_SYMBOL_GPL(vservices_root);
+
+/* vServices server root in sysfs at /sys/vservices/server-sessions */
+struct kobject *vservices_server_root;
+EXPORT_SYMBOL_GPL(vservices_server_root);
+
+/* vServices client root in sysfs at /sys/vservices/client-sessions */
+struct kobject *vservices_client_root;
+EXPORT_SYMBOL_GPL(vservices_client_root);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
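+/*
+ * Character device minor numbers encode both the session and the service:
+ * minor = session_num * VS_MAX_SERVICES + service_id (see the MKDEV() call
+ * in vs_service_register()), so the session is recovered as
+ * minor / VS_MAX_SERVICES and the service as minor % VS_MAX_SERVICES.
+ */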
+struct vs_service_device *vs_service_lookup_by_devt(dev_t dev)
+{
+ struct vs_session_device *session;
+ struct vs_service_device *service;
+
+ mutex_lock(&vs_session_lock);
+ session = idr_find(&session_idr, MINOR(dev) / VS_MAX_SERVICES);
+ get_device(&session->dev);
+ mutex_unlock(&vs_session_lock);
+
+ service = vs_session_get_service(session,
+ MINOR(dev) % VS_MAX_SERVICES);
+ put_device(&session->dev);
+
+ return service;
+}
+#endif
+
+struct vs_session_for_each_data {
+ int (*fn)(struct vs_session_device *session, void *data);
+ void *data;
+};
+
+int vs_session_for_each_from_idr(int id, void *session, void *_data)
+{
+ struct vs_session_for_each_data *data =
+ (struct vs_session_for_each_data *)_data;
+ return data->fn(session, data->data);
+}
+
+/**
+ * vs_session_for_each_locked - call a callback function for each session
+ * @fn: function to call
+ * @data: opaque pointer that is passed through to the function
+ */
+extern int vs_session_for_each_locked(
+ int (*fn)(struct vs_session_device *session, void *data),
+ void *data)
+{
+ struct vs_session_for_each_data priv = { .fn = fn, .data = data };
+
+ lockdep_assert_held(&vs_session_lock);
+
+ return idr_for_each(&session_idr, vs_session_for_each_from_idr,
+ &priv);
+}
+EXPORT_SYMBOL(vs_session_for_each_locked);
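+
+/*
+ * Usage sketch (illustrative; the callback below is hypothetical). The
+ * callback's return value is propagated by idr_for_each(), so returning a
+ * non-zero value stops the iteration early:
+ *
+ *	static int count_sessions(struct vs_session_device *session, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *
+ *	mutex_lock(&vs_session_lock);
+ *	vs_session_for_each_locked(count_sessions, &count);
+ *	mutex_unlock(&vs_session_lock);
+ */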
+
+/**
+ * vs_session_register_notify - register a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_register_notify);
+
+/**
+ * vs_session_unregister_notify - unregister a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_unregister_notify);
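+
+/*
+ * Usage sketch for the notifier interface (illustrative only; the handler
+ * name is hypothetical and the events it receives are not enumerated here):
+ *
+ *	static int my_session_event(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		...
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_session_nb = {
+ *		.notifier_call = my_session_event,
+ *	};
+ *
+ *	vs_session_register_notify(&my_session_nb);
+ */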
+
+/*
+ * Helper function for returning how long ago something happened.
+ * Marked as __maybe_unused since it is only called when
+ * CONFIG_VSERVICES_DEBUG is enabled; the annotation prevents the
+ * compile-time errors (unused-function warnings) that would otherwise
+ * occur in builds without that option.
+ */
+static __maybe_unused unsigned msecs_ago(unsigned long jiffy_value)
+{
+ return jiffies_to_msecs(jiffies - jiffy_value);
+}
+
+static void session_fatal_error_work(struct work_struct *work)
+{
+ struct vs_session_device *session = container_of(work,
+ struct vs_session_device, fatal_error_work);
+
+ session->transport->vt->reset(session->transport);
+}
+
+static void session_fatal_error(struct vs_session_device *session, gfp_t gfp)
+{
+ schedule_work(&session->fatal_error_work);
+}
+
+/*
+ * Service readiness state machine
+ *
+ * The states are:
+ *
+ * INIT: Initial state. Service may not be completely configured yet
+ * (typically because the protocol hasn't been set); call vs_service_start
+ * once configuration is complete. The disable count must be nonzero, and
+ * must never reach zero in this state.
+ * DISABLED: Service is not permitted to communicate. Non-core services are
+ * in this state whenever the core protocol and/or transport state does not
+ * allow them to be active; core services are only in this state transiently.
+ * The disable count must be nonzero; when it reaches zero, the service
+ * transitions to RESET state.
+ * RESET: Service drivers are inactive at both ends, but the core service
+ * state allows the service to become active. The session will schedule a
+ * future transition to READY state when entering this state, but the
+ * transition may be delayed to throttle the rate at which resets occur.
+ * READY: All core-service and session-layer policy allows the service to
+ * communicate; it will become active as soon as it has a protocol driver.
+ * ACTIVE: The driver is present and communicating.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement, unless the disable count is nonzero in which case we
+ * will enter DISABLED state.
+ * LOCAL_DELETE: As for LOCAL_RESET, but we will enter the DELETED state
+ * instead of RESET or DISABLED.
+ * DELETED: The service is no longer present on the session; the service
+ * device structure may still exist because something is holding a reference
+ * to it.
+ *
+ * The permitted transitions are:
+ *
+ * From To Trigger
+ * INIT DISABLED vs_service_start
+ * DISABLED RESET vs_service_enable (disable_count -> 0)
+ * RESET READY End of throttle delay (may be 0)
+ * READY ACTIVE Latter of probe() and entering READY
+ * {READY, ACTIVE}
+ * LOCAL_RESET vs_service_reset
+ * {READY, ACTIVE, LOCAL_RESET}
+ * RESET vs_service_handle_reset (server)
+ * RESET DISABLED vs_service_disable (server)
+ * {READY, ACTIVE, LOCAL_RESET}
+ * DISABLED vs_service_handle_reset (client)
+ * {INIT, RESET, READY, ACTIVE, LOCAL_RESET}
+ * DISABLED vs_service_disable_noncore
+ * {ACTIVE, LOCAL_RESET}
+ * LOCAL_DELETE vs_service_delete
+ * {INIT, DISABLED, RESET, READY}
+ * DELETED vs_service_delete
+ * LOCAL_DELETE DELETED vs_service_handle_reset
+ * vs_service_disable_noncore
+ *
+ * See the documentation for the triggers for details.
+ */
+
+enum vs_service_readiness {
+ VS_SERVICE_INIT,
+ VS_SERVICE_DISABLED,
+ VS_SERVICE_RESET,
+ VS_SERVICE_READY,
+ VS_SERVICE_ACTIVE,
+ VS_SERVICE_LOCAL_RESET,
+ VS_SERVICE_LOCAL_DELETE,
+ VS_SERVICE_DELETED,
+};
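+
+/*
+ * Typical server-side lifecycle of a non-core service, sketched from the
+ * transition table above (not an exhaustive trace):
+ *
+ *	INIT --vs_service_start()--> DISABLED
+ *	     --vs_service_enable()--> RESET
+ *	     --ready work (after any throttle delay)--> READY
+ *	     --driver probed--> ACTIVE
+ *
+ * A locally initiated reset moves ACTIVE to LOCAL_RESET; once the remote end
+ * acknowledges, the service returns to RESET (or DISABLED if the disable
+ * count is non-zero) and the cycle repeats from the ready work.
+ */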
+
+/* Session activation states. */
+enum {
+ VS_SESSION_RESET,
+ VS_SESSION_ACTIVATE,
+ VS_SESSION_ACTIVE,
+};
+
+/**
+ * vs_service_start - Start a service by moving it from the init state to the
+ * disabled state.
+ *
+ * @service: The service to start.
+ *
+ * Returns true if the service was started, or false if it was not.
+ */
+bool vs_service_start(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+
+ WARN_ON(!service->protocol);
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (service->readiness != VS_SERVICE_INIT) {
+ if (service->readiness != VS_SERVICE_DELETED)
+ dev_err(&service->dev,
+ "start called from invalid state %d\n",
+ service->readiness);
+ mutex_unlock(&service->ready_lock);
+ return false;
+ }
+
+ if (service->id != 0 && session_drv->service_added) {
+ int err = session_drv->service_added(session, service);
+ if (err < 0) {
+ dev_err(&session->dev, "Failed to add service %d: %d\n",
+ service->id, err);
+ mutex_unlock(&service->ready_lock);
+ return false;
+ }
+ }
+
+ service->readiness = VS_SERVICE_DISABLED;
+ service->disable_count = 1;
+ service->last_reset_request = jiffies;
+
+ mutex_unlock(&service->ready_lock);
+
+ /* Tell userspace about the service. */
+ dev_set_uevent_suppress(&service->dev, false);
+ kobject_uevent(&service->dev.kobj, KOBJ_ADD);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(vs_service_start);
+
+static void cancel_pending_rx(struct vs_service_device *service);
+static void queue_ready_work(struct vs_service_device *service);
+
+static void __try_start_service(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ struct vs_transport *transport;
+ int err;
+ struct vs_service_driver *driver;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ /* We can't start if the service is not ready yet. */
+ if (service->readiness != VS_SERVICE_READY)
+ return;
+
+ /*
+ * There should never be anything in the RX queue at this point.
+ * If there is, it can seriously confuse the service drivers for
+ * no obvious reason, so we check.
+ */
+ if (WARN_ON(!list_empty(&service->rx_queue)))
+ cancel_pending_rx(service);
+
+ if (!service->driver_probed) {
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "ready with no driver\n");
+ return;
+ }
+
+ /* Prepare the transport to support the service. */
+ transport = session->transport;
+ err = transport->vt->service_start(transport, service);
+
+ if (err < 0) {
+ /* fatal error attempting to start; reset and try again */
+ service->readiness = VS_SERVICE_RESET;
+ service->last_reset_request = jiffies;
+ service->last_reset = jiffies;
+ queue_ready_work(service);
+
+ return;
+ }
+
+ service->readiness = VS_SERVICE_ACTIVE;
+
+ driver = to_vs_service_driver(service->dev.driver);
+ if (driver->start)
+ driver->start(service);
+
+ if (service->id && session_drv->service_start) {
+ err = session_drv->service_start(session, service);
+ if (err < 0) {
+ dev_err(&session->dev, "Failed to start service %s (%d): %d\n",
+ dev_name(&service->dev),
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ }
+ }
+}
+
+static void try_start_service(struct vs_service_device *service)
+{
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ __try_start_service(service);
+
+ mutex_unlock(&service->ready_lock);
+}
+
+static void service_ready_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, ready_work.work);
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "ready work - last reset request was %u ms ago\n",
+ msecs_ago(service->last_reset_request));
+
+ /*
+ * Make sure there's no reset work pending from an earlier driver
+ * failure. We should already be inactive at this point, so it's safe
+ * to just cancel it.
+ */
+ cancel_work_sync(&service->reset_work);
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (service->readiness != VS_SERVICE_RESET) {
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "ready work found readiness of %d, doing nothing\n",
+ service->readiness);
+ mutex_unlock(&service->ready_lock);
+ return;
+ }
+
+ service->readiness = VS_SERVICE_READY;
+ /* Record the time at which this happened, for throttling. */
+ service->last_ready = jiffies;
+
+ /* Tell userspace that the service is ready. */
+ kobject_uevent(&service->dev.kobj, KOBJ_ONLINE);
+
+ /* Start the service, if it has a driver attached. */
+ __try_start_service(service);
+
+ mutex_unlock(&service->ready_lock);
+}
+
+static int __enable_service(struct vs_service_device *service);
+
+/**
+ * __reset_service - make a service inactive, and tell its driver, the
+ * transport, and possibly the remote partner
+ * @service: The service to reset
+ * @notify_remote: If true, the partner is notified of the reset
+ *
+ * This routine is called to make an active service inactive. If the given
+ * service is currently active, it drops any queued messages for the service,
+ * and then informs the service driver and the transport layer that the
+ * service has reset. It sets the service readiness to VS_SERVICE_LOCAL_RESET
+ * to indicate that the driver is no longer active.
+ *
+ * This routine has no effect on services that are not active.
+ *
+ * The caller must hold the target service's ready lock.
+ */
+static void __reset_service(struct vs_service_device *service,
+ bool notify_remote)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ struct vs_service_driver *driver = NULL;
+ struct vs_transport *transport;
+ int err;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ /* If we're already inactive, there's nothing to do. */
+ if (service->readiness != VS_SERVICE_ACTIVE)
+ return;
+
+ service->last_reset = jiffies;
+ service->readiness = VS_SERVICE_LOCAL_RESET;
+
+ cancel_pending_rx(service);
+
+ if (!WARN_ON(!service->driver_probed))
+ driver = to_vs_service_driver(service->dev.driver);
+
+ if (driver && driver->reset)
+ driver->reset(service);
+
+ wake_up_all(&service->quota_wq);
+
+ transport = vs_service_get_session(service)->transport;
+
+ /*
+ * Ask the transport to reset the service. If this returns a positive
+ * value, we need to leave the service disabled, and the transport
+ * will re-enable it. To avoid allowing the disable count to go
+ * negative if that re-enable races with this callback returning, we
+ * disable the service beforehand and re-enable it if the callback
+ * returns zero.
+ */
+ service->disable_count++;
+ err = transport->vt->service_reset(transport, service);
+ if (err < 0) {
+ dev_err(&session->dev, "Failed to reset service %d: %d (transport)\n",
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ } else if (!err) {
+ err = __enable_service(service);
+ }
+
+ if (notify_remote) {
+ if (service->id) {
+ err = session_drv->service_local_reset(session,
+ service);
+ if (err == VS_SERVICE_ALREADY_RESET) {
+ service->readiness = VS_SERVICE_RESET;
+ service->last_reset = jiffies;
+ queue_ready_work(service);
+
+ } else if (err < 0) {
+ dev_err(&session->dev, "Failed to reset service %d: %d (session)\n",
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ }
+ } else {
+ session->transport->vt->reset(session->transport);
+ }
+ }
+
+ /* Tell userspace that the service is no longer active. */
+ kobject_uevent(&service->dev.kobj, KOBJ_OFFLINE);
+}
+
+/**
+ * reset_service - reset a service and inform the remote partner
+ * @service: The service to reset
+ *
+ * This routine is called when a reset is locally initiated (other than
+ * implicitly by a session / core service reset). It bumps the reset request
+ * timestamp, acquires the necessary locks, and calls __reset_service.
+ *
+ * This routine returns with the service ready lock held, to allow the caller
+ * to make any other state changes that must be atomic with the service
+ * reset.
+ */
+static void reset_service(struct vs_service_device *service)
+ __acquires(service->ready_lock)
+{
+ service->last_reset_request = jiffies;
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ __reset_service(service, true);
+}
+
+/**
+ * vs_service_reset - initiate a service reset
+ * @service: the service that is to be reset
+ * @caller: the service that is initiating the reset
+ *
+ * This routine informs the partner that the given service is being reset,
+ * then disables and flushes the service's receive queues and resets its
+ * driver. The service will be automatically re-enabled once the partner has
+ * acknowledged the reset (see vs_session_handle_service_reset, above).
+ *
+ * If the given service is the core service, this will perform a transport
+ * reset, which implicitly resets (on the server side) or destroys (on
+ * the client side) every other service on the session.
+ *
+ * If the given service is already being reset, this has no effect, other
+ * than to delay completion of the reset if it is being throttled.
+ *
+ * For lock safety reasons, a service can only be directly reset by itself,
+ * the core service, or the service that created it (which is typically also
+ * the core service).
+ *
+ * A service that wishes to reset itself must not do so while holding its state
+ * lock or while running on its own workqueue. In these circumstances, call
+ * vs_service_reset_nosync() instead. Note that returning an error code
+ * (any negative number) from a driver callback forces a call to
+ * vs_service_reset_nosync() and prints an error message.
+ */
+int vs_service_reset(struct vs_service_device *service,
+ struct vs_service_device *caller)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ if (caller != service && caller != service->owner) {
+ struct vs_service_device *core_service = session->core_service;
+
+ WARN_ON(!core_service);
+ if (caller != core_service)
+ return -EPERM;
+ }
+
+ reset_service(service);
+ /* reset_service returns with ready_lock held, but we don't need it */
+ mutex_unlock(&service->ready_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_reset);
+
+/**
+ * vs_service_reset_nosync - asynchronously reset a service.
+ * @service: the service that is to be reset
+ *
+ * This routine triggers a reset for the nominated service. It may be called
+ * from any context, including interrupt context. It does not wait for the
+ * reset to occur, and provides no synchronisation guarantees when called from
+ * outside the target service.
+ *
+ * This is intended only for service drivers that need to reset themselves
+ * from a context that would not normally allow it. In other cases, use
+ * vs_service_reset.
+ */
+void vs_service_reset_nosync(struct vs_service_device *service)
+{
+ service->pending_reset = true;
+ schedule_work(&service->reset_work);
+}
+EXPORT_SYMBOL_GPL(vs_service_reset_nosync);
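+
+/*
+ * Usage sketch (illustrative; the driver callback below is hypothetical).
+ * A driver that needs to reset its own service from a context where
+ * vs_service_reset() is unsafe, such as its receive handler (which runs
+ * with the service state lock held on the service's own workqueue), can
+ * use the asynchronous variant:
+ *
+ *	static int foo_receive(struct vs_service_device *service,
+ *			struct vs_mbuf *mbuf)
+ *	{
+ *		...
+ *		if (fatal_error)
+ *			vs_service_reset_nosync(service);
+ *		...
+ *	}
+ *
+ * Returning a negative error code from the callback has the same effect;
+ * see dequeue_and_handle_received_message() below.
+ */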
+
+static void
+vs_service_remove_sysfs_entries(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ sysfs_remove_link(session->sysfs_entry, service->sysfs_name);
+ sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+}
+
+static void vs_session_release_service_id(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ mutex_lock(&session->service_idr_lock);
+ idr_remove(&session->service_idr, service->id);
+ mutex_unlock(&session->service_idr_lock);
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "service id deallocated\n");
+}
+
+static void destroy_service(struct vs_service_device *service,
+ bool notify_remote)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ struct vs_service_device *core_service __maybe_unused =
+ session->core_service;
+ int err;
+
+ lockdep_assert_held(&service->ready_lock);
+ WARN_ON(service->readiness != VS_SERVICE_DELETED);
+
+ /* Notify the core service and transport that the service is gone */
+ session->transport->vt->service_remove(session->transport, service);
+ if (notify_remote && service->id && session_drv->service_removed) {
+ err = session_drv->service_removed(session, service);
+ if (err < 0) {
+ dev_err(&session->dev,
+ "Failed to remove service %d: %d\n",
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ }
+ }
+
+ /*
+ * At this point the service is guaranteed to be gone on the client
+ * side, so we can safely release the service ID.
+ */
+ if (session->is_server)
+ vs_session_release_service_id(service);
+
+ /*
+ * This guarantees that any concurrent vs_session_get_service() that
+ * found the service before we removed it from the IDR will take a
+ * reference before we release ours.
+ *
+ * This similarly protects for_each_[usable_]service().
+ */
+ synchronize_rcu();
+
+ /* Matches device_initialize() in vs_service_register() */
+ put_device(&service->dev);
+}
+
+/**
+ * disable_service - prevent a service becoming ready
+ * @service: the service that is to be disabled
+ * @force: true if the service is known to be in reset
+ *
+ * This routine may be called for any inactive service. Once disabled, the
+ * service cannot be made ready by the session, and thus cannot become active,
+ * until vs_service_enable() is called for it. If multiple calls are made to
+ * this function, they must be balanced by vs_service_enable() calls.
+ *
+ * If the force option is true, then any pending unacknowledged reset will be
+ * presumed to have been acknowledged. This is used when the core service is
+ * entering reset.
+ *
+ * This is used by the core service client to prevent the service restarting
+ * until the server is ready (i.e., a server_ready message is received); by
+ * the session layer to stop all communication while the core service itself
+ * is in reset; and by the transport layer when the transport was unable to
+ * complete reset of a service in its reset callback (typically because
+ * a service had passed message buffers to another Linux subsystem and could
+ * not free them immediately).
+ *
+ * In any case, there is no need for the operation to be signalled in any
+ * way, because the service is already in reset. It simply delays future
+ * signalling of service readiness.
+ */
+static void disable_service(struct vs_service_device *service, bool force)
+{
+ lockdep_assert_held(&service->ready_lock);
+
+ switch(service->readiness) {
+ case VS_SERVICE_INIT:
+ case VS_SERVICE_DELETED:
+ case VS_SERVICE_LOCAL_DELETE:
+ dev_err(&service->dev, "disabled while uninitialised\n");
+ break;
+ case VS_SERVICE_ACTIVE:
+ dev_err(&service->dev, "disabled while active\n");
+ break;
+ case VS_SERVICE_LOCAL_RESET:
+ /*
+ * Will go to DISABLED state when reset completes, unless
+ * it's being forced (i.e. we're moving to a core protocol
+ * state that implies everything else is reset).
+ */
+ if (force)
+ service->readiness = VS_SERVICE_DISABLED;
+ service->disable_count++;
+ break;
+ default:
+ service->readiness = VS_SERVICE_DISABLED;
+ service->disable_count++;
+ break;
+ }
+
+ cancel_delayed_work(&service->ready_work);
+}
+
+static int service_handle_reset(struct vs_session_device *session,
+ struct vs_service_device *target, bool disable)
+{
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ int err = 0;
+
+ mutex_lock_nested(&target->ready_lock, target->lock_subclass);
+
+ switch (target->readiness) {
+ case VS_SERVICE_LOCAL_DELETE:
+ target->readiness = VS_SERVICE_DELETED;
+ destroy_service(target, true);
+ break;
+ case VS_SERVICE_ACTIVE:
+ /*
+ * Reset the service and send a reset notification.
+ *
+ * We only send notifications for non-core services. This is
+ * because core notifies by sending a transport reset, which
+ * is what brought us here in the first place. Note that we
+ * must already hold the core service state lock iff the
+ * target is non-core.
+ */
+ target->last_reset_request = jiffies;
+ __reset_service(target, target->id != 0);
+ /* fall through */
+ case VS_SERVICE_LOCAL_RESET:
+ target->readiness = target->disable_count ?
+ VS_SERVICE_DISABLED : VS_SERVICE_RESET;
+ if (disable)
+ disable_service(target, false);
+ if (target->readiness != VS_SERVICE_DISABLED)
+ queue_ready_work(target);
+ break;
+ case VS_SERVICE_READY:
+ /* Tell userspace that the service is no longer ready. */
+ kobject_uevent(&target->dev.kobj, KOBJ_OFFLINE);
+ /* fall through */
+ case VS_SERVICE_RESET:
+ /*
+ * This can happen for a non-core service if we get a reset
+ * request from the server on the client side, after the
+ * client has enabled the service but before it is active.
+ * Note that the service is already active on the server side
+ * at this point. The client's delay may be due to either
+ * reset throttling or the absence of a driver.
+ *
+ * We bump the reset request timestamp, disable the service
+ * again, and send back an acknowledgement.
+ */
+ if (disable && target->id) {
+ target->last_reset_request = jiffies;
+
+ err = session_drv->service_local_reset(
+ session, target);
+ if (err < 0) {
+ dev_err(&session->dev,
+					"Failed to reset service %d: %d\n",
+ target->id, err);
+ session_fatal_error(session,
+ GFP_KERNEL);
+ }
+
+ disable_service(target, false);
+ break;
+ }
+ /* fall through */
+ case VS_SERVICE_DISABLED:
+ /*
+ * This can happen for the core service if we get a reset
+ * before the transport has activated, or before the core
+ * service has become ready.
+ *
+ * We bump the reset request timestamp, and disable the
+ * service again if the transport had already activated and
+ * enabled it.
+ */
+ if (disable && !target->id) {
+ target->last_reset_request = jiffies;
+
+ if (target->readiness != VS_SERVICE_DISABLED)
+ disable_service(target, false);
+
+ break;
+ }
+ /* fall through */
+ default:
+ dev_warn(&target->dev, "remote reset while inactive (%d)\n",
+ target->readiness);
+ err = -EPROTO;
+ break;
+ }
+
+ mutex_unlock(&target->ready_lock);
+ return err;
+}
+
+/**
+ * vs_service_handle_reset - handle an incoming notification of a reset
+ * @session: the session that owns the service
+ * @service_id: the ID of the service that is to be reset
+ * @disable: if true, the service will not be automatically re-enabled
+ *
+ * This routine is called by the core service when the remote end notifies us
+ * of a non-core service reset. The service must be in ACTIVE, LOCAL_RESET or
+ * LOCAL_DELETED state. It must be called with the core service's state lock
+ * held.
+ *
+ * If the service was in ACTIVE state, the core service is called back to send
+ * a notification to the other end. If it was in LOCAL_DELETED state, it is
+ * unregistered.
+ */
+int vs_service_handle_reset(struct vs_session_device *session,
+ vs_service_id_t service_id, bool disable)
+{
+ struct vs_service_device *target;
+ int ret;
+
+ if (!service_id)
+ return -EINVAL;
+
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -ENODEV;
+
+ ret = service_handle_reset(session, target, disable);
+ vs_put_service(target);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_reset);
+
+static int __enable_service(struct vs_service_device *service)
+{
+ if (WARN_ON(!service->disable_count))
+ return -EINVAL;
+
+ if (--service->disable_count > 0)
+ return 0;
+
+ /*
+ * If the service is still resetting, it can't become ready until the
+ * reset completes. If it has been deleted, it will never become
+ * ready. In either case, there's nothing more to do.
+ */
+ if ((service->readiness == VS_SERVICE_LOCAL_RESET) ||
+ (service->readiness == VS_SERVICE_LOCAL_DELETE) ||
+ (service->readiness == VS_SERVICE_DELETED))
+ return 0;
+
+ if (WARN_ON(service->readiness != VS_SERVICE_DISABLED))
+ return -EINVAL;
+
+ service->readiness = VS_SERVICE_RESET;
+ service->last_reset = jiffies;
+ queue_ready_work(service);
+
+ return 0;
+}
+
+/**
+ * vs_service_enable - allow a service to become ready
+ * @service: the service that is to be enabled
+ *
+ * Calling this routine for a service permits the session layer to make the
+ * service ready. It will do so as soon as any outstanding reset throttling
+ * is complete, and will then start the service once it has a driver attached.
+ *
+ * Services are disabled, requiring a call to this routine to re-enable them:
+ * - when first initialised (after vs_service_start),
+ * - when reset on the client side by vs_service_handle_reset,
+ * - when the transport has delayed completion of a reset, and
+ * - when the server-side core protocol is disconnected or reset by
+ * vs_session_disable_noncore.
+ */
+int vs_service_enable(struct vs_service_device *service)
+{
+ int ret;
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ ret = __enable_service(service);
+
+ mutex_unlock(&service->ready_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_enable);
+
+/*
+ * Service work functions
+ */
+static void queue_rx_work(struct vs_service_device *service)
+{
+ bool rx_atomic;
+
+ rx_atomic = vs_service_has_atomic_rx(service);
+ vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+ &service->dev, "Queuing rx %s\n",
+ rx_atomic ? "tasklet (atomic)" : "work (cansleep)");
+
+ if (rx_atomic)
+ tasklet_schedule(&service->rx_tasklet);
+ else
+ queue_work(service->work_queue, &service->rx_work);
+}
+
+static void cancel_pending_rx(struct vs_service_device *service)
+{
+ struct vs_mbuf *mbuf;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ cancel_work_sync(&service->rx_work);
+ tasklet_kill(&service->rx_tasklet);
+
+ spin_lock_irq(&service->rx_lock);
+ while (!list_empty(&service->rx_queue)) {
+ mbuf = list_first_entry(&service->rx_queue,
+ struct vs_mbuf, queue);
+ list_del_init(&mbuf->queue);
+ spin_unlock_irq(&service->rx_lock);
+ vs_service_free_mbuf(service, mbuf);
+ spin_lock_irq(&service->rx_lock);
+ }
+ service->tx_ready = false;
+ spin_unlock_irq(&service->rx_lock);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service);
+static unsigned long reset_cool_off(struct vs_service_device *service);
+
+static void service_cooloff_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, cooloff_work.work);
+ struct vs_session_device *session = vs_service_get_session(service);
+ unsigned long current_time = jiffies, wake_time;
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (reset_throttle_cooled_off(service)) {
+ vs_debug(VS_DEBUG_SESSION, session,
+ "Reset thrashing cooled off (delay = %u ms, cool off = %u ms, last reset %u ms ago, last reset request was %u ms ago)\n",
+ jiffies_to_msecs(service->reset_delay),
+ jiffies_to_msecs(reset_cool_off(service)),
+ msecs_ago(service->last_reset),
+ msecs_ago(service->last_reset_request));
+
+ service->reset_delay = 0;
+
+ /*
+ * If the service is already in reset, then queue_ready_work
+ * has already run and has deferred queuing of the ready_work
+ * until cooloff. Schedule the ready work to run immediately.
+ */
+ if (service->readiness == VS_SERVICE_RESET)
+ schedule_delayed_work(&service->ready_work, 0);
+ } else {
+ /*
+ * This can happen if last_reset_request has been bumped
+ * since the cooloff work was first queued. We need to
+ * work out how long it is until the service cools off,
+ * then reschedule ourselves.
+ */
+ wake_time = reset_cool_off(service) +
+ service->last_reset_request;
+
+ WARN_ON(time_after(current_time, wake_time));
+
+ schedule_delayed_work(&service->cooloff_work,
+ wake_time - current_time);
+ }
+
+ mutex_unlock(&service->ready_lock);
+}
+
+static void
+service_reset_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, reset_work);
+
+ service->pending_reset = false;
+
+ vs_service_reset(service, service);
+}
+
+/* Returns true if there are more messages to handle */
+static bool
+dequeue_and_handle_received_message(struct vs_service_device *service)
+{
+ struct vs_service_driver *driver =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_session_device *session = vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ struct vs_service_stats *stats = &service->stats;
+ struct vs_mbuf *mbuf;
+ size_t size;
+ int ret;
+
+ /* Don't do rx work unless the service is active */
+ if (service->readiness != VS_SERVICE_ACTIVE)
+ return false;
+
+ /* Atomically take an item from the queue */
+ spin_lock_irq(&service->rx_lock);
+ if (!list_empty(&service->rx_queue)) {
+ mbuf = list_first_entry(&service->rx_queue, struct vs_mbuf,
+ queue);
+ list_del_init(&mbuf->queue);
+ spin_unlock_irq(&service->rx_lock);
+ size = vt->mbuf_size(mbuf);
+
+ /*
+ * Call the message handler for the service. The service's
+ * message handler is responsible for freeing the mbuf when it
+ * is done with it.
+ */
+ ret = driver->receive(service, mbuf);
+ if (ret < 0) {
+ atomic_inc(&service->stats.recv_failures);
+ dev_err(&service->dev,
+ "receive returned %d; resetting service\n",
+ ret);
+ vs_service_reset_nosync(service);
+ return false;
+ } else {
+ atomic_add(size, &service->stats.recv_bytes);
+ atomic_inc(&service->stats.recv_mbufs);
+ }
+
+ } else if (service->tx_ready) {
+ service->tx_ready = false;
+ spin_unlock_irq(&service->rx_lock);
+
+ /*
+ * Update the tx_ready stats accounting and then call the
+ * service's tx_ready handler.
+ */
+ atomic_inc(&stats->nr_tx_ready);
+ if (atomic_read(&stats->nr_over_quota) > 0) {
+ int total;
+
+ total = atomic_add_return(jiffies_to_msecs(jiffies -
+ stats->over_quota_time),
+ &stats->over_quota_time_total);
+ atomic_set(&stats->over_quota_time_avg, total /
+ atomic_read(&stats->nr_over_quota));
+ }
+ atomic_set(&service->is_over_quota, 0);
+
+ /*
+ * Note that a service's quota may reduce at any point, even
+ * during the tx_ready handler. This is important if a service
+ * has an ordered list of pending messages to send. If a
+ * message fails to send from the tx_ready handler due to
+ * over-quota then subsequent messages in the same handler may
+ * send successfully. To avoid sending messages in the
+ * incorrect order the service's tx_ready handler should
+ * return immediately if a message fails to send.
+ */
+ ret = driver->tx_ready(service);
+ if (ret < 0) {
+ dev_err(&service->dev,
+ "tx_ready returned %d; resetting service\n",
+ ret);
+ vs_service_reset_nosync(service);
+ return false;
+ }
+ } else {
+ spin_unlock_irq(&service->rx_lock);
+ }
+
+ /*
+ * There's no need to lock for this list_empty: if we race
+ * with a msg enqueue, we'll be rescheduled by the other side,
+ * and if we race with a dequeue, we'll just do nothing when
+ * we run (or will be cancelled before we run).
+ */
+ return !list_empty(&service->rx_queue) || service->tx_ready;
+}
+
+static void service_rx_tasklet(unsigned long data)
+{
+ struct vs_service_device *service = (struct vs_service_device *)data;
+ bool resched;
+
+ /*
+ * There is no need to acquire the state spinlock or mutex here,
+ * because this tasklet is disabled when the lock is held. These
+ * are annotations for sparse and lockdep, respectively.
+ *
+ * We can't annotate the implicit mutex acquire because lockdep gets
+ * upset about inconsistent softirq states.
+ */
+ __acquire(service);
+ spin_acquire(&service->state_spinlock.dep_map, 0, 0, _THIS_IP_);
+
+ resched = dequeue_and_handle_received_message(service);
+
+ if (resched)
+ tasklet_schedule(&service->rx_tasklet);
+
+ spin_release(&service->state_spinlock.dep_map, 0, _THIS_IP_);
+ __release(service);
+}
+
+static void service_rx_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, rx_work);
+ bool requeue;
+
+ /*
+ * We must acquire the state mutex here to protect services that
+ * are using vs_service_state_lock().
+ *
+ * There is no need to acquire the spinlock, which is never used in
+ * drivers with task context receive handlers.
+ */
+ vs_service_state_lock(service);
+
+ requeue = dequeue_and_handle_received_message(service);
+
+ vs_service_state_unlock(service);
+
+ if (requeue)
+ queue_work(service->work_queue, work);
+}
+
+/*
+ * Service sysfs statistics counters. These files are all atomic_t, and
+ * read only, so we use a generator macro to avoid code duplication.
+ */
+#define service_stat_attr(__name) \
+ static ssize_t service_stat_##__name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct vs_service_device *service = \
+ to_vs_service_device(dev); \
+ \
+ return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ atomic_read(&service->stats.__name)); \
+ } \
+ static DEVICE_ATTR(__name, S_IRUGO, \
+ service_stat_##__name##_show, NULL);
+
+service_stat_attr(sent_mbufs);
+service_stat_attr(sent_bytes);
+service_stat_attr(recv_mbufs);
+service_stat_attr(recv_bytes);
+service_stat_attr(nr_over_quota);
+service_stat_attr(nr_tx_ready);
+service_stat_attr(over_quota_time_total);
+service_stat_attr(over_quota_time_avg);
+
+static struct attribute *service_stat_dev_attrs[] = {
+ &dev_attr_sent_mbufs.attr,
+ &dev_attr_sent_bytes.attr,
+ &dev_attr_recv_mbufs.attr,
+ &dev_attr_recv_bytes.attr,
+ &dev_attr_nr_over_quota.attr,
+ &dev_attr_nr_tx_ready.attr,
+ &dev_attr_over_quota_time_total.attr,
+ &dev_attr_over_quota_time_avg.attr,
+ NULL,
+};
+
+static const struct attribute_group service_stat_attributes = {
+ .name = "stats",
+ .attrs = service_stat_dev_attrs,
+};
+
+static void delete_service(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ bool notify_on_destroy = true;
+
+ /* FIXME: Jira ticket SDK-3495 - philipd. */
+ /* This should be the caller's responsibility */
+ vs_get_service(service);
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ /*
+ * If we're on the client side, the service should already have been
+ * disabled at this point.
+ */
+ WARN_ON(service->id != 0 && !session->is_server &&
+ service->readiness != VS_SERVICE_DISABLED &&
+ service->readiness != VS_SERVICE_DELETED);
+
+ /*
+ * Make sure the service is not active, and notify the remote end if
+ * it needs to be reset. Note that we already hold the core service
+ * state lock iff this is a non-core service.
+ */
+ __reset_service(service, true);
+
+ /*
+ * If the remote end is aware that the service is inactive, we can
+ * delete right away; otherwise we need to wait for a notification
+ * that the service has reset.
+ */
+ switch (service->readiness) {
+ case VS_SERVICE_LOCAL_DELETE:
+ case VS_SERVICE_DELETED:
+ /* Nothing to do here */
+ mutex_unlock(&service->ready_lock);
+ vs_put_service(service);
+ return;
+ case VS_SERVICE_ACTIVE:
+ BUG();
+ break;
+ case VS_SERVICE_LOCAL_RESET:
+ service->readiness = VS_SERVICE_LOCAL_DELETE;
+ break;
+ case VS_SERVICE_INIT:
+ notify_on_destroy = false;
+ /* Fall through */
+ default:
+ service->readiness = VS_SERVICE_DELETED;
+ destroy_service(service, notify_on_destroy);
+ break;
+ }
+
+ mutex_unlock(&service->ready_lock);
+
+ /*
+	 * Remove the service symlink from the
+	 * /sys/vservices/<server|client>-sessions/ directory.
+ */
+ vs_service_remove_sysfs_entries(session, service);
+
+ sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+
+ /*
+ * On the client-side we need to release the service id as soon as
+ * the service is deleted. Otherwise the server may attempt to create
+ * a new service with this id.
+ */
+ if (!session->is_server)
+ vs_session_release_service_id(service);
+
+ device_del(&service->dev);
+ vs_put_service(service);
+}
+
+/**
+ * vs_service_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ * @caller: the service initiating deletion
+ *
+ * Services may only be deleted by their owner (on the server side), or by the
+ * core service. This function must not be called for the core service.
+ */
+int vs_service_delete(struct vs_service_device *service,
+ struct vs_service_device *caller)
+{
+ struct vs_session_device *session =
+ vs_service_get_session(service);
+ struct vs_service_device *core_service = session->core_service;
+
+ if (WARN_ON(!core_service))
+ return -ENODEV;
+
+ if (!service->id)
+ return -EINVAL;
+
+ if (caller != service->owner && caller != core_service)
+ return -EPERM;
+
+ delete_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_delete);
+
+/**
+ * vs_service_handle_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ *
+ * This is a variant of vs_service_delete which must only be called by the
+ * core service. It is used by the core service client when a service_removed
+ * message is received.
+ */
+int vs_service_handle_delete(struct vs_service_device *service)
+{
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(service);
+ struct vs_service_device *core_service __maybe_unused =
+ session->core_service;
+
+ lockdep_assert_held(&core_service->state_mutex);
+
+ delete_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_delete);
+
+static void service_cleanup_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, cleanup_work);
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "cleanup\n");
+
+ if (service->owner)
+ vs_put_service(service->owner);
+
+ /* Put our reference to the session */
+ if (service->dev.parent)
+ put_device(service->dev.parent);
+
+ tasklet_kill(&service->rx_tasklet);
+ cancel_work_sync(&service->rx_work);
+ cancel_delayed_work_sync(&service->cooloff_work);
+ cancel_delayed_work_sync(&service->ready_work);
+ cancel_work_sync(&service->reset_work);
+
+ if (service->work_queue)
+ destroy_workqueue(service->work_queue);
+
+ kfree(service->sysfs_name);
+ kfree(service->name);
+ kfree(service->protocol);
+ kfree(service);
+}
+
+static void vs_service_release(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+ &service->dev, "release\n");
+
+ /*
+ * We need to defer cleanup to avoid a circular dependency between the
+ * core service's state lock (which can be held at this point, on the
+ * client side) and any non-core service's reset work (which we must
+ * cancel here, and which acquires the core service state lock).
+ */
+ schedule_work(&service->cleanup_work);
+}
+
+static int service_add_idr(struct vs_session_device *session,
+ struct vs_service_device *service, vs_service_id_t service_id)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+ int err, base_id, id;
+
+ if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID)
+ base_id = 1;
+ else
+ base_id = service_id;
+
+retry:
+ if (!idr_pre_get(&session->service_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&session->service_idr_lock);
+ err = idr_get_new_above(&session->service_idr, service, base_id, &id);
+ if (err == 0) {
+ if (service_id != VS_SERVICE_AUTO_ALLOCATE_ID &&
+ id != service_id) {
+			/* Failed to allocate the requested service id */
+ idr_remove(&session->service_idr, id);
+ mutex_unlock(&session->service_idr_lock);
+ return -EBUSY;
+ }
+ if (id > VS_MAX_SERVICE_ID) {
+ /* We are out of service ids */
+ idr_remove(&session->service_idr, id);
+ mutex_unlock(&session->service_idr_lock);
+ return -ENOSPC;
+ }
+ }
+ mutex_unlock(&session->service_idr_lock);
+ if (err == -EAGAIN)
+ goto retry;
+ if (err < 0)
+ return err;
+#else
+ int start, end, id;
+
+ if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID) {
+ start = 1;
+ end = VS_MAX_SERVICES;
+ } else {
+ start = service_id;
+ end = service_id + 1;
+ }
+
+ mutex_lock(&session->service_idr_lock);
+ id = idr_alloc(&session->service_idr, service, start, end,
+ GFP_KERNEL);
+ mutex_unlock(&session->service_idr_lock);
+
+ if (id == -ENOSPC)
+ return -EBUSY;
+ else if (id < 0)
+ return id;
+#endif
+
+ service->id = id;
+ return 0;
+}
+
+static int
+vs_service_create_sysfs_entries(struct vs_session_device *session,
+ struct vs_service_device *service, vs_service_id_t id)
+{
+ int ret;
+ char *sysfs_name, *c;
+
+ /* Add a symlink to session device inside service device sysfs */
+ ret = sysfs_create_link(&service->dev.kobj, &session->dev.kobj,
+ VS_SESSION_SYMLINK_NAME);
+ if (ret) {
+ dev_err(&service->dev, "Error %d creating session symlink\n",
+ ret);
+ goto fail;
+ }
+
+	/* Build the "<name>:<id>" string used for the sysfs symlink */
+ sysfs_name = kasprintf(GFP_KERNEL, "%s:%d", service->name, id);
+ if (!sysfs_name) {
+ ret = -ENOMEM;
+ goto fail_session_link;
+ }
+
+ /*
+	 * We don't want symlink names containing '/', which would be
+	 * interpreted as a directory separator, so replace every '/' with '!'.
+ */
+ while ((c = strchr(sysfs_name, '/')))
+ *c = '!';
+ ret = sysfs_create_link(session->sysfs_entry, &service->dev.kobj,
+ sysfs_name);
+ if (ret)
+ goto fail_free_sysfs_name;
+
+ service->sysfs_name = sysfs_name;
+
+ return 0;
+
+fail_free_sysfs_name:
+ kfree(sysfs_name);
+fail_session_link:
+ sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+fail:
+ return ret;
+}
+
+/**
+ * vs_service_register - create and register a new vs_service_device
+ * @session: the session device that is the parent of the service
+ * @owner: the service responsible for managing the new service
+ * @service_id: the ID of the new service
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * This function should only be called by a session driver that is bound to
+ * the given session.
+ *
+ * The given service_id must not have been passed to a prior successful
+ * vs_service_register call, unless the service ID has since been freed by a
+ * call to the session driver's service_removed callback.
+ *
+ * The core service state lock must not be held while calling this function.
+ */
+struct vs_service_device *vs_service_register(struct vs_session_device *session,
+ struct vs_service_device *owner, vs_service_id_t service_id,
+ const char *protocol, const char *name, const void *plat_data)
+{
+ struct vs_service_device *service;
+ struct vs_session_driver *session_drv;
+ int ret = -EIO;
+ char *c;
+
+ if (service_id && !owner) {
+ dev_err(&session->dev, "Non-core service must have an owner\n");
+ ret = -EINVAL;
+ goto fail;
+ } else if (!service_id && owner) {
+ dev_err(&session->dev, "Core service must not have an owner\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!session->dev.driver)
+ goto fail;
+
+ session_drv = to_vs_session_driver(session->dev.driver);
+
+ service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&service->rx_queue);
+ INIT_WORK(&service->rx_work, service_rx_work);
+ INIT_WORK(&service->reset_work, service_reset_work);
+ INIT_DELAYED_WORK(&service->ready_work, service_ready_work);
+ INIT_DELAYED_WORK(&service->cooloff_work, service_cooloff_work);
+ INIT_WORK(&service->cleanup_work, service_cleanup_work);
+ spin_lock_init(&service->rx_lock);
+ init_waitqueue_head(&service->quota_wq);
+
+ service->owner = vs_get_service(owner);
+
+ service->readiness = VS_SERVICE_INIT;
+ mutex_init(&service->ready_lock);
+ service->driver_probed = false;
+
+ /*
+ * Service state locks - A service is only allowed to use one of these
+ */
+ spin_lock_init(&service->state_spinlock);
+ mutex_init(&service->state_mutex);
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ service->state_spinlock_used = false;
+ service->state_mutex_used = false;
+#endif
+
+ /* Lock ordering
+ *
+ * The dependency order for the various service locks is as follows:
+ *
+ * cooloff_work
+ * reset_work
+ * ready_work
+ * ready_lock/0
+ * rx_work/0
+ * state_mutex/0
+ * ready_lock/1
+ * ...
+ * state_mutex/n
+ * state_spinlock
+ *
+	 * The subclass is the service's rank in the hierarchy of
+	 * service ownership. This results in core having subclass 0 on
+	 * the server side and 1 on the client side. Services directly
+	 * created by the core will have a lock subclass value of 2 for
+	 * servers and 3 for clients. Services created by non-core
+	 * services will have a lock subclass value of x + 2, where x
+	 * is the lock subclass of the creator service (so servers
+	 * always have even-numbered lock subclasses and clients have
+	 * odd-numbered lock subclasses).
+ *
+ * If a service driver has any additional locks for protecting
+ * internal state, they will generally fit between state_mutex/n and
+ * ready_lock/n+1 on this list. For the core service, this applies to
+ * the session lock.
+ */
+
+ if (owner)
+ service->lock_subclass = owner->lock_subclass + 2;
+ else
+ service->lock_subclass = session->is_server ? 0 : 1;
+
+#ifdef CONFIG_LOCKDEP
+ if (service->lock_subclass >= MAX_LOCKDEP_SUBCLASSES) {
+ dev_warn(&session->dev, "Owner hierarchy is too deep, lockdep will fail\n");
+ } else {
+ /*
+ * We need to set the default subclass for the rx work,
+ * because the workqueue API doesn't (and can't) provide
+ * anything like lock_nested() for it.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ /*
+ * Lockdep allows a specific lock's subclass to be set with
+ * the subclass argument to lockdep_init_map(). However, prior
+ * to Linux 3.3, that only works the first time it is called
+ * for a given class and subclass. So we have to fake it,
+ * putting every subclass in a different class, so the only
+ * thing that breaks is printing the subclass in lockdep
+ * warnings.
+ */
+ static struct lock_class_key
+ rx_work_keys[MAX_LOCKDEP_SUBCLASSES];
+ struct lock_class_key *key =
+ &rx_work_keys[service->lock_subclass];
+#else
+ struct lock_class_key *key = service->rx_work.lockdep_map.key;
+#endif
+
+ /*
+ * We can't use the lockdep_set_class() macro because the
+ * work's lockdep map is called .lockdep_map instead of
+ * .dep_map.
+ */
+ lockdep_init_map(&service->rx_work.lockdep_map,
+ "&service->rx_work", key,
+ service->lock_subclass);
+ }
+#endif
+
+ /*
+ * Copy the protocol and name. Remove any leading or trailing
+ * whitespace characters (including newlines) since the strings
+ * may have been passed via sysfs files.
+ */
+ if (protocol) {
+ service->protocol = kstrdup(protocol, GFP_KERNEL);
+ if (!service->protocol) {
+ ret = -ENOMEM;
+ goto fail_copy_protocol;
+ }
+ c = strim(service->protocol);
+ if (c != service->protocol)
+ memmove(service->protocol, c,
+ strlen(service->protocol) + 1);
+ }
+
+ service->name = kstrdup(name, GFP_KERNEL);
+ if (!service->name) {
+ ret = -ENOMEM;
+ goto fail_copy_name;
+ }
+ c = strim(service->name);
+ if (c != service->name)
+ memmove(service->name, c, strlen(service->name) + 1);
+
+ service->is_server = session_drv->is_server;
+
+ /* Grab a reference to the session we are on */
+ service->dev.parent = get_device(&session->dev);
+ service->dev.bus = session_drv->service_bus;
+ service->dev.release = vs_service_release;
+
+ service->last_reset = 0;
+ service->last_reset_request = 0;
+ service->last_ready = 0;
+ service->reset_delay = 0;
+
+ device_initialize(&service->dev);
+ service->dev.platform_data = (void *)plat_data;
+
+ ret = service_add_idr(session, service, service_id);
+ if (ret)
+ goto fail_add_idr;
+
+#ifdef CONFIG_VSERVICES_NAMED_DEVICE
+ /* Integrate session and service names in vservice devnodes */
+ dev_set_name(&service->dev, "vservice-%s:%s:%s:%d:%d",
+ session->is_server ? "server" : "client",
+ session->name, service->name,
+ session->session_num, service->id);
+#else
+ dev_set_name(&service->dev, "%s:%d", dev_name(&session->dev),
+ service->id);
+#endif
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ if (service->id > 0)
+ service->dev.devt = MKDEV(vservices_cdev_major,
+ (session->session_num * VS_MAX_SERVICES) +
+ service->id);
+#endif
+
+ service->work_queue = vs_create_workqueue(dev_name(&service->dev));
+ if (!service->work_queue) {
+ ret = -ENOMEM;
+ goto fail_create_workqueue;
+ }
+
+ tasklet_init(&service->rx_tasklet, service_rx_tasklet,
+ (unsigned long)service);
+
+ /*
+ * If this is the core service, set the core service pointer in the
+ * session.
+ */
+ if (service->id == 0) {
+ mutex_lock(&session->service_idr_lock);
+ if (session->core_service) {
+ ret = -EEXIST;
+ mutex_unlock(&session->service_idr_lock);
+ goto fail_become_core;
+ }
+
+ /* Put in vs_session_bus_remove() */
+ session->core_service = vs_get_service(service);
+ mutex_unlock(&session->service_idr_lock);
+ }
+
+ /* Notify the transport */
+ ret = session->transport->vt->service_add(session->transport, service);
+ if (ret) {
+ dev_err(&session->dev,
+ "Failed to add service %d (%s:%s) to transport: %d\n",
+ service->id, service->name,
+ service->protocol, ret);
+ goto fail_transport_add;
+ }
+
+ /* Delay uevent until vs_service_start(). */
+ dev_set_uevent_suppress(&service->dev, true);
+
+ ret = device_add(&service->dev);
+ if (ret)
+ goto fail_device_add;
+
+ /* Create the service statistics sysfs group */
+ ret = sysfs_create_group(&service->dev.kobj, &service_stat_attributes);
+ if (ret)
+ goto fail_sysfs_create_group;
+
+ /* Create additional sysfs files */
+ ret = vs_service_create_sysfs_entries(session, service, service->id);
+ if (ret)
+ goto fail_sysfs_add_entries;
+
+ return service;
+
+fail_sysfs_add_entries:
+ sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+fail_sysfs_create_group:
+ device_del(&service->dev);
+fail_device_add:
+ session->transport->vt->service_remove(session->transport, service);
+fail_transport_add:
+ if (service->id == 0) {
+ session->core_service = NULL;
+ vs_put_service(service);
+ }
+fail_become_core:
+fail_create_workqueue:
+ vs_session_release_service_id(service);
+fail_add_idr:
+ /*
+ * device_initialize() has been called, so we must call put_device()
+ * and let vs_service_release() handle the rest of the cleanup.
+ */
+ put_device(&service->dev);
+ return ERR_PTR(ret);
+
+fail_copy_name:
+ kfree(service->protocol);
+fail_copy_protocol:
+ kfree(service);
+fail:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vs_service_register);
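+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a session
+ * driver bound to the session might create and start a non-core service
+ * as below. The service ID value, the "com.example.echo" protocol string
+ * and the "echo0" name are hypothetical.
+ *
+ * struct vs_service_device *service;
+ *
+ * service = vs_service_register(session, core_service, 5,
+ * "com.example.echo", "echo0", NULL);
+ * if (IS_ERR(service))
+ * return PTR_ERR(service);
+ *
+ * vs_service_start(service);
+ */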
+
+/**
+ * vs_session_get_service - look up a service by ID on a session
+ * @session: the session to search for the service on
+ * @service_id: ID of the service to find
+ *
+ * Returns a referenced pointer to the service, or NULL if no such service
+ * exists. The caller must call vs_put_service() when it is finished with the
+ * service.
+ */
+struct vs_service_device *
+vs_session_get_service(struct vs_session_device *session,
+ vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+
+ if (!session)
+ return NULL;
+
+ rcu_read_lock();
+ service = idr_find(&session->service_idr, service_id);
+ if (!service) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ vs_get_service(service);
+ rcu_read_unlock();
+
+ return service;
+}
+EXPORT_SYMBOL_GPL(vs_session_get_service);
+
+/**
+ * __for_each_service - Iterate over all non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ *
+ * Iterate over all services on a session, excluding the core service, and
+ * call a callback function on each.
+ */
+static void __for_each_service(struct vs_session_device *session,
+ void (*func)(struct vs_service_device *))
+{
+ struct vs_service_device *service;
+ int id;
+
+ for (id = 1; ; id++) {
+ rcu_read_lock();
+ service = idr_get_next(&session->service_idr, &id);
+ if (!service) {
+ rcu_read_unlock();
+ break;
+ }
+ vs_get_service(service);
+ rcu_read_unlock();
+
+ func(service);
+ vs_put_service(service);
+ }
+}
+
+/**
+ * vs_session_delete_noncore - immediately delete all non-core services
+ * @session: the session whose services are to be deleted
+ *
+ * This function disables and deletes all non-core services without notifying
+ * the core service. It must only be called by the core service, with its state
+ * lock held. It is used when the core service client disconnects or
+ * resets, and when the core service server has its driver removed.
+ */
+void vs_session_delete_noncore(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service __maybe_unused =
+ session->core_service;
+
+ lockdep_assert_held(&core_service->state_mutex);
+
+ vs_session_disable_noncore(session);
+
+ __for_each_service(session, delete_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_delete_noncore);
+
+/**
+ * vs_session_for_each_service - Iterate over all initialised and non-deleted
+ * non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ * @data: Extra data to pass to the callback
+ *
+ * Iterate over all services on a session, excluding the core service and any
+ * service that has been deleted or has not yet had vs_service_start() called,
+ * and call a callback function on each. The callback function is called with
+ * the service's ready lock held.
+ */
+void vs_session_for_each_service(struct vs_session_device *session,
+ void (*func)(struct vs_service_device *, void *), void *data)
+{
+ struct vs_service_device *service;
+ int id;
+
+ for (id = 1; ; id++) {
+ rcu_read_lock();
+ service = idr_get_next(&session->service_idr, &id);
+ if (!service) {
+ rcu_read_unlock();
+ break;
+ }
+ vs_get_service(service);
+ rcu_read_unlock();
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (service->readiness != VS_SERVICE_LOCAL_DELETE &&
+ service->readiness != VS_SERVICE_DELETED &&
+ service->readiness != VS_SERVICE_INIT)
+ func(service, data);
+
+ mutex_unlock(&service->ready_lock);
+ vs_put_service(service);
+ }
+}
+
+static void force_disable_service(struct vs_service_device *service,
+ void *unused)
+{
+ lockdep_assert_held(&service->ready_lock);
+
+ if (service->readiness == VS_SERVICE_ACTIVE)
+ __reset_service(service, false);
+
+ disable_service(service, true);
+}
+
+/**
+ * vs_session_disable_noncore - immediately disable all non-core services
+ * @session: the session whose services are to be disabled
+ *
+ * This function must be called by the core service driver to disable all
+ * services, whenever it resets or is otherwise disconnected. It is called
+ * directly by the server-side core service, and by the client-side core
+ * service via vs_session_delete_noncore().
+ */
+void vs_session_disable_noncore(struct vs_session_device *session)
+{
+ vs_session_for_each_service(session, force_disable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_disable_noncore);
+
+static void try_enable_service(struct vs_service_device *service, void *unused)
+{
+ lockdep_assert_held(&service->ready_lock);
+
+ __enable_service(service);
+}
+
+/**
+ * vs_session_enable_noncore - enable all disabled non-core services
+ * @session: the session whose services are to be enabled
+ *
+ * This function is called by the core server driver to enable all services
+ * when the core client connects.
+ */
+void vs_session_enable_noncore(struct vs_session_device *session)
+{
+ vs_session_for_each_service(session, try_enable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_enable_noncore);
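+
+/*
+ * Usage sketch (illustrative only): a core server driver would typically
+ * pair these calls with its own connection state, for example (the
+ * callback names below are hypothetical):
+ *
+ * static void example_core_client_connected(struct vs_service_device *core)
+ * {
+ * vs_session_enable_noncore(vs_service_get_session(core));
+ * }
+ *
+ * static void example_core_reset(struct vs_service_device *core)
+ * {
+ * vs_session_disable_noncore(vs_service_get_session(core));
+ * }
+ */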
+
+/**
+ * vs_session_handle_message - process an incoming message from a transport
+ * @session: the session that is receiving the message
+ * @mbuf: a buffer containing the message payload
+ * @service_id: the id of the service that the message was addressed to
+ *
+ * This routine will return 0 if the buffer was accepted, or a negative value
+ * otherwise. In the latter case the caller should free the buffer. If the
+ * error is fatal, this routine will reset the service.
+ *
+ * This routine may be called from interrupt context.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ */
+int vs_session_handle_message(struct vs_session_device *session,
+ struct vs_mbuf *mbuf, vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+ struct vs_transport *transport;
+ unsigned long flags;
+
+ transport = session->transport;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service) {
+ dev_err(&session->dev, "message for unknown service %d\n",
+ service_id);
+ session_fatal_error(session, GFP_ATOMIC);
+ return -ENOTCONN;
+ }
+
+ /*
+ * Take the rx lock before checking service readiness. This guarantees
+ * that if __reset_service() has just made the service inactive, we
+ * either see it and don't enqueue the message, or else enqueue the
+ * message before cancel_pending_rx() runs (and removes it).
+ */
+ spin_lock_irqsave(&service->rx_lock, flags);
+
+ /* If the service is not active, drop the message. */
+ if (service->readiness != VS_SERVICE_ACTIVE) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return -ECONNRESET;
+ }
+
+ list_add_tail(&mbuf->queue, &service->rx_queue);
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+
+ /* Schedule processing of the message by the service's drivers. */
+ queue_rx_work(service);
+ vs_put_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_message);
+
+/**
+ * vs_session_quota_available - notify a service that it can transmit
+ * @session: the session owning the service that is ready
+ * @service_id: the id of the service that is ready
+ * @count: the number of buffers that just became ready
+ * @send_tx_ready: true if quota has just become nonzero due to a buffer being
+ * freed by the remote communication partner
+ *
+ * This routine is called by the transport driver when a send-direction
+ * message buffer becomes free. It wakes up any task that is waiting for
+ * send quota to become available.
+ *
+ * This routine may be called from interrupt context from the transport
+ * driver, and as such, it may not sleep.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ *
+ * If the send_tx_ready argument is true, this function also schedules a
+ * call to the driver's tx_ready callback. Note that this never has priority
+ * over handling incoming messages; it will only be handled once the receive
+ * queue is empty. This is to increase batching of outgoing messages, and also
+ * to reduce the chance that an outgoing message will be dropped by the partner
+ * because an incoming message has already changed the state.
+ *
+ * In general, task context drivers should use the waitqueue, and softirq
+ * context drivers (with tx_atomic set) should use tx_ready.
+ */
+void vs_session_quota_available(struct vs_session_device *session,
+ vs_service_id_t service_id, unsigned count,
+ bool send_tx_ready)
+{
+ struct vs_service_device *service;
+ unsigned long flags;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service) {
+ dev_err(&session->dev, "tx ready for unknown service %d\n",
+ service_id);
+ session_fatal_error(session, GFP_ATOMIC);
+ return;
+ }
+
+ wake_up_nr(&service->quota_wq, count);
+
+ if (send_tx_ready) {
+ /*
+ * Take the rx lock before checking service readiness. This
+ * guarantees that if __reset_service() has just made the
+ * service inactive, we either see it and don't set the tx_ready
+ * flag, or else set the flag before cancel_pending_rx() runs
+ * (and clears it).
+ */
+ spin_lock_irqsave(&service->rx_lock, flags);
+
+ /* If the service is not active, drop the tx_ready event */
+ if (service->readiness != VS_SERVICE_ACTIVE) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return;
+ }
+
+ service->tx_ready = true;
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+
+ /* Schedule RX processing by the service driver. */
+ queue_rx_work(service);
+ }
+
+ vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_quota_available);
+
+/**
+ * vs_session_handle_notify - process an incoming notification from a transport
+ * @session: the session that is receiving the notification
+ * @bits: notification bits to be passed to the driver's notify callback
+ * @service_id: the id of the service that the notification was addressed to
+ *
+ * This function may be called from interrupt context from the transport driver,
+ * and as such, it may not sleep.
+ */
+void vs_session_handle_notify(struct vs_session_device *session,
+ unsigned long bits, vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+ struct vs_service_driver *driver;
+ unsigned long flags;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service) {
+ /* Ignore the notification since the service id doesn't exist */
+ dev_err(&session->dev, "notification for unknown service %d\n",
+ service_id);
+ return;
+ }
+
+ /*
+ * Take the rx lock before checking service readiness. This guarantees
+ * that if __reset_service() has just made the service inactive, we
+ * either see it and don't send the notification, or else send it
+ * before cancel_pending_rx() runs (and thus before the driver is
+ * deactivated).
+ */
+ spin_lock_irqsave(&service->rx_lock, flags);
+
+ /* If the service is not active, drop the notification. */
+ if (service->readiness != VS_SERVICE_ACTIVE) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return;
+ }
+
+ /* There should be a driver bound on the service */
+ if (WARN_ON(!service->dev.driver)) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return;
+ }
+
+ driver = to_vs_service_driver(service->dev.driver);
+ /* Call the driver's notify function */
+ driver->notify(service, bits);
+
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_notify);
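+
+/*
+ * Usage sketch (illustrative only): a transport driver's receive path is
+ * expected to demultiplex on the service id and forward to the session
+ * layer, freeing the buffer itself if the message is rejected. The
+ * example_transport_free_mbuf() helper named here is hypothetical; each
+ * transport provides its own buffer management.
+ *
+ * static void example_transport_rx(struct vs_session_device *session,
+ * struct vs_mbuf *mbuf, vs_service_id_t id)
+ * {
+ * if (vs_session_handle_message(session, mbuf, id) < 0)
+ * example_transport_free_mbuf(mbuf);
+ * }
+ *
+ * Freed send buffers and incoming notifications are forwarded in the same
+ * way, via vs_session_quota_available() and vs_session_handle_notify().
+ */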
+
+static unsigned long reset_cool_off(struct vs_service_device *service)
+{
+ return service->reset_delay * RESET_THROTTLE_COOL_OFF_MULT;
+}
+
+static bool ready_needs_delay(struct vs_service_device *service)
+{
+ /*
+ * We throttle resets if too little time elapsed between the service
+ * last becoming ready, and the service last starting a reset.
+ *
+ * We do not use the current time here because it includes the time
+ * taken by the local service driver to actually process the reset.
+ */
+ return service->last_reset && service->last_ready && time_before(
+ service->last_reset,
+ service->last_ready + RESET_THROTTLE_TIME);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service)
+{
+ /*
+ * Reset throttling cools off if enough time has elapsed since the
+ * last reset request.
+ *
+ * We check against the last requested reset, not the last serviced
+ * reset or ready. If we are throttling, a reset may not have been
+ * serviced for some time even though we are still receiving requests.
+ */
+ return service->reset_delay && service->last_reset_request &&
+ time_after(jiffies, service->last_reset_request +
+ reset_cool_off(service));
+}
+
+/*
+ * Queue up the ready work for a service. If a service is resetting too fast
+ * then it will be throttled using an exponentially increasing delay before
+ * marking it ready. If the reset speed backs off then the ready throttling
+ * will be cleared. If a service reaches the maximum throttling delay then all
+ * resets will be ignored until the cool off period has elapsed.
+ *
+ * The basic logic of the reset throttling is:
+ *
+ * - If a reset request is processed and the last ready was less than
+ * RESET_THROTTLE_TIME ago, then the ready needs to be delayed to
+ * throttle resets.
+ *
+ * - The ready delay increases exponentially on each throttled reset
+ * between RESET_THROTTLE_MIN and RESET_THROTTLE_MAX.
+ *
+ * - If RESET_THROTTLE_MAX is reached then no ready will be sent until the
+ * reset requests have cooled off.
+ *
+ * - Reset requests have cooled off when no reset requests have been
+ * received for RESET_THROTTLE_COOL_OFF_MULT * the service's current
+ * ready delay. The service's reset throttling is disabled.
+ *
+ * Note: Be careful when adding print statements, including debugging, to
+ * this function. The ready throttling is intended to prevent DoSing of the
+ * vServices due to repeated resets (e.g. because of a persistent failure).
+ * Adding a printk on each reset, for example, would result in syslog
+ * spamming, which is a DoS attack in itself.
+ *
+ * The ready lock must be held by the caller.
+ */
+static void queue_ready_work(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ unsigned long delay;
+ bool wait_for_cooloff = false;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ /* This should only be called when the service enters reset. */
+ WARN_ON(service->readiness != VS_SERVICE_RESET);
+
+ if (ready_needs_delay(service)) {
+ /* Reset delay increments exponentially */
+ if (!service->reset_delay) {
+ service->reset_delay = RESET_THROTTLE_MIN;
+ } else if (service->reset_delay < RESET_THROTTLE_MAX) {
+ service->reset_delay *= 2;
+ } else {
+ wait_for_cooloff = true;
+ }
+
+ delay = service->reset_delay;
+ } else {
+ /* The reset request appears to have been sane. */
+ delay = 0;
+ }
+
+ if (service->reset_delay > 0) {
+ /*
+ * Schedule cooloff work, to set the reset_delay to 0 if
+ * the reset requests stop for long enough.
+ */
+ schedule_delayed_work(&service->cooloff_work,
+ reset_cool_off(service));
+ }
+
+ if (wait_for_cooloff) {
+ /*
+ * We need to finish cooling off before we service resets
+ * again. Schedule cooloff_work to run after the current
+ * cooloff period ends; it may reschedule itself even later
+ * if any more requests arrive.
+ */
+ dev_err(&session->dev,
+ "Service %s is resetting too fast - must cool off for %u ms\n",
+ dev_name(&service->dev),
+ jiffies_to_msecs(reset_cool_off(service)));
+ return;
+ }
+
+ if (delay)
+ dev_err(&session->dev,
+ "Service %s is resetting too fast - delaying ready by %u ms\n",
+ dev_name(&service->dev),
+ jiffies_to_msecs(delay));
+
+ vs_debug(VS_DEBUG_SESSION, session,
+ "Service %s will become ready in %u ms\n",
+ dev_name(&service->dev),
+ jiffies_to_msecs(delay));
+
+ if (service->last_ready)
+ vs_debug(VS_DEBUG_SESSION, session,
+ "Last became ready %u ms ago\n",
+ msecs_ago(service->last_ready));
+ if (service->reset_delay >= RESET_THROTTLE_MAX)
+ dev_err(&session->dev, "Service %s hit max reset throttle\n",
+ dev_name(&service->dev));
+
+ schedule_delayed_work(&service->ready_work, delay);
+}
+
+static void session_activation_work(struct work_struct *work)
+{
+ struct vs_session_device *session = container_of(work,
+ struct vs_session_device, activation_work);
+ struct vs_service_device *core_service = session->core_service;
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ int activation_state;
+ int ret;
+
+ if (WARN_ON(!core_service))
+ return;
+
+ if (WARN_ON(!session_drv))
+ return;
+
+ /*
+ * We use an atomic to prevent duplicate activations if we race with
+ * an activate after a reset. This is very unlikely, but possible if
+ * this work item is preempted.
+ */
+ activation_state = atomic_cmpxchg(&session->activation_state,
+ VS_SESSION_ACTIVATE, VS_SESSION_ACTIVE);
+
+ switch (activation_state) {
+ case VS_SESSION_ACTIVATE:
+ vs_debug(VS_DEBUG_SESSION, session,
+ "core service will be activated\n");
+ vs_service_enable(core_service);
+ break;
+
+ case VS_SESSION_RESET:
+ vs_debug(VS_DEBUG_SESSION, session,
+ "core service will be deactivated\n");
+
+ /* Handle the core service reset */
+ ret = service_handle_reset(session, core_service, true);
+
+ /* Tell the transport if the reset succeeded */
+ if (ret >= 0)
+ session->transport->vt->ready(session->transport);
+ else
+ dev_err(&session->dev, "core service reset unhandled: %d\n",
+ ret);
+
+ break;
+
+ default:
+ vs_debug(VS_DEBUG_SESSION, session,
+ "core service already active\n");
+ break;
+ }
+}
+
+/**
+ * vs_session_handle_reset - Handle a reset at the session layer.
+ * @session: Session to reset
+ *
+ * This function is called by the transport when it receives a transport-level
+ * reset notification.
+ *
+ * After a session is reset by calling this function, it will reset all of its
+ * attached services, and then call the transport's ready callback. The
+ * services will remain in reset until the session is re-activated by a call
+ * to vs_session_handle_activate().
+ *
+ * Calling this function on a session that is already reset is permitted, as
+ * long as the transport accepts the consequent duplicate ready callbacks.
+ *
+ * A newly created session is initially in the reset state, and will not call
+ * the transport's ready callback. The transport may choose to either act as
+ * if the ready callback had been called, or call this function again to
+ * trigger a new ready callback.
+ */
+void vs_session_handle_reset(struct vs_session_device *session)
+{
+ atomic_set(&session->activation_state, VS_SESSION_RESET);
+
+ schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_reset);
+
+/**
+ * vs_session_handle_activate - Allow a session to leave the reset state.
+ * @session: Session to mark active.
+ *
+ * This function is called by the transport when a transport-level reset is
+ * completed; that is, after the session layer has reset its services and
+ * called the ready callback, at *both* ends of the connection.
+ */
+void vs_session_handle_activate(struct vs_session_device *session)
+{
+ atomic_set(&session->activation_state, VS_SESSION_ACTIVATE);
+
+ schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_activate);
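+
+/*
+ * Usage sketch (illustrative only): on a transport-level reset the
+ * transport driver calls
+ *
+ * vs_session_handle_reset(session);
+ *
+ * and, once both ends have reset their services and invoked the ready
+ * callback, it completes the handshake with
+ *
+ * vs_session_handle_activate(session);
+ */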
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", session->session_num);
+}
+
+/*
+ * The vServices session device type
+ */
+static ssize_t is_server_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", session->is_server);
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", session->name);
+}
+
+#ifdef CONFIG_VSERVICES_DEBUG
+static ssize_t debug_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%.8lx\n", session->debug_mask);
+}
+
+static ssize_t debug_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ int err;
+
+ err = kstrtoul(buf, 0, &session->debug_mask);
+ if (err)
+ return err;
+
+ /* Clear any bits we don't know about */
+ session->debug_mask &= VS_DEBUG_ALL;
+
+ return count;
+}
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+static struct device_attribute vservices_session_dev_attrs[] = {
+ __ATTR_RO(id),
+ __ATTR_RO(is_server),
+ __ATTR_RO(name),
+#ifdef CONFIG_VSERVICES_DEBUG
+ __ATTR(debug_mask, S_IRUGO | S_IWUSR,
+ debug_mask_show, debug_mask_store),
+#endif
+ __ATTR_NULL,
+};
+
+static int vs_session_free_idr(struct vs_session_device *session)
+{
+ mutex_lock(&vs_session_lock);
+ idr_remove(&session_idr, session->session_num);
+ mutex_unlock(&vs_session_lock);
+ return 0;
+}
+
+static void vs_session_device_release(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ vs_session_free_idr(session);
+
+ kfree(session->name);
+ kfree(session);
+}
+
+/*
+ * The vServices session bus
+ */
+static int vs_session_bus_match(struct device *dev,
+ struct device_driver *driver)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_session_driver *session_drv = to_vs_session_driver(driver);
+
+ return (session->is_server == session_drv->is_server);
+}
+
+static int vs_session_bus_remove(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_service_device *core_service = session->core_service;
+
+ if (!core_service)
+ return 0;
+
+ /*
+ * Abort any pending session activation. We rely on the transport to
+ * not call vs_session_handle_activate after this point.
+ */
+ cancel_work_sync(&session->activation_work);
+
+ /* Abort any pending fatal error handling, which is redundant now. */
+ cancel_work_sync(&session->fatal_error_work);
+
+ /*
+ * Delete the core service. This will implicitly delete everything
+ * else (in reset on the client side, and in release on the server
+ * side). The session holds a reference, so this won't release the
+ * service struct.
+ */
+ delete_service(core_service);
+
+ /* Now clean up the core service. */
+ session->core_service = NULL;
+
+ /* Matches the get in vs_service_register() */
+ vs_put_service(core_service);
+
+ return 0;
+}
+
+static int vservices_session_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ dev_dbg(dev, "uevent\n");
+
+ if (add_uevent_var(env, "IS_SERVER=%d", session->is_server))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vservices_session_shutdown(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ dev_dbg(dev, "shutdown\n");
+
+ /* Do a transport reset */
+ session->transport->vt->reset(session->transport);
+}
+
+struct bus_type vs_session_bus_type = {
+ .name = "vservices-session",
+ .match = vs_session_bus_match,
+ .remove = vs_session_bus_remove,
+ .dev_attrs = vservices_session_dev_attrs,
+ .uevent = vservices_session_uevent,
+ .shutdown = vservices_session_shutdown,
+};
+EXPORT_SYMBOL_GPL(vs_session_bus_type);
+
+/*
+ * Common code for the vServices client and server buses
+ */
+int vs_service_bus_probe(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+ struct vs_session_device *session = vs_service_get_session(service);
+ int ret;
+
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "probe\n");
+
+ /*
+ * Increase the reference count on the service driver. We don't allow
+ * service driver modules to be removed if there are any device
+ * instances present. The devices must be explicitly removed first.
+ */
+ if (!try_module_get(vsdrv->driver.owner))
+ return -ENODEV;
+
+ ret = vsdrv->probe(service);
+ if (ret) {
+ module_put(vsdrv->driver.owner);
+ return ret;
+ }
+
+ service->driver_probed = true;
+
+ try_start_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_probe);
+
+int vs_service_bus_remove(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+ int err = 0;
+
+ reset_service(service);
+
+ /* Prevent reactivation of the driver */
+ service->driver_probed = false;
+
+ /* The driver has now had its reset() callback called; remove it */
+ vsdrv->remove(service);
+
+ /*
+ * Take the service's state mutex and spinlock. This ensures that any
+ * thread that is calling vs_state_lock_safe[_bh] will either complete
+ * now, or see the driver removal and fail, irrespective of which type
+ * of lock it is using.
+ */
+ mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+ spin_lock_bh(&service->state_spinlock);
+
+ /* Release all the locks. */
+ spin_unlock_bh(&service->state_spinlock);
+ mutex_unlock(&service->state_mutex);
+ mutex_unlock(&service->ready_lock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ service->state_spinlock_used = false;
+ service->state_mutex_used = false;
+#endif
+
+ module_put(vsdrv->driver.owner);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_remove);
+
+int vs_service_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ dev_dbg(dev, "uevent\n");
+
+ if (add_uevent_var(env, "IS_SERVER=%d", service->is_server))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SERVICE_ID=%d", service->id))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SERVICE_NAME=%s", service->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PROTOCOL=%s", service->protocol ?: ""))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_uevent);
+
+static int vs_session_create_sysfs_entry(struct vs_transport *transport,
+ struct vs_session_device *session, bool server,
+ const char *transport_name)
+{
+ char *sysfs_name;
+ struct kobject *sysfs_parent = vservices_client_root;
+
+ if (!transport_name)
+ return -EINVAL;
+
+ sysfs_name = kasprintf(GFP_KERNEL, "%s:%s", transport->type,
+ transport_name);
+ if (!sysfs_name)
+ return -ENOMEM;
+
+ if (server)
+ sysfs_parent = vservices_server_root;
+
+ session->sysfs_entry = kobject_create_and_add(sysfs_name, sysfs_parent);
+
+ kfree(sysfs_name);
+ if (!session->sysfs_entry)
+ return -ENOMEM;
+ return 0;
+}
+
+static int vs_session_alloc_idr(struct vs_session_device *session)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+ int err, id;
+
+retry:
+ if (!idr_pre_get(&session_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&vs_session_lock);
+ err = idr_get_new_above(&session_idr, session, 0, &id);
+ if (err == 0) {
+ if (id >= VS_MAX_SESSIONS) {
+ /* We are out of session ids */
+ idr_remove(&session_idr, id);
+ mutex_unlock(&vs_session_lock);
+ return -EBUSY;
+ }
+ }
+ mutex_unlock(&vs_session_lock);
+ if (err == -EAGAIN)
+ goto retry;
+ if (err < 0)
+ return err;
+#else
+ int id;
+
+ mutex_lock(&vs_session_lock);
+ id = idr_alloc(&session_idr, session, 0, VS_MAX_SESSIONS, GFP_KERNEL);
+ mutex_unlock(&vs_session_lock);
+
+ if (id == -ENOSPC)
+ return -EBUSY;
+ else if (id < 0)
+ return id;
+#endif
+
+ session->session_num = id;
+ return 0;
+}
+
+/**
+ * vs_session_register - register a vservices session on a transport
+ * @transport: vservices transport that the session will attach to
+ * @parent: device that implements the transport (for sysfs)
+ * @server: true if the session is server-side
+ * @transport_name: name of the transport
+ *
+ * This function is intended to be called from the probe() function of a
+ * transport driver. It sets up a new session device, which then either
+ * performs automatic service discovery (for clients) or creates sysfs nodes
+ * that allow the user to create services (for servers).
+ *
+ * Note that the parent is only used by the driver framework; it is not
+ * directly accessed by the session drivers. Thus, a single transport device
+ * can support multiple sessions, as long as they each have a unique struct
+ * vs_transport.
+ *
+ * Note: This function may sleep, and therefore must not be called from
+ * interrupt context.
+ *
+ * Returns a pointer to the new device, or an error pointer.
+ */
+struct vs_session_device *vs_session_register(struct vs_transport *transport,
+ struct device *parent, bool server, const char *transport_name)
+{
+ struct device *dev;
+ struct vs_session_device *session;
+ int ret = -ENOMEM;
+
+ WARN_ON(!transport);
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ goto fail_session_alloc;
+
+ session->transport = transport;
+ session->is_server = server;
+ session->name = kstrdup(transport_name, GFP_KERNEL);
+ if (!session->name)
+ goto fail_free_session;
+
+ INIT_WORK(&session->activation_work, session_activation_work);
+ INIT_WORK(&session->fatal_error_work, session_fatal_error_work);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+ session->debug_mask = default_debug_mask & VS_DEBUG_ALL;
+#endif
+
+ idr_init(&session->service_idr);
+ mutex_init(&session->service_idr_lock);
+
+ /*
+ * The session's sysfs entry must be created before device_register(),
+ * so that it is available while the core service is being registered.
+ */
+ ret = vs_session_create_sysfs_entry(transport, session, server,
+ transport_name);
+ if (ret)
+ goto fail_free_session;
+
+ ret = vs_session_alloc_idr(session);
+ if (ret)
+ goto fail_sysfs_entry;
+
+ dev = &session->dev;
+ dev->parent = parent;
+ dev->bus = &vs_session_bus_type;
+ dev->release = vs_session_device_release;
+ dev_set_name(dev, "vservice:%d", session->session_num);
+
+ ret = device_register(dev);
+ if (ret)
+ goto fail_session_map;
+
+ /* Add a symlink to transport device inside session device sysfs dir */
+ if (parent) {
+ ret = sysfs_create_link(&session->dev.kobj,
+ &parent->kobj, VS_TRANSPORT_SYMLINK_NAME);
+ if (ret) {
+ dev_err(&session->dev,
+ "Error %d creating transport symlink\n",
+ ret);
+ goto fail_session_device_unregister;
+ }
+ }
+
+ return session;
+
+fail_session_device_unregister:
+ device_unregister(&session->dev);
+ kobject_put(session->sysfs_entry);
+ /* Remaining cleanup will be done in vs_session_release */
+ return ERR_PTR(ret);
+fail_session_map:
+ vs_session_free_idr(session);
+fail_sysfs_entry:
+ kobject_put(session->sysfs_entry);
+fail_free_session:
+ kfree(session->name);
+ kfree(session);
+fail_session_alloc:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vs_session_register);
+
+void vs_session_start(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service = session->core_service;
+
+ if (WARN_ON(!core_service))
+ return;
+
+ blocking_notifier_call_chain(&vs_session_notifier_list,
+ VS_SESSION_NOTIFY_ADD, session);
+
+ vs_service_start(core_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_start);
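+
+/*
+ * Usage sketch (illustrative only): a transport driver normally registers a
+ * session from its probe() routine and starts it once the core service has
+ * been registered. The priv structure, pdev, is_server and the "axon0" name
+ * below are hypothetical.
+ *
+ * session = vs_session_register(&priv->transport, &pdev->dev,
+ * is_server, "axon0");
+ * if (IS_ERR(session))
+ * return PTR_ERR(session);
+ * ...
+ * vs_session_start(session);
+ */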
+
+/**
+ * vs_session_unregister - unregister a session device
+ * @session: the session device to unregister
+ */
+void vs_session_unregister(struct vs_session_device *session)
+{
+ if (session->dev.parent)
+ sysfs_remove_link(&session->dev.kobj, VS_TRANSPORT_SYMLINK_NAME);
+ blocking_notifier_call_chain(&vs_session_notifier_list,
+ VS_SESSION_NOTIFY_REMOVE, session);
+
+ device_unregister(&session->dev);
+
+ kobject_put(session->sysfs_entry);
+}
+EXPORT_SYMBOL_GPL(vs_session_unregister);
+
+struct service_unbind_work_struct {
+ struct vs_service_device *service;
+ struct work_struct work;
+};
+
+static void service_unbind_work(struct work_struct *work)
+{
+ struct service_unbind_work_struct *unbind_work = container_of(work,
+ struct service_unbind_work_struct, work);
+
+ device_release_driver(&unbind_work->service->dev);
+
+ /* Matches vs_get_service() in vs_session_unbind_driver() */
+ vs_put_service(unbind_work->service);
+ kfree(unbind_work);
+}
+
+int vs_session_unbind_driver(struct vs_service_device *service)
+{
+ struct service_unbind_work_struct *unbind_work =
+ kmalloc(sizeof(*unbind_work), GFP_KERNEL);
+
+ if (!unbind_work)
+ return -ENOMEM;
+
+ INIT_WORK(&unbind_work->work, service_unbind_work);
+
+ /* Put in service_unbind_work() */
+ unbind_work->service = vs_get_service(service);
+ schedule_work(&unbind_work->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_unbind_driver);
+
+static int __init vservices_init(void)
+{
+ int r;
+
+ printk(KERN_INFO "vServices Framework 1.0\n");
+
+ vservices_root = kobject_create_and_add("vservices", NULL);
+ if (!vservices_root) {
+ r = -ENOMEM;
+ goto fail_create_root;
+ }
+
+ r = bus_register(&vs_session_bus_type);
+ if (r < 0)
+ goto fail_bus_register;
+
+ r = vs_devio_init();
+ if (r < 0)
+ goto fail_devio_init;
+
+ return 0;
+
+fail_devio_init:
+ bus_unregister(&vs_session_bus_type);
+fail_bus_register:
+ kobject_put(vservices_root);
+fail_create_root:
+ return r;
+}
+
+static void __exit vservices_exit(void)
+{
+ printk(KERN_INFO "vServices Framework exit\n");
+
+ vs_devio_exit();
+ bus_unregister(&vs_session_bus_type);
+ kobject_put(vservices_root);
+}
+
+subsys_initcall(vservices_init);
+module_exit(vservices_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Session");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.h b/drivers/vservices/session.h
new file mode 100644
index 000000000000..f51d535b3576
--- /dev/null
+++ b/drivers/vservices/session.h
@@ -0,0 +1,173 @@
+/*
+ * drivers/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Definitions related to the vservices session bus and its client and server
+ * session drivers. The interfaces in this file are implementation details of
+ * the vServices framework and should not be used by transport or service
+ * drivers.
+ */
+
+#ifndef _VSERVICES_SESSION_PRIV_H_
+#define _VSERVICES_SESSION_PRIV_H_
+
+/* Maximum number of sessions allowed */
+#define VS_MAX_SESSIONS 64
+
+#include "debug.h"
+
+/* For use by the core server */
+#define VS_SERVICE_AUTO_ALLOCATE_ID 0xffff
+#define VS_SERVICE_ALREADY_RESET 1
+
+/*
+ * The upper bits of the service id are reserved for transport driver specific
+ * use. The reserve bits are always zeroed out above the transport layer.
+ */
+#define VS_SERVICE_ID_TRANSPORT_BITS 4
+#define VS_SERVICE_ID_TRANSPORT_OFFSET 12
+#define VS_SERVICE_ID_TRANSPORT_MASK ((1 << VS_SERVICE_ID_TRANSPORT_BITS) - 1)
+#define VS_SERVICE_ID_MASK \
+ (~(VS_SERVICE_ID_TRANSPORT_MASK << VS_SERVICE_ID_TRANSPORT_OFFSET))
+
+/* Number of bits needed to represent the service id range as a bitmap. */
+#define VS_SERVICE_ID_BITMAP_BITS \
+ (1 << ((sizeof(vs_service_id_t) * 8) - VS_SERVICE_ID_TRANSPORT_BITS))
+
+/* High service ids are reserved for use by the transport drivers */
+#define VS_SERVICE_ID_RESERVED(x) \
+ ((1 << VS_SERVICE_ID_TRANSPORT_OFFSET) - (x))
+
+#define VS_SERVICE_ID_RESERVED_1 VS_SERVICE_ID_RESERVED(1)
+
+/* Name of the session device symlink in service device sysfs directory */
+#define VS_SESSION_SYMLINK_NAME "session"
+
+/* Name of the transport device symlink in session device sysfs directory */
+#define VS_TRANSPORT_SYMLINK_NAME "transport"
+
+static inline unsigned int
+vs_get_service_id_reserved_bits(vs_service_id_t service_id)
+{
+ return (service_id >> VS_SERVICE_ID_TRANSPORT_OFFSET) &
+ VS_SERVICE_ID_TRANSPORT_MASK;
+}
+
+static inline vs_service_id_t vs_get_real_service_id(vs_service_id_t service_id)
+{
+ return service_id & VS_SERVICE_ID_MASK;
+}
+
+static inline void vs_set_service_id_reserved_bits(vs_service_id_t *service_id,
+ unsigned int reserved_bits)
+{
+ *service_id &= ~(VS_SERVICE_ID_TRANSPORT_MASK <<
+ VS_SERVICE_ID_TRANSPORT_OFFSET);
+ *service_id |= (reserved_bits & VS_SERVICE_ID_TRANSPORT_MASK) <<
+ VS_SERVICE_ID_TRANSPORT_OFFSET;
+}
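+
+/*
+ * Usage sketch (illustrative only): a transport driver can carry its private
+ * bits in the upper part of a service id and strip them on receive, e.g.
+ *
+ * vs_service_id_t id = 7;
+ *
+ * vs_set_service_id_reserved_bits(&id, 1);
+ *
+ * after which vs_get_service_id_reserved_bits(id) returns 1 and
+ * vs_get_real_service_id(id) returns 7.
+ */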
+
+extern struct bus_type vs_session_bus_type;
+extern struct kobject *vservices_root;
+extern struct kobject *vservices_server_root;
+extern struct kobject *vservices_client_root;
+
+/**
+ * struct vs_session_driver - Session driver
+ * @driver: Linux device model driver structure
+ * @service_bus: Pointer to either the server or client bus type
+ * @is_server: True if this driver is for a server session, false if it is for
+ * a client session
+ * @service_added: Called when a non-core service is added.
+ * @service_start: Called when a non-core service is started.
+ * @service_local_reset: Called when an active non-core service driver becomes
+ * inactive.
+ * @service_removed: Called when a non-core service is removed.
+ */
+struct vs_session_driver {
+ struct device_driver driver;
+ struct bus_type *service_bus;
+ bool is_server;
+
+ /* These are all called with the core service state lock held. */
+ int (*service_added)(struct vs_session_device *session,
+ struct vs_service_device *service);
+ int (*service_start)(struct vs_session_device *session,
+ struct vs_service_device *service);
+ int (*service_local_reset)(struct vs_session_device *session,
+ struct vs_service_device *service);
+ int (*service_removed)(struct vs_session_device *session,
+ struct vs_service_device *service);
+};
+
+#define to_vs_session_driver(drv) \
+ container_of(drv, struct vs_session_driver, driver)
+
+/* Service lookup */
+extern struct vs_service_device * vs_session_get_service(
+ struct vs_session_device *session,
+ vs_service_id_t service_id);
+
+/* Service creation & destruction */
+extern struct vs_service_device *
+vs_service_register(struct vs_session_device *session,
+ struct vs_service_device *owner,
+ vs_service_id_t service_id,
+ const char *protocol,
+ const char *name,
+ const void *plat_data);
+
+extern bool vs_service_start(struct vs_service_device *service);
+
+extern int vs_service_delete(struct vs_service_device *service,
+ struct vs_service_device *caller);
+
+extern int vs_service_handle_delete(struct vs_service_device *service);
+
+/* Service reset handling */
+extern int vs_service_handle_reset(struct vs_session_device *session,
+ vs_service_id_t service_id, bool disable);
+extern int vs_service_enable(struct vs_service_device *service);
+
+extern void vs_session_enable_noncore(struct vs_session_device *session);
+extern void vs_session_disable_noncore(struct vs_session_device *session);
+extern void vs_session_delete_noncore(struct vs_session_device *session);
+
+/* Service bus driver management */
+extern int vs_service_bus_probe(struct device *dev);
+extern int vs_service_bus_remove(struct device *dev);
+extern int vs_service_bus_uevent(struct device *dev,
+ struct kobj_uevent_env *env);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+
+extern int vs_devio_init(void);
+extern void vs_devio_exit(void);
+
+extern struct vs_service_device *vs_service_lookup_by_devt(dev_t dev);
+
+extern struct vs_service_driver vs_devio_server_driver;
+extern struct vs_service_driver vs_devio_client_driver;
+
+extern int vservices_cdev_major;
+
+#else /* !CONFIG_VSERVICES_CHAR_DEV */
+
+static inline int vs_devio_init(void)
+{
+ return 0;
+}
+
+static inline void vs_devio_exit(void)
+{
+}
+
+#endif /* !CONFIG_VSERVICES_CHAR_DEV */
+
+#endif /* _VSERVICES_SESSION_PRIV_H_ */
diff --git a/drivers/vservices/skeleton_driver.c b/drivers/vservices/skeleton_driver.c
new file mode 100644
index 000000000000..cfbc5dfe174f
--- /dev/null
+++ b/drivers/vservices/skeleton_driver.c
@@ -0,0 +1,133 @@
+/*
+ * drivers/vservices/skeleton_driver.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Skeleton testing driver for templating vService client/server drivers
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+struct skeleton_info {
+ unsigned dummy;
+};
+
+static void vs_skeleton_handle_start(struct vs_service_device *service)
+{
+ /* NOTE: Do not change this message - it is used for system testing */
+ dev_info(&service->dev, "skeleton handle_start\n");
+}
+
+static int vs_skeleton_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *mbuf)
+{
+ dev_info(&service->dev, "skeleton handle_messasge\n");
+ return -EBADMSG;
+}
+
+static void vs_skeleton_handle_notify(struct vs_service_device *service,
+ u32 flags)
+{
+ dev_info(&service->dev, "skeleton handle_notify\n");
+}
+
+static void vs_skeleton_handle_reset(struct vs_service_device *service)
+{
+ dev_info(&service->dev, "skeleton handle_reset %s service %d\n",
+ service->is_server ? "server" : "client", service->id);
+}
+
+static int vs_skeleton_probe(struct vs_service_device *service)
+{
+ struct skeleton_info *info;
+ int err = -ENOMEM;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto fail;
+
+ dev_set_drvdata(&service->dev, info);
+ return 0;
+
+fail:
+ return err;
+}
+
+static int vs_skeleton_remove(struct vs_service_device *service)
+{
+ struct skeleton_info *info = dev_get_drvdata(&service->dev);
+
+ dev_info(&service->dev, "skeleton remove\n");
+ kfree(info);
+ return 0;
+}
+
+static struct vs_service_driver server_skeleton_driver = {
+ .protocol = "com.ok-labs.skeleton",
+ .is_server = true,
+ .probe = vs_skeleton_probe,
+ .remove = vs_skeleton_remove,
+ .start = vs_skeleton_handle_start,
+ .receive = vs_skeleton_handle_message,
+ .notify = vs_skeleton_handle_notify,
+ .reset = vs_skeleton_handle_reset,
+ .driver = {
+ .name = "vs-server-skeleton",
+ .owner = THIS_MODULE,
+ .bus = &vs_server_bus_type,
+ },
+};
+
+static struct vs_service_driver client_skeleton_driver = {
+ .protocol = "com.ok-labs.skeleton",
+ .is_server = false,
+ .probe = vs_skeleton_probe,
+ .remove = vs_skeleton_remove,
+ .start = vs_skeleton_handle_start,
+ .receive = vs_skeleton_handle_message,
+ .notify = vs_skeleton_handle_notify,
+ .reset = vs_skeleton_handle_reset,
+ .driver = {
+ .name = "vs-client-skeleton",
+ .owner = THIS_MODULE,
+ .bus = &vs_client_bus_type,
+ },
+};
+
+static int __init vs_skeleton_init(void)
+{
+ int ret;
+
+ ret = driver_register(&server_skeleton_driver.driver);
+ if (ret)
+ return ret;
+
+ ret = driver_register(&client_skeleton_driver.driver);
+ if (ret)
+ driver_unregister(&server_skeleton_driver.driver);
+
+ return ret;
+}
+
+static void __exit vs_skeleton_exit(void)
+{
+ driver_unregister(&server_skeleton_driver.driver);
+ driver_unregister(&client_skeleton_driver.driver);
+}
+
+module_init(vs_skeleton_init);
+module_exit(vs_skeleton_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Skeleton Client/Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/transport.h b/drivers/vservices/transport.h
new file mode 100644
index 000000000000..8e5055ca2269
--- /dev/null
+++ b/drivers/vservices/transport.h
@@ -0,0 +1,40 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the private interface that vServices transport drivers
+ * must provide to the vservices session and protocol layers. The transport,
+ * transport vtable, and message buffer structures are defined in the public
+ * <vservices/transport.h> header.
+ */
+
+#ifndef _VSERVICES_TRANSPORT_PRIV_H_
+#define _VSERVICES_TRANSPORT_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+
+/**
+ * struct vs_notify_info - Notification information stored in the transport
+ * @service_id: Service id for this notification info
+ * @offset: Offset into the notification mapping
+ */
+struct vs_notify_info {
+ vs_service_id_t service_id;
+ unsigned offset;
+};
+
+#define VS_MAX_SERVICES 128
+#define VS_MAX_SERVICE_ID (VS_MAX_SERVICES - 1)
+
+#endif /* _VSERVICES_TRANSPORT_PRIV_H_ */
diff --git a/drivers/vservices/transport/Kconfig b/drivers/vservices/transport/Kconfig
new file mode 100644
index 000000000000..37e84c4d4f1e
--- /dev/null
+++ b/drivers/vservices/transport/Kconfig
@@ -0,0 +1,20 @@
+#
+# vServices Transport driver configuration
+#
+
+menu "Transport drivers"
+
+config VSERVICES_OKL4_AXON
+ tristate "OKL4 Microvisor Axon driver"
+ depends on VSERVICES_SUPPORT && OKL4_GUEST
+ default y
+ help
+ This option adds support for Virtual Services sessions using an OKL4
+ Microvisor Axon object as a transport.
+
+ If this driver is to be used in a Cell that has multiple
+ discontiguous regions in its physical memory pool, the
+ CONFIG_DMA_CMA option must also be selected (or CONFIG_CMA
+ in older kernels that do not have CONFIG_DMA_CMA).
+
+endmenu
diff --git a/drivers/vservices/transport/Makefile b/drivers/vservices/transport/Makefile
new file mode 100644
index 000000000000..222fb512f946
--- /dev/null
+++ b/drivers/vservices/transport/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_OKL4_AXON) += vtransport_axon.o
+vtransport_axon-objs = axon.o
diff --git a/drivers/vservices/transport/axon.c b/drivers/vservices/transport/axon.c
new file mode 100644
index 000000000000..a140b4aac3e0
--- /dev/null
+++ b/drivers/vservices/transport/axon.c
@@ -0,0 +1,3573 @@
+/*
+ * drivers/vservices/transport/axon.c
+ *
+ * Copyright (c) 2015-2018 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the OKL4 Virtual Services transport driver for OKL4 Microvisor
+ * Axons (virtual inter-Cell DMA engines).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dma-contiguous.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+#include <asm/dma-contiguous.h>
+#endif
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
+#include <asm-generic/okl4_virq.h>
+#include <asm/byteorder.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include <microvisor/microvisor.h>
+
+#include "../transport.h"
+#include "../session.h"
+#include "../debug.h"
+
+#define DRIVER_AUTHOR "Cog Systems Pty Ltd"
+#define DRIVER_DESC "OKL4 vServices Axon Transport Driver"
+#define DRIVER_NAME "vtransport_axon"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || \
+ defined(CONFIG_NO_DEPRECATED_MEMORY_BARRIERS)
+#define smp_mb__before_atomic_dec smp_mb__before_atomic
+#define smp_mb__before_atomic_inc smp_mb__before_atomic
+#define smp_mb__after_atomic_dec smp_mb__after_atomic
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+#define DMA_ATTRS unsigned long
+#else
+#define DMA_ATTRS struct dma_attrs *
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && \
+ !defined(CONFIG_CMA)
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static struct kmem_cache *mbuf_cache;
+
+struct child_device {
+ struct device *dev;
+ struct list_head list;
+};
+
+/* Number of services in the transport array to allocate at a time */
+#define SERVICES_ALLOC_CHUNK 16
+#define MSG_SEND_FREE_BUFS VS_SERVICE_ID_RESERVED_1
+
+/* The maximum value we allow for the free_bufs_balance counter */
+#define MAX_BALANCE 1
+
+/*
+ * The free bufs quota must be enough to take free_bufs_balance from its
+ * minimum to its maximum.
+ */
+#define FREE_BUFS_QUOTA (MAX_BALANCE * 2)
+
+/*
+ * The free bufs retry delay is the period in jiffies that we delay retrying
+ * after an out-of-memory condition when trying to send a free bufs message.
+ */
+#define FREE_BUFS_RETRY_DELAY 2
+
+/* The minimum values we permit for queue and message size. */
+#define MIN_QUEUE_SIZE ((size_t)4)
+#define MIN_MSG_SIZE (32 - sizeof(vs_service_id_t))
+
+/*
+ * The maximum size for a batched receive. This should be larger than the
+ * maximum message size, and large enough to avoid excessive context switching
+ * overheads, yet small enough to avoid blocking the tasklet queue for too
+ * long.
+ */
+#define MAX_TRANSFER_CHUNK 65536
+
+#define INC_MOD(x, m) do { \
+ (x)++; \
+ if ((x) == (m)) \
+ (x) = 0; \
+} while (0)
+
+/* Local Axon cleanup workqueue */
+struct workqueue_struct *work_queue;
+
+/*
+ * True if there is only one physical segment being used for kernel memory
+ * allocations. If this is false, the device must have a usable CMA region.
+ */
+static bool okl4_single_physical_segment;
+
+/* OKL4 MMU capability. */
+static okl4_kcap_t okl4_mmu_cap;
+
+/*
+ * Per-service TX buffer allocation pool.
+ *
+ * We cannot use a normal DMA pool for TX buffers, because alloc_mbuf can be
+ * called with GFP_ATOMIC, and a normal DMA pool alloc will take pages from
+ * a global emergency pool if GFP_WAIT is not set. The emergency pool is not
+ * guaranteed to be in the same physical segment as this device's DMA region,
+ * so it might not be usable by the axon.
+ *
+ * Using a very simple allocator with preallocated memory also speeds up the
+ * TX path.
+ *
+ * RX buffers use a standard Linux DMA pool, shared between all services,
+ * rather than this struct. They are preallocated by definition, so the speed
+ * of the allocator doesn't matter much for them. Also, they're always
+ * allocated with GFP_KERNEL (which includes GFP_WAIT) so the normal DMA pool
+ * will use memory from the axon's contiguous region.
+ */
+struct vs_axon_tx_pool {
+ struct vs_transport_axon *transport;
+ struct kref kref;
+
+ void *base_vaddr;
+ dma_addr_t base_laddr;
+
+ unsigned alloc_order;
+ unsigned count;
+
+ struct work_struct free_work;
+ unsigned long alloc_bitmap[];
+};
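+
+/*
+ * Buffer i of a TX pool lives at base_vaddr + (i << alloc_order), with
+ * local (DMA) address base_laddr + (i << alloc_order), and is tracked
+ * by bit i of alloc_bitmap; see __transport_alloc_mbuf() and
+ * __transport_tx_pool_free() below.
+ */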
+
+struct vs_axon_rx_freelist_entry {
+ struct list_head list;
+ dma_addr_t laddr;
+};
+
+/* Service info */
+struct vs_mv_service_info {
+ struct vs_service_device *service;
+
+ /* True if the session has started the service */
+ bool ready;
+
+ /* Number of send buffers we have allocated, in total. */
+ atomic_t send_inflight;
+
+ /*
+ * Number of send buffers we have allocated but not yet sent.
+ * This should always be zero if ready is false.
+ */
+ atomic_t send_alloc;
+
+ /*
+ * Number of receive buffers we have received and not yet freed.
+ * This should always be zero if ready is false.
+ */
+ atomic_t recv_inflight;
+
+ /*
+ * Number of receive buffers we have freed, but not told the other end
+ * about yet.
+ *
+ * The watermark is the maximum number of freed buffers we can
+ * accumulate before we send a dummy message to the remote end to ack
+ * them. This is used in situations where the protocol allows the remote
+ * end to reach its send quota without guaranteeing a reply; the dummy
+ * message lets it make progress even if our service driver doesn't send
+ * an answer that we can piggy-back the acks on.
+ */
+ atomic_t recv_freed;
+ unsigned int recv_freed_watermark;
+
+ /*
+ * Number of buffers that have been left allocated after a reset. If
+ * this count is nonzero, then the service has been disabled by the
+ * session layer, and needs to be re-enabled when it reaches zero.
+ */
+ atomic_t outstanding_frees;
+
+ /* TX allocation pool */
+ struct vs_axon_tx_pool *tx_pool;
+
+ /* RX allocation count */
+ unsigned rx_allocated;
+
+ /* Reference count for this info struct. */
+ struct kref kref;
+
+ /* RCU head for cleanup */
+ struct rcu_head rcu_head;
+};
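+
+/*
+ * Example of the recv_freed watermark described above: if
+ * recv_freed_watermark is 8 (an illustrative value) and we free the
+ * 8th received buffer without sending a message whose reserved service
+ * id bits could carry the acks, transport_free_mbuf() sets
+ * free_bufs_pending and schedules free_bufs_work, which sends a
+ * MSG_SEND_FREE_BUFS message acknowledging the accumulated frees.
+ */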
+
+/*
+ * Transport readiness state machine
+ *
+ * This is similar to the service readiness state machine, but simpler,
+ * because there are fewer transition triggers.
+ *
+ * The states are:
+ * INIT: Initial state. This occurs transiently during probe.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement.
+ * RESET: The transport is inactive at both ends, and the session layer has
+ * not yet told us to start activating.
+ * LOCAL_READY: The session layer has told us to start activating, and we
+ * have notified the remote end that we're ready.
+ * REMOTE_READY: The remote end has notified us that it is ready, but the
+ * local session layer hasn't decided to become ready yet.
+ * ACTIVE: Both ends are ready to communicate.
+ * SHUTDOWN: The transport is shutting down and should not become ready.
+ */
+enum vs_transport_readiness {
+ VS_TRANSPORT_INIT = 0,
+ VS_TRANSPORT_LOCAL_RESET,
+ VS_TRANSPORT_RESET,
+ VS_TRANSPORT_LOCAL_READY,
+ VS_TRANSPORT_REMOTE_READY,
+ VS_TRANSPORT_ACTIVE,
+ VS_TRANSPORT_SHUTDOWN,
+};
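+
+/*
+ * Typical transition sequences, for illustration (following the state
+ * descriptions above):
+ *
+ *   Local end activates first:
+ *     RESET -> LOCAL_READY (transport_ready) -> ACTIVE (remote ready)
+ *   Remote end activates first:
+ *     RESET -> REMOTE_READY (remote ready) -> ACTIVE (transport_ready)
+ *   Reset handshake:
+ *     ACTIVE -> LOCAL_RESET (transport_reset) -> RESET (remote ack)
+ */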
+
+/*
+ * Transport reset / ready VIRQ payload bits
+ */
+enum vs_transport_reset_virq {
+ VS_TRANSPORT_VIRQ_RESET_REQ = (1 << 0),
+ VS_TRANSPORT_VIRQ_RESET_ACK = (1 << 1),
+ VS_TRANSPORT_VIRQ_READY = (1 << 2),
+};
+
+/*
+ * Internal definitions of the transport and message buffer structures.
+ */
+#define MAX_NOTIFICATION_LINES 16 /* Enough for 512 notifications each way */
+
+struct vs_transport_axon {
+ struct device *axon_dev;
+
+ struct okl4_axon_tx *tx;
+ struct okl4_axon_queue_entry *tx_descs;
+ struct vs_axon_tx_pool **tx_pools;
+ struct okl4_axon_rx *rx;
+ struct okl4_axon_queue_entry *rx_descs;
+ void **rx_ptrs;
+
+ dma_addr_t tx_phys, rx_phys;
+ size_t tx_size, rx_size;
+
+ okl4_kcap_t segment;
+ okl4_laddr_t segment_base;
+
+ okl4_kcap_t tx_cap, rx_cap, reset_cap;
+ unsigned int tx_irq, rx_irq, reset_irq;
+ okl4_interrupt_number_t reset_okl4_irq;
+
+ unsigned int notify_tx_nirqs;
+ okl4_kcap_t notify_cap[MAX_NOTIFICATION_LINES];
+ unsigned int notify_rx_nirqs;
+ unsigned int notify_irq[MAX_NOTIFICATION_LINES];
+
+ bool is_server;
+ size_t msg_size, queue_size;
+
+ /*
+ * The handle to the device tree node for the virtual-session node
+ * associated with the axon.
+ */
+ struct device_node *of_node;
+
+ struct list_head child_dev_list;
+
+ /*
+ * Hold queue and tx tasklet used to buffer and resend mbufs blocked
+ * by a full outgoing axon queue, due to a slow receiver or a halted
+ * axon.
+ */
+ struct list_head tx_queue;
+ struct tasklet_struct tx_tasklet;
+ u32 tx_uptr_freed;
+
+ /*
+ * The readiness state of the transport, and a spinlock protecting it.
+ * Note that this is different to the session's readiness state
+ * machine, though it has the same basic purpose.
+ */
+ enum vs_transport_readiness readiness;
+ spinlock_t readiness_lock;
+
+ struct tasklet_struct rx_tasklet;
+ struct timer_list rx_retry_timer;
+ struct list_head rx_freelist;
+ u32 rx_alloc_extra;
+ struct dma_pool *rx_pool;
+ spinlock_t rx_alloc_lock;
+ u32 rx_uptr_allocated;
+
+ struct vs_session_device *session_dev;
+ struct vs_transport transport;
+
+ DECLARE_BITMAP(service_bitmap, VS_SERVICE_ID_BITMAP_BITS);
+
+ struct delayed_work free_bufs_work;
+
+ /*
+ * Freed buffers messages balance counter. This counter is incremented
+ * when we send a freed buffers message and decremented when we receive
+ * one. If the balance is negative then we need to send a message
+ * as an acknowledgement to the other end, even if there are no
+ * freed buffers to acknowledge.
+ */
+ atomic_t free_bufs_balance;
+
+ /*
+ * Flag set when a service exceeds its freed buffers watermark,
+ * telling free_bufs_work to send a message when the balance
+ * counter is non-negative. This is ignored, and a message is
+ * sent in any case, if the balance is negative.
+ */
+ bool free_bufs_pending;
+
+ /* Pool for allocating outgoing free bufs messages */
+ struct vs_axon_tx_pool *free_bufs_pool;
+};
+
+#define to_vs_transport_axon(t) \
+ container_of(t, struct vs_transport_axon, transport)
+
+struct vs_mbuf_axon {
+ struct vs_mbuf base;
+ struct vs_transport_axon *owner;
+ dma_addr_t laddr;
+ struct vs_axon_tx_pool *pool;
+};
+
+#define to_vs_mbuf_axon(b) container_of(b, struct vs_mbuf_axon, base)
+
+/*
+ * Buffer allocation
+ *
+ * Buffers used by axons must be allocated within a single contiguous memory
+ * region, backed by a single OKL4 physical segment. This is similar to how
+ * the DMA allocator normally works, but we can't use the normal DMA allocator
+ * because the platform code will remap the allocated memory with caching
+ * disabled.
+ *
+ * We borrow the useful parts of the DMA allocator by providing our own DMA
+ * mapping ops which don't actually remap the memory.
+ */
+static void *axon_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, DMA_ATTRS attrs)
+{
+ unsigned long order;
+ size_t count;
+ struct page *page;
+ void *ptr;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+ if (!(gfp & __GFP_WAIT))
+#else
+ if (!(gfp & __GFP_RECLAIM))
+#endif
+ return NULL;
+
+ order = get_order(size);
+ count = size >> PAGE_SHIFT;
+
+ if (dev_get_cma_area(dev)) {
+ page = dma_alloc_from_contiguous(dev, count, order);
+
+ if (!page)
+ return NULL;
+ } else {
+ struct page *p, *e;
+ page = alloc_pages(gfp, order);
+
+ if (!page)
+ return NULL;
+
+ /* Split huge page and free any excess pages */
+ split_page(page, order);
+ for (p = page + count, e = page + (1 << order); p < e; p++)
+ __free_page(p);
+ }
+
+ if (PageHighMem(page)) {
+ struct vm_struct *area = get_vm_area(size, VM_USERMAP);
+ if (!area)
+ goto free_pages;
+ ptr = area->addr;
+ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+ if (ioremap_page_range((unsigned long)ptr,
+ (unsigned long)ptr + size,
+ area->phys_addr, PAGE_KERNEL)) {
+ vunmap(ptr);
+ goto free_pages;
+ }
+ } else {
+ ptr = page_address(page);
+ }
+
+ *handle = (dma_addr_t)page_to_pfn(page) << PAGE_SHIFT;
+
+	dev_dbg(dev, "dma_alloc: %#zx bytes at %pK (%#llx), %s cma, %s high\n",
+ size, ptr, (long long)*handle,
+ dev_get_cma_area(dev) ? "is" : "not",
+ PageHighMem(page) ? "is" : "not");
+
+ return ptr;
+
+free_pages:
+ if (dev_get_cma_area(dev)) {
+ dma_release_from_contiguous(dev, page, count);
+ } else {
+ struct page *e = page + count;
+
+ while (page < e) {
+ __free_page(page);
+ page++;
+ }
+ }
+
+ return NULL;
+}
+
+static void axon_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, DMA_ATTRS attrs)
+{
+ struct page *page = pfn_to_page(handle >> PAGE_SHIFT);
+
+ size = PAGE_ALIGN(size);
+
+ if (PageHighMem(page)) {
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+ }
+
+ if (dev_get_cma_area(dev)) {
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+ } else {
+ struct page *e = page + (size >> PAGE_SHIFT);
+
+ while (page < e) {
+ __free_page(page);
+ page++;
+ }
+ }
+}
+
+struct dma_map_ops axon_dma_ops = {
+ .alloc = axon_dma_alloc,
+ .free = axon_dma_free,
+};
+
+/*
+ * Quotas
+ * ------
+ *
+ * Each service has two quotas, one for send and one for receive. The
+ * send quota is incremented when we allocate an mbuf, and decremented
+ * when we receive a freed buffer ack from the remote end, either in
+ * the reserved bits of the service id or in a special free bufs
+ * message.
+ *
+ * The receive quota is incremented whenever we receive a message and
+ * decremented when we free the mbuf. Exceeding the receive quota
+ * indicates that something bad has happened: the other end's send
+ * quota should have prevented it from sending the message, so the two
+ * ends must disagree about the quotas, which is a driver bug. If this
+ * happens then a warning is printed and the offending service is
+ * reset.
+ */
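+
+/*
+ * Worked example with illustrative numbers: if send_quota is 4, a
+ * fifth transport_alloc_mbuf() for the service fails with -ENOBUFS
+ * until the remote end acks at least one freed buffer. When such an
+ * ack arrives, reduce_send_quota() decrements send_inflight and, if
+ * the service was previously at its limit, asks the session layer to
+ * run the service's tx_ready handler via vs_session_quota_available().
+ */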
+
+/*
+ * The base of the mbuf has the destination service id, but we pass the
+ * data pointer starting after the service id. The following helper
+ * functions are used to avoid ugly pointer arithmetic when handling
+ * mbufs.
+ */
+static size_t mbuf_real_size(struct vs_mbuf_axon *mbuf)
+{
+ return mbuf->base.size + sizeof(vs_service_id_t);
+}
+
+static void *mbuf_real_base(struct vs_mbuf_axon *mbuf)
+{
+ return mbuf->base.data - sizeof(vs_service_id_t);
+}
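+
+/*
+ * Resulting buffer layout (see the two helpers above):
+ *
+ *   mbuf_real_base(mbuf)             mbuf->base.data
+ *   |                                |
+ *   v                                v
+ *   +--------------------------------+---------------------------+
+ *   | vs_service_id_t (dest service) | payload (mbuf->base.size) |
+ *   +--------------------------------+---------------------------+
+ *   |<--------------------- mbuf_real_size(mbuf) --------------->|
+ */
+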
+/*
+ * Get the service_id and reserved bits from a message buffer, and then
+ * clear the reserved bits so the upper layers don't see them.
+ */
+vs_service_id_t
+transport_get_mbuf_service_id(struct vs_transport_axon *transport,
+ void *data, unsigned int *freed_acks)
+{
+ unsigned int reserved_bits;
+ vs_service_id_t id;
+
+ /* Get the real service id and reserved bits */
+ id = *(vs_service_id_t *)data;
+ reserved_bits = vs_get_service_id_reserved_bits(id);
+ id = vs_get_real_service_id(id);
+
+ /* Clear the reserved bits in the service id */
+ vs_set_service_id_reserved_bits(&id, 0);
+ if (freed_acks) {
+ *(vs_service_id_t *)data = id;
+ *freed_acks = reserved_bits;
+ }
+ return id;
+}
+
+static void
+__transport_get_service_info(struct vs_mv_service_info *service_info)
+{
+ kref_get(&service_info->kref);
+}
+
+static struct vs_mv_service_info *
+transport_get_service_info(struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+
+ rcu_read_lock();
+ service_info = rcu_dereference(service->transport_priv);
+ if (service_info)
+ __transport_get_service_info(service_info);
+ rcu_read_unlock();
+
+ return service_info;
+}
+
+static struct vs_mv_service_info *
+transport_get_service_id_info(struct vs_transport_axon *transport,
+ vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+ struct vs_mv_service_info *service_info;
+
+ service = vs_session_get_service(transport->session_dev, service_id);
+ if (!service)
+ return NULL;
+
+ service_info = transport_get_service_info(service);
+
+ vs_put_service(service);
+ return service_info;
+}
+
+static void transport_info_free(struct rcu_head *rcu_head)
+{
+ struct vs_mv_service_info *service_info =
+ container_of(rcu_head, struct vs_mv_service_info, rcu_head);
+
+ vs_put_service(service_info->service);
+ kfree(service_info);
+}
+
+static void transport_info_release(struct kref *kref)
+{
+ struct vs_mv_service_info *service_info =
+ container_of(kref, struct vs_mv_service_info, kref);
+
+ call_rcu(&service_info->rcu_head, transport_info_free);
+}
+
+static void transport_put_service_info(struct vs_mv_service_info *service_info)
+{
+ kref_put(&service_info->kref, transport_info_release);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport);
+
+static void transport_fatal_error(struct vs_transport_axon *transport,
+ const char *msg)
+{
+ dev_err(transport->axon_dev, "Fatal transport error (%s); resetting\n",
+ msg);
+#ifdef DEBUG
+ dump_stack();
+#endif
+ transport_axon_reset(transport);
+}
+
+static unsigned int reduce_send_quota(struct vs_transport_axon *transport,
+ struct vs_mv_service_info *service_info, unsigned int count,
+ bool allow_tx_ready)
+{
+ int new_inflight, send_alloc;
+ bool was_over_quota, is_over_quota;
+
+ /* FIXME: Redmine issue #1303 - philip. */
+ spin_lock_irq(&transport->readiness_lock);
+ /*
+ * We read the current send_alloc for error checking *before*
+ * decrementing send_inflight. This avoids any false positives
+ * due to send_alloc being incremented by a concurrent alloc_mbuf.
+ *
+ * Note that there is an implicit smp_mb() before atomic_sub_return(),
+ * matching the explicit one in alloc_mbuf.
+ */
+ send_alloc = atomic_read(&service_info->send_alloc);
+ new_inflight = atomic_sub_return(count, &service_info->send_inflight);
+
+ spin_unlock_irq(&transport->readiness_lock);
+ if (WARN_ON(new_inflight < send_alloc)) {
+ dev_err(transport->axon_dev,
+ "inflight sent messages for service %d is less than the number of allocated messages (%d < %d, was reduced by %d)\n",
+ service_info->service->id, new_inflight,
+ send_alloc, count);
+ transport_fatal_error(transport, "sent msg count underrun");
+ return 0;
+ }
+
+ was_over_quota = (new_inflight + count >=
+ service_info->service->send_quota);
+ is_over_quota = (new_inflight > service_info->service->send_quota);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Service %d quota %d -> %d (over_quota: %d -> %d)\n",
+ service_info->service->id, new_inflight + count,
+ new_inflight, was_over_quota, is_over_quota);
+
+ /*
+ * Notify the service that a buffer has been freed. We call tx_ready
+ * if this is a notification from the remote end (i.e. not an unsent
+ * buffer) and the quota has just dropped below the maximum.
+ */
+ vs_session_quota_available(transport->session_dev,
+ service_info->service->id, count,
+ !is_over_quota && was_over_quota && allow_tx_ready);
+
+ return count;
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+ dma_addr_t laddr);
+
+static void
+__transport_tx_cleanup(struct vs_transport_axon *transport)
+{
+ u32 uptr;
+ struct okl4_axon_queue_entry *desc;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ uptr = transport->tx_uptr_freed;
+ desc = &transport->tx_descs[uptr];
+
+ while (!okl4_axon_data_info_getpending(&desc->info)) {
+ if (!transport->tx_pools[uptr])
+ break;
+
+ __transport_tx_pool_free(transport->tx_pools[uptr],
+ okl4_axon_data_info_getladdr(&desc->info));
+ transport->tx_pools[uptr] = NULL;
+
+ INC_MOD(uptr, transport->tx->queues[0].entries);
+ desc = &transport->tx_descs[uptr];
+ transport->tx_uptr_freed = uptr;
+ }
+}
+
+static void
+transport_axon_free_tx_pool(struct work_struct *work)
+{
+ struct vs_axon_tx_pool *pool = container_of(work,
+ struct vs_axon_tx_pool, free_work);
+ struct vs_transport_axon *transport = pool->transport;
+
+ dmam_free_coherent(transport->axon_dev,
+ pool->count << pool->alloc_order,
+ pool->base_vaddr, pool->base_laddr);
+ devm_kfree(transport->axon_dev, pool);
+}
+
+static void
+transport_axon_queue_free_tx_pool(struct kref *kref)
+{
+ struct vs_axon_tx_pool *pool = container_of(kref,
+ struct vs_axon_tx_pool, kref);
+
+	/*
+	 * Queue the free on the axon's local workqueue so that it runs
+	 * in a context where IRQs are enabled.
+	 */
+ INIT_WORK(&pool->free_work, transport_axon_free_tx_pool);
+ queue_work(work_queue, &pool->free_work);
+}
+
+static void
+transport_axon_put_tx_pool(struct vs_axon_tx_pool *pool)
+{
+ kref_put(&pool->kref, transport_axon_queue_free_tx_pool);
+}
+
+/* Low-level tx buffer allocation, without quota tracking. */
+static struct vs_mbuf_axon *
+__transport_alloc_mbuf(struct vs_transport_axon *transport,
+ vs_service_id_t service_id, struct vs_axon_tx_pool *pool,
+ size_t size, gfp_t gfp_flags)
+{
+ size_t real_size = size + sizeof(vs_service_id_t);
+ struct vs_mbuf_axon *mbuf;
+ unsigned index;
+
+ if (WARN_ON(real_size > (1 << pool->alloc_order))) {
+ dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+ real_size, (size_t)1 << pool->alloc_order);
+ goto fail_message_size;
+ }
+
+ kref_get(&pool->kref);
+
+ do {
+ index = find_first_zero_bit(pool->alloc_bitmap, pool->count);
+ if (unlikely(index >= pool->count)) {
+ /*
+ * No buffers left. This can't be an out-of-quota
+ * situation, because we've already checked the quota;
+ * it must be because there's a buffer left over in
+ * the tx queue. Clean out the tx queue and retry.
+ */
+ spin_lock_irq(&transport->readiness_lock);
+ __transport_tx_cleanup(transport);
+ spin_unlock_irq(&transport->readiness_lock);
+
+ index = find_first_zero_bit(pool->alloc_bitmap,
+ pool->count);
+ }
+ if (unlikely(index >= pool->count))
+ goto fail_buffer_alloc;
+ } while (unlikely(test_and_set_bit_lock(index, pool->alloc_bitmap)));
+
+ mbuf = kmem_cache_alloc(mbuf_cache, gfp_flags & ~GFP_ZONEMASK);
+ if (!mbuf)
+ goto fail_mbuf_alloc;
+
+ mbuf->base.is_recv = false;
+ mbuf->base.data = pool->base_vaddr + (index << pool->alloc_order);
+ mbuf->base.size = size;
+ mbuf->owner = transport;
+ mbuf->laddr = pool->base_laddr + (index << pool->alloc_order);
+ mbuf->pool = pool;
+
+ /*
+ * We put the destination service id in the mbuf, but increment the
+ * data pointer past it so the receiver doesn't always need to skip
+ * the service id.
+ */
+ *(vs_service_id_t *)mbuf->base.data = service_id;
+ mbuf->base.data += sizeof(vs_service_id_t);
+
+ return mbuf;
+
+fail_mbuf_alloc:
+ clear_bit_unlock(index, pool->alloc_bitmap);
+fail_buffer_alloc:
+ transport_axon_put_tx_pool(pool);
+fail_message_size:
+ return NULL;
+}
+
+/* Allocate a tx buffer for a specified service. */
+static struct vs_mbuf *transport_alloc_mbuf(struct vs_transport *_transport,
+ struct vs_service_device *service, size_t size, gfp_t gfp_flags)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ size_t real_size = size + sizeof(vs_service_id_t);
+ struct vs_mv_service_info *service_info = NULL;
+ struct vs_mbuf_axon *mbuf;
+ vs_service_id_t service_id = service->id;
+
+ if (real_size > transport->msg_size) {
+ dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+ real_size, transport->msg_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (WARN_ON(service_id == MSG_SEND_FREE_BUFS))
+ return ERR_PTR(-ENXIO);
+
+ service_info = transport_get_service_info(service);
+ if (WARN_ON(!service_info))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_info->tx_pool) {
+ transport_put_service_info(service_info);
+ return ERR_PTR(-ECONNRESET);
+ }
+
+ if (!atomic_add_unless(&service_info->send_inflight, 1,
+ service_info->service->send_quota)) {
+ /* Service has reached its quota */
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Service %d is at max send quota %d\n",
+ service_id, service_info->service->send_quota);
+ transport_put_service_info(service_info);
+ return ERR_PTR(-ENOBUFS);
+ }
+
+ /*
+ * Increment the count of allocated but unsent mbufs. This is done
+ * *after* the send_inflight increment (with a barrier to enforce
+ * ordering) to ensure that send_inflight is never less than
+ * send_alloc - see reduce_send_quota().
+ */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&service_info->send_alloc);
+
+ mbuf = __transport_alloc_mbuf(transport, service_id,
+ service_info->tx_pool, size, gfp_flags);
+ if (!mbuf) {
+ /*
+ * Failed to allocate a buffer - decrement our quota back to
+ * where it was.
+ */
+ atomic_dec(&service_info->send_alloc);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&service_info->send_inflight);
+
+ transport_put_service_info(service_info);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ transport_put_service_info(service_info);
+
+ return &mbuf->base;
+}
+
+static void transport_free_sent_mbuf(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf)
+{
+ kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+ dma_addr_t laddr)
+{
+ unsigned index = (laddr - pool->base_laddr) >> pool->alloc_order;
+
+ if (WARN_ON(index >= pool->count)) {
+ printk(KERN_DEBUG "free %#llx base %#llx order %d count %d\n",
+ (long long)laddr, (long long)pool->base_laddr,
+ pool->alloc_order, pool->count);
+ return;
+ }
+
+ clear_bit_unlock(index, pool->alloc_bitmap);
+ transport_axon_put_tx_pool(pool);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+ void *ptr, dma_addr_t laddr);
+
+static void transport_rx_recycle(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf)
+{
+ void *data = mbuf_real_base(mbuf);
+ dma_addr_t laddr = mbuf->laddr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&transport->rx_alloc_lock, flags);
+
+ if (transport->rx_alloc_extra) {
+ transport->rx_alloc_extra--;
+ dma_pool_free(transport->rx_pool, data, laddr);
+ } else if (transport_rx_queue_buffer(transport, data, laddr) < 0) {
+ struct vs_axon_rx_freelist_entry *buf = data;
+ buf->laddr = laddr;
+ list_add_tail(&buf->list, &transport->rx_freelist);
+ tasklet_schedule(&transport->rx_tasklet);
+ } else {
+ tasklet_schedule(&transport->rx_tasklet);
+ }
+
+ spin_unlock_irqrestore(&transport->rx_alloc_lock, flags);
+}
+
+static void transport_free_mbuf_pools(struct vs_transport_axon *transport,
+ struct vs_service_device *service,
+ struct vs_mv_service_info *service_info)
+{
+ /*
+ * Free the TX allocation pool. This will also free any buffer
+ * memory allocated from the pool, so it is essential that
+ * this happens only after we have successfully freed all
+ * mbufs.
+ *
+ * Note that the pool will not exist if the core client is reset
+ * before it receives a startup message.
+ */
+ if (!IS_ERR_OR_NULL(service_info->tx_pool))
+ transport_axon_put_tx_pool(service_info->tx_pool);
+ service_info->tx_pool = NULL;
+
+ /* Mark the service's preallocated RX buffers as extra. */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ transport->rx_alloc_extra += service_info->rx_allocated;
+ service_info->rx_allocated = 0;
+ spin_unlock_irq(&transport->rx_alloc_lock);
+}
+
+/* Low-level tx or rx buffer free, with no quota tracking */
+static void __transport_free_mbuf(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf, bool is_rx)
+{
+ if (is_rx) {
+ transport_rx_recycle(transport, mbuf);
+ } else {
+ __transport_tx_pool_free(mbuf->pool, mbuf->laddr);
+ }
+
+ kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void transport_free_mbuf(struct vs_transport *_transport,
+ struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+ struct vs_mv_service_info *service_info = NULL;
+ void *data = mbuf_real_base(mbuf);
+ vs_service_id_t service_id __maybe_unused =
+ transport_get_mbuf_service_id(transport, data, NULL);
+ bool is_recv = mbuf->base.is_recv;
+
+ WARN_ON(!service);
+ service_info = transport_get_service_info(service);
+
+ __transport_free_mbuf(transport, mbuf, is_recv);
+
+ /*
+ * If this message was left over from a service that has already been
+ * deleted, we don't need to do any quota accounting.
+ */
+ if (!service_info)
+ return;
+
+ if (unlikely(atomic_read(&service_info->outstanding_frees))) {
+ if (atomic_dec_and_test(&service_info->outstanding_frees)) {
+ dev_dbg(transport->axon_dev,
+ "service %d all outstanding frees done\n",
+ service->id);
+ transport_free_mbuf_pools(transport, service,
+ service_info);
+ vs_service_enable(service);
+ } else {
+ dev_dbg(transport->axon_dev,
+ "service %d outstanding frees -> %d\n",
+ service->id, atomic_read(
+ &service_info->outstanding_frees));
+ }
+ } else if (is_recv) {
+ smp_mb__before_atomic_dec();
+ atomic_dec(&service_info->recv_inflight);
+ if (atomic_inc_return(&service_info->recv_freed) >=
+ service_info->recv_freed_watermark) {
+ transport->free_bufs_pending = true;
+ schedule_delayed_work(&transport->free_bufs_work, 0);
+ }
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Freed recv buffer for service %d rq=%d/%d, freed=%d (watermark = %d)\n",
+ service_id,
+ atomic_read(&service_info->recv_inflight),
+ service_info->service->recv_quota,
+ atomic_read(&service_info->recv_freed),
+ service_info->recv_freed_watermark);
+ } else {
+ /*
+ * We are freeing a message buffer that we allocated. This
+ * usually happens on error paths in application drivers if
+ * we allocated a buffer but failed to send it. In this case
+ * we need to decrement our own send quota since we didn't
+ * send anything.
+ */
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Freeing send buffer for service %d, send quota = %d\n",
+ service_id, atomic_read(&service_info->send_inflight));
+
+ smp_mb__before_atomic_dec();
+ atomic_dec(&service_info->send_alloc);
+
+ /*
+ * We don't allow the tx_ready handler to run when we are
+ * freeing an mbuf that we allocated.
+ */
+ reduce_send_quota(transport, service_info, 1, false);
+ }
+
+ transport_put_service_info(service_info);
+}
+
+static size_t transport_mbuf_size(struct vs_mbuf *_mbuf)
+{
+ struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+
+ return mbuf_real_size(mbuf);
+}
+
+static size_t transport_max_mbuf_size(struct vs_transport *_transport)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ return transport->msg_size - sizeof(vs_service_id_t);
+}
+
+static int okl4_error_to_errno(okl4_error_t err) {
+ switch (err) {
+ case OKL4_OK:
+ return 0;
+ case OKL4_ERROR_AXON_QUEUE_NOT_MAPPED:
+ /* Axon has been reset locally */
+ return -ECONNRESET;
+ case OKL4_ERROR_AXON_QUEUE_NOT_READY:
+ /* No message buffers in the queue. */
+ return -ENOBUFS;
+ case OKL4_ERROR_AXON_INVALID_OFFSET:
+ case OKL4_ERROR_AXON_AREA_TOO_BIG:
+ /* Buffer address is bad */
+ return -EFAULT;
+ case OKL4_ERROR_AXON_BAD_MESSAGE_SIZE:
+ case OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED:
+ /* One of the Axon's message size limits has been exceeded */
+ return -EMSGSIZE;
+ default:
+ /* Miscellaneous failure, probably a bad cap */
+ return -EIO;
+ }
+}
+
+static void queue_tx_mbuf(struct vs_mbuf_axon *mbuf, struct vs_transport_axon *priv,
+ vs_service_id_t service_id)
+{
+ list_add_tail(&mbuf->base.queue, &priv->tx_queue);
+}
+
+static void free_tx_mbufs(struct vs_transport_axon *priv)
+{
+ struct vs_mbuf_axon *child, *tmp;
+
+ list_for_each_entry_safe(child, tmp, &priv->tx_queue, base.queue) {
+ list_del(&child->base.queue);
+ __transport_free_mbuf(priv, child, false);
+ }
+}
+
+static int __transport_flush(struct vs_transport_axon *transport)
+{
+ _okl4_sys_axon_trigger_send(transport->tx_cap);
+ return 0;
+}
+
+static int transport_flush(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ return __transport_flush(transport);
+}
+
+/*
+ * Low-level transport message send function.
+ *
+ * The caller must hold the transport->readiness_lock, and is responsible for
+ * freeing the mbuf on successful send (use transport_free_sent_mbuf). The
+ * mbuf must _not_ be freed here if this function fails; in that case the
+ * Virtual Service driver that allocated it is responsible for freeing it.
+ */
+static int __transport_send(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+ unsigned long flags)
+{
+ u32 uptr;
+ struct okl4_axon_queue_entry *desc;
+ struct vs_axon_tx_pool *old_pool;
+ dma_addr_t old_laddr;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "send %zu bytes to service %d\n",
+ mbuf->base.size, service_id);
+ vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+ uptr = ACCESS_ONCE(transport->tx->queues[0].uptr);
+ desc = &transport->tx_descs[uptr];
+
+ /* Is the descriptor ready to use? */
+ if (okl4_axon_data_info_getpending(&desc->info))
+ return -ENOSPC;
+ mb();
+
+ /* The descriptor is ours; save its old state and increment the uptr */
+ old_pool = transport->tx_pools[uptr];
+ if (old_pool != NULL)
+ old_laddr = okl4_axon_data_info_getladdr(&desc->info);
+ transport->tx_pools[uptr] = mbuf->pool;
+
+ INC_MOD(uptr, transport->tx->queues[0].entries);
+ ACCESS_ONCE(transport->tx->queues[0].uptr) = uptr;
+
+ /* Set up the descriptor */
+ desc->data_size = mbuf_real_size(mbuf);
+ okl4_axon_data_info_setladdr(&desc->info, mbuf->laddr);
+
+ /* Message is ready to go */
+ wmb();
+ okl4_axon_data_info_setpending(&desc->info, true);
+
+ if (flags & VS_TRANSPORT_SEND_FLAGS_MORE) {
+ /*
+ * This is a batched message, so we normally don't flush,
+ * unless we've filled the queue completely.
+ *
+ * Races on the queue descriptor don't matter here, because
+ * this is only an optimisation; the service should do an
+ * explicit flush when it finishes the batch anyway.
+ */
+ desc = &transport->tx_descs[uptr];
+ if (okl4_axon_data_info_getpending(&desc->info))
+ __transport_flush(transport);
+ } else {
+ __transport_flush(transport);
+ }
+
+ /* Free any buffer previously in the descriptor */
+ if (old_pool != NULL) {
+ u32 uptr_freed = transport->tx_uptr_freed;
+ INC_MOD(uptr_freed, transport->tx->queues[0].entries);
+ WARN_ON(uptr_freed != uptr);
+ __transport_tx_pool_free(old_pool, old_laddr);
+ transport->tx_uptr_freed = uptr_freed;
+ }
+
+ return 0;
+}
+
+static int transport_send_might_queue(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+ unsigned long flags, bool *queued)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&transport->readiness_lock);
+ *queued = false;
+
+ if (transport->readiness != VS_TRANSPORT_ACTIVE)
+ return -ECONNRESET;
+
+ if (!list_empty(&transport->tx_queue)) {
+ *queued = true;
+ } else {
+ ret = __transport_send(transport, mbuf, service_id, flags);
+ if (ret == -ENOSPC) {
+ *queued = true;
+ ret = 0;
+ }
+ }
+
+ if (*queued)
+ queue_tx_mbuf(mbuf, transport, service_id);
+
+ return ret;
+}
+
+static int transport_send(struct vs_transport *_transport,
+ struct vs_service_device *service, struct vs_mbuf *_mbuf,
+ unsigned long flags)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+ struct vs_mv_service_info *service_info;
+ vs_service_id_t service_id;
+ int recv_freed, freed_acks;
+ bool queued;
+ int err;
+ unsigned long irqflags;
+
+ if (WARN_ON(!transport || !mbuf || mbuf->owner != transport))
+ return -EINVAL;
+
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(mbuf), NULL);
+
+ if (WARN_ON(service_id != service->id))
+ return -EINVAL;
+
+ service_info = transport_get_service_info(service);
+ if (!service_info)
+ return -EINVAL;
+
+ if (mbuf->base.is_recv) {
+ /*
+ * This message buffer was allocated for receive. We don't
+ * allow receive message buffers to be reused for sending
+ * because it makes our quotas inconsistent.
+ */
+ dev_err(&service_info->service->dev,
+ "Attempted to send a received message buffer\n");
+ transport_put_service_info(service_info);
+ return -EINVAL;
+ }
+
+ if (!service_info->ready) {
+ transport_put_service_info(service_info);
+ return -ECOMM;
+ }
+
+ /*
+ * Set the message's service id reserved bits to the number of buffers
+	 * we have freed. We can only ack (2 ^ VS_SERVICE_ID_RESERVED_BITS) - 1
+	 * buffers in one message.
+ */
+ do {
+ recv_freed = atomic_read(&service_info->recv_freed);
+ freed_acks = min_t(int, recv_freed,
+ VS_SERVICE_ID_TRANSPORT_MASK);
+ } while (recv_freed != atomic_cmpxchg(&service_info->recv_freed,
+ recv_freed, recv_freed - freed_acks));
+
+ service_id = service_info->service->id;
+ vs_set_service_id_reserved_bits(&service_id, freed_acks);
+ *(vs_service_id_t *)mbuf_real_base(mbuf) = service_id;
+
+ spin_lock_irqsave(&transport->readiness_lock, irqflags);
+ err = transport_send_might_queue(transport, mbuf,
+ service_info->service->id, flags, &queued);
+ if (err) {
+ /* We failed to send, so revert the freed acks */
+ if (atomic_add_return(freed_acks,
+ &service_info->recv_freed) >=
+ service_info->recv_freed_watermark) {
+ transport->free_bufs_pending = true;
+ schedule_delayed_work(&transport->free_bufs_work, 0);
+ }
+ transport_put_service_info(service_info);
+ spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+ return err;
+ }
+
+ atomic_dec(&service_info->send_alloc);
+
+ if (queued) {
+ transport_put_service_info(service_info);
+ spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+ return 0;
+ }
+
+ /*
+ * The mbuf was sent successfully. We can free it locally since it is
+ * now owned by the remote end.
+ */
+ transport_free_sent_mbuf(transport, mbuf);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Send okay: service %d (0x%.2x) sq=%d/%d, alloc--=%d, rq=%d/%d, freed=%d/%d, bc=%d\n",
+ service_info->service->id, service_id,
+ atomic_read(&service_info->send_inflight),
+ service_info->service->send_quota,
+ atomic_read(&service_info->send_alloc),
+ atomic_read(&service_info->recv_inflight),
+ service_info->service->recv_quota, freed_acks,
+ atomic_read(&service_info->recv_freed),
+ atomic_read(&transport->free_bufs_balance));
+
+ transport_put_service_info(service_info);
+ spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+
+ return 0;
+}
+
+static void transport_free_bufs_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct vs_transport_axon *transport = container_of(dwork,
+ struct vs_transport_axon, free_bufs_work);
+ struct vs_mbuf_axon *mbuf;
+ int i, err, count = 0, old_balance;
+ bool queued;
+ size_t size;
+ u16 *p;
+
+ /*
+ * Atomically decide whether to send a message, and increment
+ * the balance if we are going to.
+ *
+ * We don't need barriers before these reads because they're
+ * implicit in the work scheduling.
+ */
+ do {
+ old_balance = atomic_read(&transport->free_bufs_balance);
+
+ /*
+ * We only try to send if the balance is negative,
+ * or if we have been triggered by going over a
+ * watermark.
+ */
+ if (old_balance >= 0 && !transport->free_bufs_pending)
+ return;
+
+ /*
+		 * If we've hit the max balance, we can't send. The
+		 * work item will be rescheduled next time the balance
+		 * is decremented, if free_bufs_pending is true.
+ */
+ if (old_balance >= MAX_BALANCE)
+ return;
+
+ } while (old_balance != atomic_cmpxchg(&transport->free_bufs_balance,
+ old_balance, old_balance + 1));
+
+ /* Try to allocate a message buffer. */
+ mbuf = __transport_alloc_mbuf(transport, MSG_SEND_FREE_BUFS,
+ transport->free_bufs_pool,
+ transport->msg_size - sizeof(vs_service_id_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!mbuf) {
+ /* Out of memory at the moment; retry later. */
+ atomic_dec(&transport->free_bufs_balance);
+ schedule_delayed_work(dwork, FREE_BUFS_RETRY_DELAY);
+ return;
+ }
+
+ /*
+ * Clear free_bufs_pending, because we are going to try to send. We
+ * need a write barrier afterwards to guarantee that this write is
+ * ordered before any writes to the recv_freed counts, and therefore
+ * before any remote free_bufs_pending = true when a service goes
+ * over its watermark right after we inspect it.
+ *
+ * The matching barrier is implicit in the atomic_inc_return in
+ * transport_free_mbuf().
+ */
+ transport->free_bufs_pending = false;
+ smp_wmb();
+
+ /*
+ * Fill in the buffer. Message format is:
+ *
+ * u16: Number of services
+ *
+ * For each service:
+ * u16: Service ID
+ * u16: Number of freed buffers
+ */
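+	/*
+	 * For example, acking 3 freed buffers for service 2 and 1 for
+	 * service 5 produces the u16 sequence { 2, 2, 3, 5, 1 }: the
+	 * service count followed by (service id, freed count) pairs,
+	 * giving mbuf->base.size == sizeof(u16) * 5.
+	 */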
+ p = mbuf->base.data;
+ *(p++) = 0;
+
+ for_each_set_bit(i, transport->service_bitmap,
+ VS_SERVICE_ID_BITMAP_BITS) {
+ struct vs_mv_service_info *service_info;
+ int recv_freed;
+ u16 freed_acks;
+
+ service_info = transport_get_service_id_info(transport, i);
+ if (!service_info)
+ continue;
+
+ /*
+ * Don't let the message exceed the maximum size for the
+ * transport.
+ */
+ size = sizeof(vs_service_id_t) + sizeof(u16) +
+ (count * (2 * sizeof(u16)));
+ if (size > transport->msg_size) {
+ /* FIXME: Jira ticket SDK-3131 - ryanm. */
+ transport_put_service_info(service_info);
+ transport->free_bufs_pending = true;
+ break;
+ }
+
+ /*
+ * We decrement each service's quota immediately by up to
+ * USHRT_MAX. If we subsequently fail to send the message then
+ * we return the count to what it was previously.
+ */
+ do {
+ recv_freed = atomic_read(&service_info->recv_freed);
+ freed_acks = min_t(int, USHRT_MAX, recv_freed);
+ } while (recv_freed != atomic_cmpxchg(
+ &service_info->recv_freed,
+ recv_freed, recv_freed - freed_acks));
+
+ if (freed_acks) {
+ if (freed_acks < recv_freed)
+ transport->free_bufs_pending = true;
+
+ *(p++) = service_info->service->id;
+ *(p++) = freed_acks;
+ count++;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev,
+ transport->axon_dev,
+ " [%.2d] Freed %.2d buffers\n",
+ service_info->service->id,
+ freed_acks);
+ } else {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev,
+ transport->axon_dev,
+ " [%.2d] No buffers to free\n",
+ service_info->service->id);
+ }
+
+ transport_put_service_info(service_info);
+ }
+
+ if (transport->free_bufs_pending)
+ schedule_delayed_work(dwork, 0);
+
+ if (count == 0 && old_balance >= 0) {
+ /*
+ * We are sending a new free bufs message, but we have no
+ * freed buffers to tell the other end about. We don't send
+ * an empty message unless the pre-increment balance was
+ * negative (in which case we need to ack a remote free_bufs).
+ *
+ * Note that nobody else can increase the balance, so we only
+ * need to check for a non-negative balance once before
+ * decrementing. However, if the incoming free-bufs handler
+ * concurrently decrements, the balance may become negative,
+ * in which case we reschedule ourselves immediately to send
+ * the ack.
+ */
+ if (atomic_dec_return(&transport->free_bufs_balance) < 0)
+ schedule_delayed_work(dwork, 0);
+
+ __transport_free_mbuf(transport, mbuf, false);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "No services had buffers to free\n");
+
+ return;
+ }
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Sending free bufs message for %d services\n", count);
+
+ /* Fix up the message size */
+ p = mbuf->base.data;
+ *p = count;
+ mbuf->base.size = sizeof(u16) * ((count * 2) + 1);
+
+ spin_lock_irq(&transport->readiness_lock);
+ err = transport_send_might_queue(transport, mbuf, MSG_SEND_FREE_BUFS,
+ 0, &queued);
+ if (err) {
+ spin_unlock_irq(&transport->readiness_lock);
+ goto fail;
+ }
+
+ /* FIXME: Jira ticket SDK-4675 - ryanm. */
+ if (!queued) {
+ /*
+ * The mbuf was sent successfully. We can free it locally
+ * since it is now owned by the remote end.
+ */
+ transport_free_sent_mbuf(transport, mbuf);
+ }
+ spin_unlock_irq(&transport->readiness_lock);
+
+ return;
+
+fail:
+ dev_err(transport->axon_dev,
+ "Failed to send free bufs message: %d\n", err);
+ transport_fatal_error(transport, "free bufs send failed");
+}
+
+int transport_notify(struct vs_transport *_transport,
+ struct vs_service_device *service, unsigned long bits)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ unsigned long bit_offset, bitmask, word;
+ int first_set_bit, spilled_bits;
+
+ BUG_ON(!transport);
+
+ if (!bits)
+ return -EINVAL;
+
+ /* Check that the service isn't trying to raise bits it doesn't own */
+ if (bits & ~((1UL << service->notify_send_bits) - 1))
+ return -EINVAL;
+
+ bit_offset = service->notify_send_offset;
+ word = BIT_WORD(bit_offset);
+ bitmask = bits << (bit_offset % BITS_PER_LONG);
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Sending notification %ld to service id %d\n", bitmask,
+ service->id);
+
+ _okl4_sys_vinterrupt_raise(transport->notify_cap[word], bitmask);
+
+ /*
+ * Bit range may spill into the next virqline.
+ *
+ * Check by adding the bit offset to the index of the highest set bit in
+ * the requested bitmask. If we need to raise a bit that is greater than
+ * bit 31, we have spilled into the next word and need to raise that too.
+ */
+ first_set_bit = find_first_bit(&bits, BITS_PER_LONG);
+ spilled_bits = first_set_bit + bit_offset - (BITS_PER_LONG - 1);
+ if (spilled_bits > 0) {
+ /*
+ * Calculate the new bitmask for the spilled bits. We do this by
+ * shifting the requested bits to the right. The number of shifts
+ * is determined on where the first spilled bit is.
+ */
+ int first_spilled_bit = first_set_bit - spilled_bits + 1;
+
+ bitmask = bits >> first_spilled_bit;
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Sending notification %ld to service id %d\n", bitmask,
+ service->id);
+
+ _okl4_sys_vinterrupt_raise(transport->notify_cap[word + 1], bitmask);
+ }
+
+ return 0;
+}
+
+static void
+transport_handle_free_bufs_message(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf)
+{
+ struct vs_mv_service_info *service_info;
+ vs_service_id_t service_id;
+ u16 *p = mbuf->base.data;
+ int i, count, freed_acks, new_balance;
+
+ count = *(p++);
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Free bufs message received for %d services\n", count);
+ for (i = 0; i < count; i++) {
+ int old_quota __maybe_unused;
+
+ service_id = *(p++);
+ freed_acks = *(p++);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, " [%.2d] %.4d\n",
+ service_id, freed_acks);
+
+ service_info = transport_get_service_id_info(transport,
+ service_id);
+ if (!service_info) {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev,
+ transport->axon_dev,
+ "Got %d free_acks for unknown service %d\n",
+ freed_acks, service_id);
+ continue;
+ }
+
+ old_quota = atomic_read(&service_info->send_inflight);
+ freed_acks = reduce_send_quota(transport, service_info,
+ freed_acks, service_info->ready);
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ " [%.2d] Freed %.2d buffers (%d -> %d, quota = %d)\n",
+ service_id, freed_acks, old_quota,
+ atomic_read(&service_info->send_inflight),
+ service_info->service->send_quota);
+
+ transport_put_service_info(service_info);
+ }
+
+ __transport_free_mbuf(transport, mbuf, true);
+
+ new_balance = atomic_dec_return(&transport->free_bufs_balance);
+ if (new_balance < -MAX_BALANCE) {
+ dev_err(transport->axon_dev,
+ "Balance counter fell below -MAX_BALANCE (%d < %d)\n",
+			new_balance,
+ -MAX_BALANCE);
+ transport_fatal_error(transport, "balance counter underrun");
+ return;
+ }
+
+ /* Check if we need to send a freed buffers message back */
+ if (new_balance < 0 || transport->free_bufs_pending)
+ schedule_delayed_work(&transport->free_bufs_work, 0);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+ void *ptr, dma_addr_t laddr)
+{
+ struct okl4_axon_queue_entry *desc;
+ okl4_axon_data_info_t info;
+
+ /* Select the buffer desc to reallocate */
+ desc = &transport->rx_descs[transport->rx_uptr_allocated];
+ info = ACCESS_ONCE(desc->info);
+
+ /* If there is no space in the rx queue, fail */
+ if (okl4_axon_data_info_getusr(&info))
+ return -ENOSPC;
+
+ /* Don't update desc before reading the clear usr bit */
+ smp_mb();
+
+ /* Update the buffer pointer in the desc and mark it valid. */
+ transport->rx_ptrs[transport->rx_uptr_allocated] = ptr;
+ okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)laddr);
+ okl4_axon_data_info_setpending(&info, true);
+ okl4_axon_data_info_setusr(&info, true);
+ mb();
+ ACCESS_ONCE(desc->info) = info;
+
+ /* Proceed to the next buffer */
+ INC_MOD(transport->rx_uptr_allocated,
+ transport->rx->queues[0].entries);
+
+ /* Return true if the next desc has no buffer yet */
+ desc = &transport->rx_descs[transport->rx_uptr_allocated];
+ return !okl4_axon_data_info_getusr(&desc->info);
+}
+
+/* TODO: multiple queue support / small message prioritisation */
+static int transport_process_msg(struct vs_transport_axon *transport)
+{
+ struct vs_mv_service_info *service_info;
+ struct vs_mbuf_axon *mbuf;
+ vs_service_id_t service_id;
+ unsigned freed_acks;
+ u32 uptr;
+ struct okl4_axon_queue_entry *desc;
+ void **ptr;
+ okl4_axon_data_info_t info;
+
+ /* Select the descriptor to receive from */
+ uptr = ACCESS_ONCE(transport->rx->queues[0].uptr);
+ desc = &transport->rx_descs[uptr];
+ ptr = &transport->rx_ptrs[uptr];
+ info = ACCESS_ONCE(desc->info);
+
+ /* Have we emptied the whole queue? */
+ if (!okl4_axon_data_info_getusr(&info))
+ return -ENOBUFS;
+
+ /* Has the next buffer been filled yet? */
+ if (okl4_axon_data_info_getpending(&info))
+ return 0;
+
+ /* Don't read the buffer or desc before seeing a cleared pending bit */
+ rmb();
+
+ /* Is the message too small to be valid? */
+ if (desc->data_size < sizeof(vs_service_id_t))
+ return -EBADMSG;
+
+ /* Allocate and set up the mbuf */
+ mbuf = kmem_cache_alloc(mbuf_cache, GFP_ATOMIC);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->owner = transport;
+ mbuf->laddr = okl4_axon_data_info_getladdr(&info);
+ mbuf->pool = NULL;
+ mbuf->base.is_recv = true;
+ mbuf->base.data = *ptr + sizeof(vs_service_id_t);
+ mbuf->base.size = desc->data_size - sizeof(vs_service_id_t);
+
+ INC_MOD(uptr, transport->rx->queues[0].entries);
+ ACCESS_ONCE(transport->rx->queues[0].uptr) = uptr;
+
+ /* Finish reading desc before clearing usr bit */
+ smp_mb();
+
+ /* Re-check the pending bit, in case we've just been reset */
+ info = ACCESS_ONCE(desc->info);
+ if (unlikely(okl4_axon_data_info_getpending(&info))) {
+ kmem_cache_free(mbuf_cache, mbuf);
+ return 0;
+ }
+
+ /* Clear usr bit; after this point the buffer is owned by the mbuf */
+ okl4_axon_data_info_setusr(&info, false);
+ ACCESS_ONCE(desc->info) = info;
+
+ /* Determine who to deliver the mbuf to */
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(mbuf), &freed_acks);
+
+ if (service_id == MSG_SEND_FREE_BUFS) {
+ transport_handle_free_bufs_message(transport, mbuf);
+ return 1;
+ }
+
+ service_info = transport_get_service_id_info(transport, service_id);
+ if (!service_info) {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev, transport->axon_dev,
+ "discarding message for missing service %d\n",
+ service_id);
+ __transport_free_mbuf(transport, mbuf, true);
+ return -EIDRM;
+ }
+
+ /*
+ * If the remote end has freed some buffers that we sent it, then we
+ * can decrement our send quota count by that amount.
+ */
+ freed_acks = reduce_send_quota(transport, service_info,
+ freed_acks, service_info->ready);
+
+ /* If the service has been reset, drop the message. */
+ if (!service_info->ready) {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev, transport->axon_dev,
+ "discarding message for reset service %d\n",
+ service_id);
+
+ __transport_free_mbuf(transport, mbuf, true);
+ transport_put_service_info(service_info);
+
+ return 1;
+ }
+
+ /*
+ * Increment our recv quota since we are now holding a buffer. We
+ * will decrement it when the buffer is freed in transport_free_mbuf.
+ */
+ if (!atomic_add_unless(&service_info->recv_inflight, 1,
+ service_info->service->recv_quota)) {
+ /*
+ * Going over the recv_quota indicates that something bad
+ * has happened because either the other end has exceeded
+ * its send quota or the two ends have a disagreement about
+ * what the quota is.
+ *
+ * We free the buffer and reset the transport.
+ */
+ dev_err(transport->axon_dev,
+ "Service %d is at max receive quota %d - resetting\n",
+ service_info->service->id,
+ service_info->service->recv_quota);
+
+ transport_fatal_error(transport, "rx quota exceeded");
+
+ __transport_free_mbuf(transport, mbuf, true);
+ transport_put_service_info(service_info);
+
+ return 0;
+ }
+
+ WARN_ON(atomic_read(&service_info->recv_inflight) >
+ service_info->service->recv_quota);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "receive %zu bytes from service 0x%.2x (%d): sq=%d/%d, rq=%d/%d, freed_acks=%d, freed=%d/%d bc=%d\n",
+ mbuf->base.size, service_info->service->id, service_id,
+ atomic_read(&service_info->send_inflight),
+ service_info->service->send_quota,
+ atomic_read(&service_info->recv_inflight),
+ service_info->service->recv_quota, freed_acks,
+ atomic_read(&service_info->recv_freed),
+ service_info->recv_freed_watermark,
+ atomic_read(&transport->free_bufs_balance));
+ vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+ if (vs_session_handle_message(transport->session_dev, &mbuf->base,
+ service_id) < 0)
+ transport_free_mbuf(&transport->transport,
+ service_info->service, &mbuf->base);
+
+ transport_put_service_info(service_info);
+
+ return 1;
+}
+
+static void transport_flush_tx_queues(struct vs_transport_axon *transport)
+{
+ okl4_error_t err;
+ int i;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ /* Release any queued mbufs */
+ free_tx_mbufs(transport);
+
+ /*
+ * Re-attach the TX Axon's segment, which implicitly invalidates
+ * the queues and stops any outgoing message transfers. The queues
+ * will be reconfigured when the transport becomes ready again.
+ */
+ err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX reattach failed: %d\n",
+ (int)err);
+ }
+
+ /*
+ * The TX Axon has stopped, so we can safely clear the pending
+ * bit and free the buffer for any outgoing messages, and reset uptr
+ * and kptr to 0.
+ */
+ for (i = 0; i < transport->tx->queues[0].entries; i++) {
+ if (!transport->tx_pools[i])
+ continue;
+
+ okl4_axon_data_info_setpending(
+ &transport->tx_descs[i].info, false);
+ __transport_tx_pool_free(transport->tx_pools[i],
+ okl4_axon_data_info_getladdr(
+ &transport->tx_descs[i].info));
+ transport->tx_pools[i] = NULL;
+ }
+ transport->tx->queues[0].uptr = 0;
+ transport->tx->queues[0].kptr = 0;
+ transport->tx_uptr_freed = 0;
+}
+
+static void transport_flush_rx_queues(struct vs_transport_axon *transport)
+{
+ okl4_error_t err;
+ int i;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ /*
+	 * Re-attach the RX Axon's segment, which implicitly invalidates
+	 * the queues and stops any incoming message transfers, though the
+	 * sending end should already have cancelled those. The queues
+	 * will be reconfigured when the transport becomes ready again.
+ */
+ err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX reattach failed: %d\n",
+ (int)err);
+ }
+
+ /*
+ * The RX Axon has stopped, so we can reset the pending bit on all
+ * allocated message buffers to prepare them for reuse when the reset
+ * completes.
+ */
+ for (i = 0; i < transport->rx->queues[0].entries; i++) {
+ if (okl4_axon_data_info_getusr(&transport->rx_descs[i].info))
+ okl4_axon_data_info_setpending(
+ &transport->rx_descs[i].info, true);
+ }
+
+ /*
+ * Reset kptr to the current uptr.
+ *
+ * We use a barrier here to ensure the pending bits are reset before
+ * reading uptr, matching the barrier in transport_process_msg between
+ * the uptr update and the second check of the pending bit. This means
+ * that races with transport_process_msg() will end in one of two
+ * ways:
+ *
+ * 1. transport_process_msg() updates uptr before this barrier, so the
+ * RX buffer is passed up to the session layer to be rejected there
+ * and recycled; or
+ *
+ * 2. the reset pending bit is seen by the second check in
+ * transport_process_msg(), which knows that it is being reset and
+ * can drop the message before it claims the buffer.
+ */
+ smp_mb();
+ transport->rx->queues[0].kptr =
+ ACCESS_ONCE(transport->rx->queues[0].uptr);
+
+ /*
+ * Cancel any pending freed bufs work. We can't flush it here, but
+ * that is OK: we will do so before we become ready.
+ */
+ cancel_delayed_work(&transport->free_bufs_work);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport)
+{
+ okl4_error_t err;
+ unsigned long flags;
+ bool reset_complete = false;
+
+ spin_lock_irqsave(&transport->readiness_lock, flags);
+
+ /*
+ * Reset the transport, dumping any messages in transit, and tell the
+ * remote end that it should do the same.
+ *
+ * We only do this if the transport is not already marked reset. Doing
+ * otherwise would be redundant.
+ */
+ if ((transport->readiness != VS_TRANSPORT_RESET) &&
+ transport->readiness != VS_TRANSPORT_LOCAL_RESET &&
+ transport->readiness != VS_TRANSPORT_REMOTE_READY) {
+ /*
+ * Flush the Axons' TX queues. We can't flush the RX queues
+ * until after the remote end has acknowledged the reset.
+ */
+ transport_flush_tx_queues(transport);
+
+ /*
+ * Raise a reset request VIRQ, and discard any incoming reset
+ * or ready notifications as they are now stale. Note that we
+ * must do this in a single syscall.
+ */
+ err = _okl4_sys_vinterrupt_clear_and_raise(
+ transport->reset_okl4_irq,
+ transport->reset_cap, 0UL,
+ VS_TRANSPORT_VIRQ_RESET_REQ).error;
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "Reset raise failed: %d\n",
+ (int)err);
+ }
+
+ /* Local reset is complete */
+ if (transport->readiness != VS_TRANSPORT_SHUTDOWN)
+ transport->readiness = VS_TRANSPORT_LOCAL_RESET;
+ } else {
+ /* Already in reset */
+ reset_complete = true;
+ }
+
+ spin_unlock_irqrestore(&transport->readiness_lock, flags);
+
+ return reset_complete;
+}
+
+static void transport_reset(struct vs_transport *_transport)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "reset\n");
+
+ if (transport_axon_reset(transport)) {
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "reset while already reset (no-op)\n");
+
+ vs_session_handle_reset(transport->session_dev);
+ }
+}
+
+static void transport_ready(struct vs_transport *_transport)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ okl4_error_t err;
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "%s: becoming ready\n", __func__);
+
+ /*
+ * Make sure any previously scheduled freed bufs work is cancelled.
+ * It should not be possible for this to be rescheduled later, as long
+ * as the transport is in reset.
+ */
+ cancel_delayed_work_sync(&transport->free_bufs_work);
+ spin_lock_irq(&transport->readiness_lock);
+
+ atomic_set(&transport->free_bufs_balance, 0);
+ transport->free_bufs_pending = false;
+
+	switch (transport->readiness) {
+ case VS_TRANSPORT_RESET:
+ transport->readiness = VS_TRANSPORT_LOCAL_READY;
+ break;
+ case VS_TRANSPORT_REMOTE_READY:
+ vs_session_handle_activate(transport->session_dev);
+ transport->readiness = VS_TRANSPORT_ACTIVE;
+ break;
+ case VS_TRANSPORT_LOCAL_RESET:
+ /*
+		 * The session layer is confused; this usually happens because
+		 * the reset performed at init time, which the session did not
+		 * explicitly request, has not completed yet. We just ignore
+		 * the call and wait for that reset. We could avoid this by not
+		 * starting the session until the startup reset completes.
+ */
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+ case VS_TRANSPORT_SHUTDOWN:
+ /* Do nothing. */
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+ default:
+ /* Session layer is broken */
+ WARN(1, "transport_ready() called in the wrong state: %d",
+ transport->readiness);
+ goto fail;
+ }
+
+ /* Raise a ready notification VIRQ. */
+ err = _okl4_sys_vinterrupt_raise(transport->reset_cap,
+ VS_TRANSPORT_VIRQ_READY);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "Ready raise failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ /*
+ * Set up the Axons' queue pointers.
+ */
+ err = _okl4_sys_axon_set_send_area(transport->tx_cap,
+ transport->tx_phys, transport->tx_size);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX set area failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ err = _okl4_sys_axon_set_send_queue(transport->tx_cap,
+ transport->tx_phys);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX set queue failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ err = _okl4_sys_axon_set_recv_area(transport->rx_cap,
+ transport->rx_phys, transport->rx_size);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX set area failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ err = _okl4_sys_axon_set_recv_queue(transport->rx_cap,
+ transport->rx_phys);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX set queue failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+
+fail:
+ spin_unlock_irq(&transport->readiness_lock);
+
+ transport_axon_reset(transport);
+}
+
+static int transport_service_add(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mv_service_info *service_info;
+
+ /*
+ * We can't print out the core service add because the session
+ * isn't fully registered at that time.
+ */
+ if (service->id != 0)
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Add service - id = %d\n", service->id);
+
+ service_info = kzalloc(sizeof(*service_info), GFP_KERNEL);
+ if (!service_info)
+ return -ENOMEM;
+
+ kref_init(&service_info->kref);
+
+ /* Matching vs_put_service() is in transport_info_free */
+ service_info->service = vs_get_service(service);
+
+ /* Make the service_info visible */
+ rcu_assign_pointer(service->transport_priv, service_info);
+
+ __set_bit(service->id, transport->service_bitmap);
+
+ return 0;
+}
+
+static void transport_service_remove(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mv_service_info *service_info;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "Remove service - id = %d\n",
+ service->id);
+
+ __clear_bit(service->id, transport->service_bitmap);
+
+ service_info = service->transport_priv;
+ rcu_assign_pointer(service->transport_priv, NULL);
+
+ if (service_info->ready) {
+ dev_err(transport->axon_dev,
+ "Removing service %d while ready\n",
+ service->id);
+ transport_fatal_error(transport, "removing ready service");
+ }
+
+ transport_put_service_info(service_info);
+}
+
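+/*
+ * Allocate a TX buffer pool: one coherent DMA allocation holding send_quota
+ * buffers of (1 << alloc_order) bytes each, where alloc_order covers
+ * msg_size plus the service ID tag. The bitmap following the pool struct
+ * tracks which slots are currently in use.
+ */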
+static struct vs_axon_tx_pool *
+transport_axon_init_tx_pool(struct vs_transport_axon *transport,
+ size_t msg_size, unsigned send_quota)
+{
+ struct vs_axon_tx_pool *pool;
+
+ pool = devm_kzalloc(transport->axon_dev, sizeof(*pool) +
+ (sizeof(unsigned long) * BITS_TO_LONGS(send_quota)),
+ GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->transport = transport;
+ pool->alloc_order = ilog2(msg_size + sizeof(vs_service_id_t));
+ pool->count = send_quota;
+
+ pool->base_vaddr = dmam_alloc_coherent(transport->axon_dev,
+ send_quota << pool->alloc_order, &pool->base_laddr,
+ GFP_KERNEL);
+ if (!pool->base_vaddr) {
+ dev_err(transport->axon_dev, "Couldn't allocate %lu times %zu bytes for TX\n",
+ (unsigned long)pool->count, (size_t)1 << pool->alloc_order);
+ devm_kfree(transport->axon_dev, pool);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&pool->kref);
+ return pool;
+}
+
+static int transport_service_start(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_notify_info *info;
+ int i, ret;
+ bool enable_rx;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "Start service - id = %d\n",
+ service->id);
+
+ service_info = service->transport_priv;
+ __transport_get_service_info(service_info);
+
+ /* We shouldn't have any mbufs left from before the last reset. */
+ if (WARN_ON(atomic_read(&service_info->outstanding_frees))) {
+ transport_put_service_info(service_info);
+ return -EBUSY;
+ }
+
+ /*
+ * The watermark is set to half of the received-message quota, rounded
+ * up. This is fairly arbitrary. Rounding up ensures that we don't set
+ * the watermark to 0 for services with a quota of 1 (which would
+ * trigger infinite free_bufs messages).
+ */
+ service_info->recv_freed_watermark = (service->recv_quota + 1) / 2;
+
+ if (WARN_ON(service->notify_recv_bits + service->notify_recv_offset >
+ transport->notify_rx_nirqs * BITS_PER_LONG)) {
+ transport_put_service_info(service_info);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(service->notify_send_bits + service->notify_send_offset >
+ transport->notify_tx_nirqs * BITS_PER_LONG)) {
+ transport_put_service_info(service_info);
+ return -EINVAL;
+ }
+
+ /* This is called twice for the core client only. */
+ WARN_ON(service->id != 0 && service_info->ready);
+
+ if (!service_info->ready) {
+ WARN_ON(atomic_read(&service_info->send_alloc));
+ WARN_ON(atomic_read(&service_info->recv_freed));
+ WARN_ON(atomic_read(&service_info->recv_inflight));
+ }
+
+ /* Create the TX buffer pool. */
+ WARN_ON(service->send_quota && service_info->tx_pool);
+ if (service->send_quota) {
+ service_info->tx_pool = transport_axon_init_tx_pool(transport,
+ transport->msg_size, service->send_quota);
+ if (IS_ERR(service_info->tx_pool)) {
+ ret = PTR_ERR(service_info->tx_pool);
+ service_info->tx_pool = NULL;
+ transport_put_service_info(service_info);
+ return ret;
+ }
+ }
+
+ /*
+ * Preallocate RX buffers to cover this service's receive quota: use any
+ * spare buffers the transport already has (rx_alloc_extra) first, then
+ * allocate the remainder from the DMA pool.
+ */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ i = min(transport->rx_alloc_extra,
+ service->recv_quota - service_info->rx_allocated);
+ transport->rx_alloc_extra -= i;
+ service_info->rx_allocated += i;
+ spin_unlock_irq(&transport->rx_alloc_lock);
+
+ for (; service_info->rx_allocated < service->recv_quota;
+ service_info->rx_allocated++) {
+ dma_addr_t laddr;
+ struct vs_axon_rx_freelist_entry *buf =
+ dma_pool_alloc(transport->rx_pool, GFP_KERNEL, &laddr);
+ if (WARN_ON(!buf))
+ break;
+ buf->laddr = laddr;
+
+ spin_lock_irq(&transport->rx_alloc_lock);
+ list_add(&buf->list, &transport->rx_freelist);
+ spin_unlock_irq(&transport->rx_alloc_lock);
+ }
+
+ for (i = 0; i < service->notify_recv_bits; i++) {
+ unsigned bit = i + service->notify_recv_offset;
+ info = &transport->transport.notify_info[bit];
+
+ info->service_id = service->id;
+ info->offset = service->notify_recv_offset;
+ }
+
+ atomic_set(&service_info->send_inflight, 0);
+
+ /*
+ * If this is the core service and it wasn't ready before, we need to
+ * enable RX for the whole transport.
+ */
+ enable_rx = service->id == 0 && !service_info->ready;
+
+ service_info->ready = true;
+
+ /* We're now ready to receive. */
+ if (enable_rx)
+ tasklet_enable(&transport->rx_tasklet);
+
+ transport_put_service_info(service_info);
+
+ return 0;
+}
+
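+/*
+ * Reset a single service. The return value is the number of message buffers
+ * still held by the service driver (the outstanding frees count); the reset
+ * is not complete until that count drains to zero.
+ */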
+static int transport_service_reset(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mbuf_axon *child, *tmp;
+ int ret = 0, service_id, send_remaining, recv_remaining;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "Reset service - id = %d\n",
+ service->id);
+
+ service_info = service->transport_priv;
+ __transport_get_service_info(service_info);
+
+ /*
+ * Clear the ready bit with the tasklet disabled. After this point,
+ * incoming messages will be discarded by transport_process_msg()
+ * without incrementing recv_inflight, so we won't spuriously see
+ * nonzero recv_inflight values for messages that would be discarded
+ * in the session layer.
+ */
+ tasklet_disable(&transport->rx_tasklet);
+ service_info->ready = false;
+ if (service->id)
+ tasklet_enable(&transport->rx_tasklet);
+
+ /*
+ * Cancel and free all pending outgoing messages for the service being
+ * reset; i.e. those that have been sent by the service but are not
+ * yet in the axon queue.
+ *
+ * Note that this does not clean out the axon queue; messages there
+ * are already visible to OKL4 and may be transferred at any time,
+ * so we treat those as already sent.
+ */
+ spin_lock_irq(&transport->readiness_lock);
+ list_for_each_entry_safe(child, tmp, &transport->tx_queue, base.queue) {
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(child), NULL);
+ if (service_id == service->id) {
+ list_del(&child->base.queue);
+ __transport_tx_pool_free(child->pool, child->laddr);
+ }
+ }
+ spin_unlock_irq(&transport->readiness_lock);
+
+ /*
+ * If any buffers remain allocated, we mark them as outstanding frees.
+ * The transport will remain disabled until this count goes to zero.
+ */
+ send_remaining = atomic_read(&service_info->send_alloc);
+ recv_remaining = atomic_read(&service_info->recv_inflight);
+ ret = atomic_add_return(send_remaining + recv_remaining,
+ &service_info->outstanding_frees);
+ dev_dbg(transport->axon_dev, "reset service %d with %d outstanding (send %d, recv %d)\n",
+ service->id, ret, send_remaining, recv_remaining);
+
+ /*
+ * Reduce the send alloc count to 0, accounting for races with frees,
+ * which might have reduced either the alloc count or the outstanding
+ * count.
+ */
+ while (send_remaining > 0) {
+ unsigned new_send_remaining = atomic_cmpxchg(
+ &service_info->send_alloc, send_remaining, 0);
+ if (send_remaining == new_send_remaining) {
+ smp_mb();
+ break;
+ }
+ WARN_ON(send_remaining < new_send_remaining);
+ ret = atomic_sub_return(send_remaining - new_send_remaining,
+ &service_info->outstanding_frees);
+ send_remaining = new_send_remaining;
+ dev_dbg(transport->axon_dev, "failed to zero send quota, now %d outstanding (%d send)\n",
+ ret, send_remaining);
+ }
+
+ /* Repeat the above for the recv inflight count. */
+ while (recv_remaining > 0) {
+ unsigned new_recv_remaining = atomic_cmpxchg(
+ &service_info->recv_inflight, recv_remaining,
+ 0);
+ if (recv_remaining == new_recv_remaining) {
+ smp_mb();
+ break;
+ }
+ WARN_ON(recv_remaining < new_recv_remaining);
+ ret = atomic_sub_return(recv_remaining - new_recv_remaining,
+ &service_info->outstanding_frees);
+ recv_remaining = new_recv_remaining;
+ dev_dbg(transport->axon_dev, "failed to zero recv quota, now %d outstanding (%d send)\n",
+ ret, recv_remaining);
+ }
+
+ /* The outstanding frees count should never go negative */
+ WARN_ON(ret < 0);
+
+ /* Discard any outstanding freed buffer notifications. */
+ atomic_set(&service_info->recv_freed, 0);
+
+ /*
+ * Wait for any previously queued free_bufs work to finish. This
+ * guarantees that any freed buffer notifications that are already in
+ * progress will be sent to the remote end before we return, and thus
+ * before the reset is signalled.
+ */
+ flush_delayed_work(&transport->free_bufs_work);
+
+ if (!ret)
+ transport_free_mbuf_pools(transport, service, service_info);
+
+ transport_put_service_info(service_info);
+
+ return ret;
+}
+
+static ssize_t transport_service_send_avail(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+ ssize_t count = 0;
+
+ service_info = service->transport_priv;
+ if (!service_info)
+ return -EINVAL;
+
+ __transport_get_service_info(service_info);
+
+ count = service->send_quota -
+ atomic_read(&service_info->send_inflight);
+
+ transport_put_service_info(service_info);
+
+ return count < 0 ? 0 : count;
+}
+
+static void transport_get_notify_bits(struct vs_transport *_transport,
+ unsigned *send_notify_bits, unsigned *recv_notify_bits)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ *send_notify_bits = transport->notify_tx_nirqs * BITS_PER_LONG;
+ *recv_notify_bits = transport->notify_rx_nirqs * BITS_PER_LONG;
+}
+
+static void transport_get_quota_limits(struct vs_transport *_transport,
+ unsigned *send_quota, unsigned *recv_quota)
+{
+ /*
+ * This driver does not need to enforce a quota limit, because message
+ * buffers are allocated from the kernel heap rather than a fixed
+ * buffer area. The queue length only determines the maximum size of
+ * a message batch, and the number of preallocated RX buffers.
+ *
+ * Note that per-service quotas are still enforced; there is simply no
+ * hard limit on the total of all service quotas.
+ */
+
+ *send_quota = UINT_MAX;
+ *recv_quota = UINT_MAX;
+}
+
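+/*
+ * Transport operations exposed to the session layer; registered via
+ * vs_session_register() in transport_axon_probe().
+ */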
+static const struct vs_transport_vtable tvt = {
+ .alloc_mbuf = transport_alloc_mbuf,
+ .free_mbuf = transport_free_mbuf,
+ .mbuf_size = transport_mbuf_size,
+ .max_mbuf_size = transport_max_mbuf_size,
+ .send = transport_send,
+ .flush = transport_flush,
+ .notify = transport_notify,
+ .reset = transport_reset,
+ .ready = transport_ready,
+ .service_add = transport_service_add,
+ .service_remove = transport_service_remove,
+ .service_start = transport_service_start,
+ .service_reset = transport_service_reset,
+ .service_send_avail = transport_service_send_avail,
+ .get_notify_bits = transport_get_notify_bits,
+ .get_quota_limits = transport_get_quota_limits,
+};
+
+/* Incoming notification handling for client */
+static irqreturn_t transport_axon_notify_virq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+ struct vs_notify_info *n_info;
+ unsigned long offset, bit = 0, notification;
+ int word;
+ okl4_virq_flags_t payload = okl4_get_virq_payload(irq);
+
+ for (word = 0; word < transport->notify_rx_nirqs; word++)
+ if (irq == transport->notify_irq[word])
+ break;
+
+ if (word == transport->notify_rx_nirqs) {
+ dev_err(transport->axon_dev, "Bad IRQ %d\n", irq);
+ return IRQ_NONE;
+ }
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Got notification irq\n");
+
+#if defined(__BIG_ENDIAN)
+ /*
+ * We rely on being able to use the Linux bitmap operations directly
+ * on the VIRQ payload.
+ */
+ BUILD_BUG_ON((sizeof(payload) % sizeof(unsigned long)) != 0);
+#endif
+
+ for_each_set_bit(bit, (unsigned long *)&payload, sizeof(payload) * 8) {
+ offset = bit + word * BITS_PER_LONG;
+
+ /*
+ * We need to know which service ID is associated with each
+ * notification bit. The transport is told about the
+ * notification-bit to service-ID mapping during the initial
+ * handshake protocol.
+ */
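+ /*
+ * For example (illustrative values only): a service with
+ * notify_recv_offset == 3 and notify_recv_bits == 2 owns global
+ * bits 3 and 4 of the first IRQ word; if bit 4 fires, the
+ * session layer is notified with the value 1UL << 1.
+ */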
+ n_info = &transport->transport.notify_info[offset];
+
+ notification = 1UL << (offset - n_info->offset);
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Got notification bit %lu for service %d\n",
+ notification, n_info->service_id);
+
+ /* FIXME: Jira ticket SDK-2145 - shivanik. */
+ vs_session_handle_notify(transport->session_dev, notification,
+ n_info->service_id);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_reset_irq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+ bool do_reset = false;
+
+ u32 payload = okl4_get_virq_payload(irq);
+
+ spin_lock(&transport->readiness_lock);
+
+ if (payload & VS_TRANSPORT_VIRQ_RESET_REQ) {
+ okl4_error_t err;
+
+ transport->readiness = VS_TRANSPORT_RESET;
+
+ /* Flush the queues in both directions */
+ transport_flush_tx_queues(transport);
+ transport_flush_rx_queues(transport);
+
+ /*
+ * When sending an ack, it is important to cancel any earlier
+ * ready notification, so the recipient can safely assume that
+ * the ack precedes any ready it sees.
+ */
+ err = _okl4_sys_vinterrupt_modify(transport->reset_cap,
+ ~VS_TRANSPORT_VIRQ_READY,
+ VS_TRANSPORT_VIRQ_RESET_ACK);
+ if (err != OKL4_OK) {
+ dev_warn(transport->axon_dev,
+ "Error sending reset ack: %d\n", (int)err);
+ }
+
+ /*
+ * Discard any pending ready event; it must have happened
+ * before the reset request was raised, because we had not
+ * yet sent the reset ack.
+ */
+ payload = 0;
+ do_reset = true;
+ } else if (payload & VS_TRANSPORT_VIRQ_RESET_ACK) {
+ transport->readiness = VS_TRANSPORT_RESET;
+
+ /*
+ * Flush the RX queues, as we know at this point that the
+ * other end has flushed its TX queues.
+ */
+ transport_flush_rx_queues(transport);
+
+ /*
+ * Preserve any pending ready event; it must have been
+ * generated after the ack (see above)
+ */
+ payload &= VS_TRANSPORT_VIRQ_READY;
+ do_reset = true;
+ }
+
+ if (do_reset) {
+ /*
+ * Reset the session. Note that duplicate calls to this are
+ * expected if there are duplicate resets; they don't
+ * necessarily match activate calls.
+ */
+ vs_session_handle_reset(transport->session_dev);
+ }
+
+ if (payload & VS_TRANSPORT_VIRQ_READY) {
+ if (transport->readiness == VS_TRANSPORT_RESET) {
+ transport->readiness = VS_TRANSPORT_REMOTE_READY;
+ } else if (transport->readiness == VS_TRANSPORT_LOCAL_READY) {
+ vs_session_handle_activate(transport->session_dev);
+ transport->readiness = VS_TRANSPORT_ACTIVE;
+ } else {
+ /* Ready lost a race with reset; ignore it. */
+ }
+ }
+
+ spin_unlock(&transport->readiness_lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Axon VIRQ handling.
+ */
+static irqreturn_t transport_axon_rx_irq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+ okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+ if (okl4_axon_virq_flags_getfault(&flags)) {
+ dev_err_ratelimited(transport->axon_dev,
+ "fault on RX axon buffer or queue; resetting\n");
+ transport_axon_reset(transport);
+ } else if (okl4_axon_virq_flags_getready(&flags)) {
+ tasklet_schedule(&transport->rx_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_tx_irq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+ okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+ if (okl4_axon_virq_flags_getfault(&flags)) {
+ dev_err_ratelimited(transport->axon_dev,
+ "fault on TX axon buffer or queue; resetting\n");
+ transport_axon_reset(transport);
+ } else if (okl4_axon_virq_flags_getready(&flags)) {
+ spin_lock(&transport->readiness_lock);
+ if (!list_empty(&transport->tx_queue))
+ tasklet_schedule(&transport->tx_tasklet);
+ spin_unlock(&transport->readiness_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
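+/*
+ * RX tasklet: refill the Axon receive queue from the local buffer freelist,
+ * ask the Microvisor to transfer pending messages, then drain and dispatch
+ * whatever arrived. Transient failures (-ENOMEM, -ENOBUFS) are retried
+ * later; any other error apart from -ECONNRESET is treated as fatal.
+ */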
+static void transport_rx_tasklet(unsigned long data)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+ int status;
+ struct _okl4_sys_axon_process_recv_return recv_result;
+
+ /* Refill the RX queue */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ while (!list_empty(&transport->rx_freelist)) {
+ struct vs_axon_rx_freelist_entry *buf;
+ buf = list_first_entry(&transport->rx_freelist,
+ struct vs_axon_rx_freelist_entry, list);
+ list_del(&buf->list);
+ status = transport_rx_queue_buffer(transport, buf, buf->laddr);
+ if (status < 0)
+ list_add(&buf->list, &transport->rx_freelist);
+ if (status <= 0)
+ break;
+ }
+ spin_unlock_irq(&transport->rx_alloc_lock);
+
+ /* Start the transfer */
+ recv_result = _okl4_sys_axon_process_recv(transport->rx_cap,
+ MAX_TRANSFER_CHUNK);
+
+ if (recv_result.error == OKL4_OK) {
+ status = 1;
+ } else {
+ status = okl4_error_to_errno(recv_result.error);
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "rx syscall fail: %d",
+ status);
+ }
+
+ /* Process the received messages */
+ while (status > 0)
+ status = transport_process_msg(transport);
+
+ if (status == -ENOMEM) {
+ /* Give kswapd some time to reclaim pages */
+ mod_timer(&transport->rx_retry_timer, jiffies + HZ);
+ } else if (status == -ENOBUFS) {
+ /*
+ * Reschedule ourselves if more RX buffers are available,
+ * otherwise do nothing until a buffer is freed
+ */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ if (!list_empty(&transport->rx_freelist))
+ tasklet_schedule(&transport->rx_tasklet);
+ spin_unlock_irq(&transport->rx_alloc_lock);
+ } else if (!status && !recv_result.send_empty) {
+ /* There are more messages waiting; reschedule */
+ tasklet_schedule(&transport->rx_tasklet);
+ } else if (status < 0 && status != -ECONNRESET) {
+ /* Something else went wrong, other than a reset */
+ dev_err(transport->axon_dev, "Fatal RX error %d\n", status);
+ transport_fatal_error(transport, "rx failure");
+ } else {
+ /* Axon is empty; wait for an RX interrupt */
+ }
+}
+
+static void transport_tx_tasklet(unsigned long data)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+ struct vs_mbuf_axon *mbuf;
+ vs_service_id_t service_id;
+ int err;
+
+ spin_lock_irq(&transport->readiness_lock);
+
+ /* Check to see if there is anything in the queue to send */
+ if (list_empty(&transport->tx_queue)) {
+ /*
+ * Queue is empty, probably because a service reset cancelled
+ * some pending messages. Nothing to do.
+ */
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+ }
+
+ /*
+ * Try to send the mbuf. If it can't, the channel must be
+ * full again so wait until the next can send event.
+ */
+ mbuf = list_first_entry(&transport->tx_queue, struct vs_mbuf_axon,
+ base.queue);
+
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(mbuf), NULL);
+
+ err = __transport_send(transport, mbuf, service_id,
+ VS_TRANSPORT_SEND_FLAGS_MORE);
+ if (err == -ENOSPC) {
+ /*
+ * The channel is currently full. Leave the message in the
+ * queue and try again when it has emptied.
+ */
+ __transport_flush(transport);
+ goto out_unlock;
+ }
+ if (err) {
+ /*
+ * We cannot properly handle a message send error here because
+ * we have already returned success for the send to the service
+ * driver when the message was queued. We don't want to leave
+ * the message in the queue, since it could cause a DoS if the
+ * error is persistent. Give up and force a transport reset.
+ */
+ dev_err(transport->axon_dev,
+ "Failed to send queued mbuf: %d\n", err);
+ spin_unlock_irq(&transport->readiness_lock);
+ transport_fatal_error(transport, "queued send failure");
+ return;
+ }
+
+ /* Message sent, remove it from the queue and free the local copy */
+ list_del(&mbuf->base.queue);
+ transport_free_sent_mbuf(transport, mbuf);
+
+ /* Check to see if we have run out of messages to send */
+ if (list_empty(&transport->tx_queue)) {
+ /* Nothing left in the queue; flush and return */
+ __transport_flush(transport);
+ } else {
+ /* Reschedule to send the next message */
+ tasklet_schedule(&transport->tx_tasklet);
+ }
+
+out_unlock:
+ spin_unlock_irq(&transport->readiness_lock);
+}
+
+static void transport_rx_retry_timer(unsigned long data)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+
+ /* Try to receive again; hopefully we have memory now */
+ tasklet_schedule(&transport->rx_tasklet);
+}
+
+/* Transport device management */
+
+static int alloc_notify_info(struct device *dev, struct vs_notify_info **info,
+ int *info_size, int virqs)
+{
+ /* Each VIRQ can handle BITS_PER_LONG notifications */
+ *info_size = sizeof(struct vs_notify_info) * (virqs * BITS_PER_LONG);
+ *info = devm_kzalloc(dev, *info_size, GFP_KERNEL);
+ if (!(*info))
+ return -ENOMEM;
+
+ return 0;
+}
+
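+/*
+ * Parse the interrupt-related device tree properties: the axon node carries
+ * the TX and RX completion IRQs, while the virtual-session child node
+ * carries the incoming reset IRQ, any incoming notification IRQs, and the
+ * outgoing VIRQ line caps in "okl,interrupt-lines" (the reset line first,
+ * followed by the notification lines).
+ */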
+static int transport_axon_probe_virqs(struct vs_transport_axon *transport)
+{
+ struct device *device = transport->axon_dev;
+ struct device_node *axon_node = device->of_node;
+ struct device_node *vs_node = transport->of_node;
+ struct irq_data *irqd;
+ struct property *irqlines;
+ int ret, num_virq_lines;
+ struct device_node *virq_node = NULL;
+ u32 cap;
+ int i, irq_count;
+
+ if (of_irq_count(axon_node) < 2) {
+ dev_err(device, "Missing axon interrupts\n");
+ return -ENODEV;
+ }
+
+ irq_count = of_irq_count(vs_node);
+ if (irq_count < 1) {
+ dev_err(device, "Missing reset interrupt\n");
+ return -ENODEV;
+ } else if (irq_count > 1 + MAX_NOTIFICATION_LINES) {
+ dev_warn(device,
+ "Too many notification interrupts; only the first %d will be used\n",
+ MAX_NOTIFICATION_LINES);
+ }
+
+ /* Find the TX and RX axon IRQs and the reset IRQ */
+ transport->tx_irq = irq_of_parse_and_map(axon_node, 0);
+ if (!transport->tx_irq) {
+ dev_err(device, "No TX IRQ\n");
+ return -ENODEV;
+ }
+
+ transport->rx_irq = irq_of_parse_and_map(axon_node, 1);
+ if (!transport->rx_irq) {
+ dev_err(device, "No RX IRQ\n");
+ return -ENODEV;
+ }
+
+ transport->reset_irq = irq_of_parse_and_map(vs_node, 0);
+ if (!transport->reset_irq) {
+ dev_err(device, "No reset IRQ\n");
+ return -ENODEV;
+ }
+ irqd = irq_get_irq_data(transport->reset_irq);
+ if (!irqd) {
+ dev_err(device, "No reset IRQ data\n");
+ return -ENODEV;
+ }
+ transport->reset_okl4_irq = irqd_to_hwirq(irqd);
+
+ /* Find the notification IRQs */
+ /* Clamp to the number of notification IRQ slots we actually have. */
+ transport->notify_rx_nirqs = min_t(int, irq_count - 1, MAX_NOTIFICATION_LINES);
+ for (i = 0; i < transport->notify_rx_nirqs; i++) {
+ transport->notify_irq[i] = irq_of_parse_and_map(vs_node,
+ i + 1);
+ if (!transport->notify_irq[i]) {
+ dev_err(device, "Bad notify IRQ\n");
+ return -ENODEV;
+ }
+ }
+
+ /* Find all outgoing virq lines */
+ irqlines = of_find_property(vs_node, "okl,interrupt-lines", NULL);
+ if (!irqlines || irqlines->length < sizeof(u32)) {
+ dev_err(device, "No VIRQ sources found");
+ return -ENODEV;
+ }
+ num_virq_lines = irqlines->length / sizeof(u32);
+
+ virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines", 0);
+ if (!virq_node) {
+ dev_err(device, "No reset VIRQ line object\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(virq_node, "reg", &cap);
+ if (ret || cap == OKL4_KCAP_INVALID) {
+ dev_err(device, "Bad reset VIRQ line\n");
+ return -ENODEV;
+ }
+ transport->reset_cap = cap;
+
+ transport->notify_tx_nirqs = num_virq_lines - 1;
+ for (i = 0; i < transport->notify_tx_nirqs; i++) {
+ virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines",
+ i + 1);
+ if (!virq_node) {
+ dev_err(device, "No notify VIRQ line object\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(virq_node, "reg", &cap);
+ if (ret || cap == OKL4_KCAP_INVALID) {
+ dev_err(device, "Bad notify VIRQ line\n");
+ return -ENODEV;
+ }
+ transport->notify_cap[i] = cap;
+ }
+
+ return 0;
+}
+
+static int transport_axon_request_irqs(struct vs_transport_axon *transport)
+{
+ struct device *device = transport->axon_dev;
+ int i, ret;
+
+ ret = devm_request_irq(device, transport->reset_irq,
+ transport_axon_reset_irq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(device, transport->tx_irq,
+ transport_axon_tx_irq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(device, transport->rx_irq,
+ transport_axon_rx_irq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < transport->notify_rx_nirqs; i++) {
+ ret = devm_request_irq(device, transport->notify_irq[i],
+ transport_axon_notify_virq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
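+/*
+ * Allocate and initialise the in-memory Axon queue descriptors (one DMA
+ * block shared by the RX and TX queues), look up and attach the OKL4
+ * physical segment backing them, and create the DMA pool used for RX
+ * buffers.
+ */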
+static int transport_axon_setup_descs(struct vs_transport_axon *transport)
+{
+ const int rx_buffer_order = ilog2(transport->msg_size +
+ sizeof(vs_service_id_t));
+ const size_t rx_queue_size = sizeof(*transport->rx) +
+ (sizeof(*transport->rx_descs) * transport->queue_size) +
+ (sizeof(*transport->rx_ptrs) * transport->queue_size);
+ const size_t tx_queue_size = sizeof(*transport->tx) +
+ (sizeof(*transport->tx_descs) * transport->queue_size);
+ const size_t queue_size = ALIGN(rx_queue_size,
+ __alignof__(*transport->tx)) + tx_queue_size;
+
+ struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+ void *queue;
+ struct device_node *seg_node;
+ u32 seg_index;
+ okl4_kcap_t seg_cap;
+ okl4_error_t err;
+ dma_addr_t dma_handle;
+ const __be32 *prop;
+ int len, ret;
+
+ /*
+ * Allocate memory for the queue descriptors.
+ *
+ * We allocate one block for both rx and tx because the minimum
+ * allocation from dmam_alloc_coherent is usually a whole page.
+ */
+ ret = -ENOMEM;
+ queue = dmam_alloc_coherent(transport->axon_dev, queue_size,
+ &dma_handle, GFP_KERNEL);
+ if (queue == NULL) {
+ dev_err(transport->axon_dev, "Failed to allocate %zd bytes for queue descriptors\n",
+ queue_size);
+ goto fail_alloc_dma;
+ }
+ memset(queue, 0, queue_size);
+
+ /*
+ * Find the OKL4 physical segment object to attach to the axons.
+ *
+ * If the device has a CMA area, and the cell's memory segments have
+ * not been split unnecessarily, then all allocations through the DMA
+ * API for this device will be within a single segment. So, we can
+ * simply look up the segment that contains the queue.
+ *
+ * The location and size of the CMA area can be configured elsewhere.
+ * In 3.12 and later a device-specific area can be reserved via the
+ * standard device tree reserved-memory properties. Otherwise, the
+ * global area will be used, which has a size configurable on the
+ * kernel command line and defaults to 16MB.
+ */
+
+ /* Locate the physical segment */
+ ret = -ENODEV;
+ lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+ dma_handle >> OKL4_DEFAULT_PAGEBITS, -1);
+ err = okl4_mmu_lookup_index_geterror(&lookup_return.segment_index);
+ if (err == OKL4_ERROR_NOT_IN_SEGMENT) {
+ dev_err(transport->axon_dev,
+ "No segment found for DMA address %pK (%#llx)!\n",
+ queue, (unsigned long long)dma_handle);
+ goto fail_lookup_segment;
+ }
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev,
+ "Could not look up segment for DMA address %pK (%#llx): OKL4 error %d\n",
+ queue, (unsigned long long)dma_handle,
+ (int)err);
+ goto fail_lookup_segment;
+ }
+ seg_index = okl4_mmu_lookup_index_getindex(&lookup_return.segment_index);
+
+ dev_dbg(transport->axon_dev, "lookup pn %#lx got error %ld segment %ld count %lu offset %#lx\n",
+ (long)(dma_handle >> OKL4_DEFAULT_PAGEBITS),
+ (long)err, (long)seg_index,
+ (unsigned long)lookup_return.count_pn,
+ (unsigned long)lookup_return.offset_pn);
+
+ /* Locate the physical segment's OF node */
+ for_each_compatible_node(seg_node, NULL, "okl,microvisor-segment") {
+ u32 attach_index;
+ ret = of_property_read_u32(seg_node, "okl,segment-attachment",
+ &attach_index);
+ if (!ret && attach_index == seg_index)
+ break;
+ }
+ if (seg_node == NULL) {
+ ret = -ENXIO;
+ dev_err(transport->axon_dev, "No physical segment found for %pK\n",
+ queue);
+ goto fail_lookup_segment;
+ }
+
+ /* Determine the physical segment's cap */
+ prop = of_get_property(seg_node, "reg", &len);
+ ret = !!prop ? 0 : -EPERM;
+ if (!ret)
+ seg_cap = of_read_number(prop, of_n_addr_cells(seg_node));
+ if (!ret && seg_cap == OKL4_KCAP_INVALID)
+ ret = -ENXIO;
+ if (ret < 0) {
+ dev_err(transport->axon_dev, "missing physical-segment cap\n");
+ goto fail_lookup_segment;
+ }
+ transport->segment = seg_cap;
+ transport->segment_base =
+ (round_down(dma_handle >> OKL4_DEFAULT_PAGEBITS,
+ lookup_return.count_pn) -
+ lookup_return.offset_pn) << OKL4_DEFAULT_PAGEBITS;
+
+ dev_dbg(transport->axon_dev, "physical segment cap is %#lx, base %#llx\n",
+ (unsigned long)transport->segment,
+ (unsigned long long)transport->segment_base);
+
+ /* Attach the segment to the Axon endpoints */
+ err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX attach failed: %d\n",
+ (int)err);
+ ret = okl4_error_to_errno(err);
+ goto fail_attach;
+ }
+
+ err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX attach failed: %d\n",
+ (int)err);
+ ret = okl4_error_to_errno(err);
+ goto fail_attach;
+ }
+
+ /* Array of pointers to the source TX pool for each outgoing buffer. */
+ transport->tx_pools = devm_kzalloc(transport->axon_dev,
+ sizeof(*transport->tx_pools) * transport->queue_size,
+ GFP_KERNEL);
+ if (!transport->tx_pools) {
+ ret = -ENOMEM;
+ goto fail_alloc_tx_pools;
+ }
+
+ /* Set up the rx queue descriptors. */
+ transport->rx = queue;
+ transport->rx_phys = dma_handle;
+ transport->rx_size = rx_queue_size;
+ transport->rx_descs = (void *)(transport->rx + 1);
+ transport->rx_ptrs = (void *)(transport->rx_descs + transport->queue_size);
+ okl4_axon_queue_size_setallocorder(&transport->rx->queue_sizes[0],
+ rx_buffer_order);
+ transport->rx->queues[0].queue_offset = sizeof(*transport->rx);
+ transport->rx->queues[0].entries = transport->queue_size;
+ transport->rx->queues[0].uptr = 0;
+ transport->rx->queues[0].kptr = 0;
+ transport->rx_uptr_allocated = 0;
+
+ /* Set up the tx queue descriptors. */
+ transport->tx = queue + ALIGN(rx_queue_size,
+ __alignof__(*transport->tx));
+ transport->tx_phys = dma_handle + ((void *)transport->tx - queue);
+ transport->tx_size = tx_queue_size;
+ transport->tx_descs = (void *)(transport->tx + 1);
+ transport->tx->queues[0].queue_offset = sizeof(*transport->tx);
+ transport->tx->queues[0].entries = transport->queue_size;
+ transport->tx->queues[0].uptr = 0;
+ transport->tx->queues[0].kptr = 0;
+ transport->tx_uptr_freed = 0;
+
+ /* Create a DMA pool for the RX buffers. */
+ transport->rx_pool = dmam_pool_create("vs_axon_rx_pool",
+ transport->axon_dev, 1 << rx_buffer_order,
+ max(dma_get_cache_alignment(),
+ 1 << OKL4_PRESHIFT_LADDR_AXON_DATA_INFO), 0);
+
+ return 0;
+
+fail_alloc_tx_pools:
+fail_attach:
+fail_lookup_segment:
+ dmam_free_coherent(transport->axon_dev, queue_size, queue, dma_handle);
+fail_alloc_dma:
+ return ret;
+}
+
+static void transport_axon_free_descs(struct vs_transport_axon *transport)
+{
+ int i;
+
+ tasklet_disable(&transport->rx_tasklet);
+ tasklet_kill(&transport->rx_tasklet);
+
+ tasklet_disable(&transport->tx_tasklet);
+ tasklet_kill(&transport->tx_tasklet);
+
+ cancel_delayed_work_sync(&transport->free_bufs_work);
+
+ transport->tx = NULL;
+ transport->tx_descs = NULL;
+
+ for (i = 0; i < transport->rx->queues[0].entries; i++) {
+ struct okl4_axon_queue_entry *desc = &transport->rx_descs[i];
+
+ if (okl4_axon_data_info_getusr(&desc->info)) {
+ void *ptr = transport->rx_ptrs[i];
+ dma_addr_t dma = okl4_axon_data_info_getladdr(&desc->info);
+ dma_pool_free(transport->rx_pool, ptr, dma);
+ }
+ }
+
+ transport->rx = NULL;
+ transport->rx_descs = NULL;
+ transport->rx_ptrs = NULL;
+
+ /* Let devm free the queues so we don't have to keep the dma handle */
+}
+
+static int transport_axon_probe(struct platform_device *dev)
+{
+ struct vs_transport_axon *priv = NULL;
+ u32 cap[2];
+ u32 queue_size, msg_size;
+ int ret, i;
+ const char *name;
+
+ if (!dev_get_cma_area(&dev->dev) && !okl4_single_physical_segment) {
+ dev_err(&dev->dev, "Multiple physical segments, but CMA is disabled\n");
+ return -ENOSYS;
+ }
+
+ dev->dev.coherent_dma_mask = ~(u64)0;
+ dev->dev.archdata.dma_ops = &axon_dma_ops;
+
+ priv = devm_kzalloc(&dev->dev, sizeof(struct vs_transport_axon) +
+ sizeof(unsigned long), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&dev->dev, "create transport object failed\n");
+ ret = -ENOMEM;
+ goto err_alloc_priv;
+ }
+ dev_set_drvdata(&dev->dev, priv);
+
+ priv->of_node = of_get_child_by_name(dev->dev.of_node,
+ "virtual-session");
+ if ((!priv->of_node) ||
+ (!of_device_is_compatible(priv->of_node,
+ "okl,virtual-session"))) {
+ dev_err(&dev->dev, "missing virtual-session node\n");
+ ret = -ENODEV;
+ goto error_of_node;
+ }
+
+ name = dev->dev.of_node->full_name;
+ of_property_read_string(dev->dev.of_node, "label", &name);
+
+ if (of_property_read_bool(priv->of_node, "okl,is-client")) {
+ priv->is_server = false;
+ } else if (of_property_read_bool(priv->of_node, "okl,is-server")) {
+ priv->is_server = true;
+ } else {
+ dev_err(&dev->dev, "virtual-session node is not marked as client or server\n");
+ ret = -ENODEV;
+ goto error_of_node;
+ }
+
+ priv->transport.vt = &tvt;
+ priv->transport.type = "microvisor";
+ priv->axon_dev = &dev->dev;
+
+ /* Read the Axon caps */
+ ret = of_property_read_u32_array(dev->dev.of_node, "reg", cap, 2);
+ if (ret < 0 || cap[0] == OKL4_KCAP_INVALID ||
+ cap[1] == OKL4_KCAP_INVALID) {
+ dev_err(&dev->dev, "missing axon endpoint caps\n");
+ ret = -ENODEV;
+ goto error_of_node;
+ }
+ priv->tx_cap = cap[0];
+ priv->rx_cap = cap[1];
+
+ /* Set transport properties; default to a 64kb buffer */
+ queue_size = 16;
+ (void)of_property_read_u32(priv->of_node, "okl,queue-length",
+ &queue_size);
+ priv->queue_size = max((size_t)queue_size, MIN_QUEUE_SIZE);
+
+ msg_size = PAGE_SIZE - sizeof(vs_service_id_t);
+ (void)of_property_read_u32(priv->of_node, "okl,message-size",
+ &msg_size);
+ priv->msg_size = max((size_t)msg_size, MIN_MSG_SIZE);
+
+ /*
+ * Since the Axon API requires received message size limits to be
+ * powers of two, we must round up the message size (including the
+ * space reserved for the service ID).
+ */
+ priv->msg_size = roundup_pow_of_two(priv->msg_size +
+ sizeof(vs_service_id_t)) - sizeof(vs_service_id_t);
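+ /*
+ * After this rounding, msg_size plus the service ID tag is an exact
+ * power of two, which transport_axon_init_tx_pool() relies on when it
+ * computes each pool's allocation order with ilog2().
+ */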
+ if (priv->msg_size != msg_size)
+ dev_info(&dev->dev, "message size rounded up from %zd to %zd\n",
+ (size_t)msg_size, priv->msg_size);
+
+ INIT_LIST_HEAD(&priv->tx_queue);
+
+ /* Initialise the activation state, tasklets, and RX retry timer */
+ spin_lock_init(&priv->readiness_lock);
+ priv->readiness = VS_TRANSPORT_INIT;
+
+ tasklet_init(&priv->rx_tasklet, transport_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_tasklet, transport_tx_tasklet,
+ (unsigned long)priv);
+
+ INIT_DELAYED_WORK(&priv->free_bufs_work, transport_free_bufs_work);
+ spin_lock_init(&priv->rx_alloc_lock);
+ priv->rx_alloc_extra = 0;
+ INIT_LIST_HEAD(&priv->rx_freelist);
+
+ setup_timer(&priv->rx_retry_timer, transport_rx_retry_timer,
+ (unsigned long)priv);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+ set_timer_slack(&priv->rx_retry_timer, HZ);
+#endif
+
+ /* Keep RX disabled until the core service is ready. */
+ tasklet_disable(&priv->rx_tasklet);
+
+ ret = transport_axon_probe_virqs(priv);
+ if (ret < 0)
+ goto err_probe_virqs;
+
+ if (priv->notify_rx_nirqs) {
+ ret = alloc_notify_info(&dev->dev, &priv->transport.notify_info,
+ &priv->transport.notify_info_size,
+ priv->notify_rx_nirqs);
+ if (ret < 0) {
+ dev_err(&dev->dev, "Alloc notify_info failed\n");
+ goto err_alloc_notify;
+ }
+ } else {
+ priv->transport.notify_info = NULL;
+ priv->transport.notify_info_size = 0;
+ }
+
+ priv->free_bufs_pool = transport_axon_init_tx_pool(priv, priv->msg_size,
+ FREE_BUFS_QUOTA);
+ if (IS_ERR(priv->free_bufs_pool)) {
+ ret = PTR_ERR(priv->free_bufs_pool);
+ goto err_init_free_bufs_pool;
+ }
+
+ ret = transport_axon_setup_descs(priv);
+ if (ret < 0)
+ goto err_setup_descs;
+
+ /* Allocate RX buffers for free bufs messages */
+ for (i = 0; i < FREE_BUFS_QUOTA; i++) {
+ dma_addr_t laddr;
+ struct vs_axon_rx_freelist_entry *buf =
+ dma_pool_alloc(priv->rx_pool, GFP_KERNEL, &laddr);
+ if (!buf)
+ goto err_alloc_rx_free_bufs;
+ buf->laddr = laddr;
+
+ spin_lock_irq(&priv->rx_alloc_lock);
+ list_add_tail(&buf->list, &priv->rx_freelist);
+ spin_unlock_irq(&priv->rx_alloc_lock);
+ }
+
+ /* Set up the session device */
+ priv->session_dev = vs_session_register(&priv->transport, &dev->dev,
+ priv->is_server, name);
+ if (IS_ERR(priv->session_dev)) {
+ ret = PTR_ERR(priv->session_dev);
+ dev_err(&dev->dev, "failed to register session: %d\n", ret);
+ goto err_session_register;
+ }
+
+ /*
+ * Start the core service. Note that it can't actually communicate
+ * until the initial reset completes.
+ */
+ vs_session_start(priv->session_dev);
+
+ /*
+ * Reset the transport. This will also set the Axons' segment
+ * attachments, and eventually the Axons' queue pointers (once the
+ * session marks the transport ready).
+ */
+ transport_reset(&priv->transport);
+
+ /*
+ * We're ready to start handling IRQs at this point, so register the
+ * handlers.
+ */
+ ret = transport_axon_request_irqs(priv);
+ if (ret < 0)
+ goto err_irq_register;
+
+ return 0;
+
+err_irq_register:
+ vs_session_unregister(priv->session_dev);
+err_session_register:
+err_alloc_rx_free_bufs:
+ transport_axon_free_descs(priv);
+err_setup_descs:
+ transport_axon_put_tx_pool(priv->free_bufs_pool);
+err_init_free_bufs_pool:
+ if (priv->transport.notify_info)
+ devm_kfree(&dev->dev, priv->transport.notify_info);
+err_alloc_notify:
+err_probe_virqs:
+ del_timer_sync(&priv->rx_retry_timer);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ cancel_delayed_work_sync(&priv->free_bufs_work);
+error_of_node:
+ devm_kfree(&dev->dev, priv);
+err_alloc_priv:
+ return ret;
+}
+
+static int transport_axon_remove(struct platform_device *dev)
+{
+ struct vs_transport_axon *priv = dev_get_drvdata(&dev->dev);
+ int i;
+
+ for (i = 0; i < priv->notify_rx_nirqs; i++)
+ devm_free_irq(&dev->dev, priv->notify_irq[i], priv);
+
+ devm_free_irq(&dev->dev, priv->rx_irq, priv);
+ irq_dispose_mapping(priv->rx_irq);
+ devm_free_irq(&dev->dev, priv->tx_irq, priv);
+ irq_dispose_mapping(priv->tx_irq);
+ devm_free_irq(&dev->dev, priv->reset_irq, priv);
+ irq_dispose_mapping(priv->reset_irq);
+
+ del_timer_sync(&priv->rx_retry_timer);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ cancel_delayed_work_sync(&priv->free_bufs_work);
+
+ priv->readiness = VS_TRANSPORT_SHUTDOWN;
+ vs_session_unregister(priv->session_dev);
+ WARN_ON(priv->readiness != VS_TRANSPORT_SHUTDOWN);
+
+ transport_axon_free_descs(priv);
+ transport_axon_put_tx_pool(priv->free_bufs_pool);
+
+ if (priv->transport.notify_info)
+ devm_kfree(&dev->dev, priv->transport.notify_info);
+
+ free_tx_mbufs(priv);
+
+ flush_workqueue(work_queue);
+
+ while (!list_empty(&priv->rx_freelist)) {
+ struct vs_axon_rx_freelist_entry *buf;
+ buf = list_first_entry(&priv->rx_freelist,
+ struct vs_axon_rx_freelist_entry, list);
+ list_del(&buf->list);
+ dma_pool_free(priv->rx_pool, buf, buf->laddr);
+ }
+
+ devm_kfree(&dev->dev, priv);
+ return 0;
+}
+
+static const struct of_device_id transport_axon_of_match[] = {
+ { .compatible = "okl,microvisor-axon-transport", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, transport_axon_of_match);
+
+static struct platform_driver transport_axon_driver = {
+ .probe = transport_axon_probe,
+ .remove = transport_axon_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+ .of_match_table = of_match_ptr(transport_axon_of_match),
+ },
+};
+
+static int __init vs_transport_axon_init(void)
+{
+ int ret;
+ okl4_error_t err;
+ struct device_node *cpus;
+ struct zone *zone;
+ struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+ u32 last_seen_attachment = -1;
+ bool first_attachment;
+
+ printk(KERN_INFO "Virtual Services transport driver for OKL4 Axons\n");
+
+ /* Allocate the Axon cleanup workqueue */
+ work_queue = alloc_workqueue("axon_cleanup", 0, 0);
+ if (!work_queue) {
+ ret = -ENOMEM;
+ goto fail_create_workqueue;
+ }
+
+ /* Locate the MMU capability, needed for lookups */
+ cpus = of_find_node_by_path("/cpus");
+ if (IS_ERR_OR_NULL(cpus)) {
+ ret = -EINVAL;
+ goto fail_mmu_cap;
+ }
+ ret = of_property_read_u32(cpus, "okl,vmmu-capability", &okl4_mmu_cap);
+ if (ret) {
+ goto fail_mmu_cap;
+ }
+ if (okl4_mmu_cap == OKL4_KCAP_INVALID) {
+ printk(KERN_ERR "%s: OKL4 MMU capability not found\n", __func__);
+ ret = -EPERM;
+ goto fail_mmu_cap;
+ }
+
+ /*
+ * Determine whether there are multiple OKL4 physical memory segments
+ * in this Cell. If so, every transport device must have a valid CMA
+ * region, to guarantee that its buffer allocations all come from the
+ * segment that is attached to the axon endpoints.
+ *
+ * We assume that each zone is contiguously mapped in stage 2 with a
+ * constant physical-to-IPA offset, typically 0. The weaver won't
+ * violate this assumption for Linux (or other HLOS) guests unless it
+ * is explicitly told to.
+ */
+ okl4_single_physical_segment = true;
+ first_attachment = true;
+ for_each_zone(zone) {
+ u32 attachment;
+
+ /* We only care about zones that the page allocator is using */
+ if (!zone->managed_pages)
+ continue;
+
+ /* Find the segment at the start of the zone */
+ lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+ zone->zone_start_pfn, -1);
+ err = okl4_mmu_lookup_index_geterror(
+ &lookup_return.segment_index);
+ if (err != OKL4_OK) {
+ printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+ __func__);
+ okl4_single_physical_segment = false;
+ break;
+ }
+ attachment = okl4_mmu_lookup_index_getindex(
+ &lookup_return.segment_index);
+
+ if (first_attachment) {
+ last_seen_attachment = attachment;
+ first_attachment = false;
+ } else if (last_seen_attachment != attachment) {
+ okl4_single_physical_segment = false;
+ break;
+ }
+
+ /* Find the segment at the end of the zone */
+ lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+ zone_end_pfn(zone) - 1, -1);
+ err = okl4_mmu_lookup_index_geterror(
+ &lookup_return.segment_index);
+ if (err != OKL4_OK) {
+ printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+ __func__);
+ okl4_single_physical_segment = false;
+ break;
+ }
+ attachment = okl4_mmu_lookup_index_getindex(
+ &lookup_return.segment_index);
+
+ /* Check that it's still the same segment */
+ if (last_seen_attachment != attachment) {
+ okl4_single_physical_segment = false;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "%s: physical segment count %s\n", __func__,
+ okl4_single_physical_segment ? "1" : ">1");
+#endif
+
+ mbuf_cache = KMEM_CACHE(vs_mbuf_axon, 0UL);
+ if (!mbuf_cache) {
+ ret = -ENOMEM;
+ goto kmem_cache_failed;
+ }
+
+ ret = platform_driver_register(&transport_axon_driver);
+ if (ret)
+ goto register_plat_driver_failed;
+
+ return ret;
+
+register_plat_driver_failed:
+ kmem_cache_destroy(mbuf_cache);
+ mbuf_cache = NULL;
+kmem_cache_failed:
+fail_mmu_cap:
+ if (work_queue)
+ destroy_workqueue(work_queue);
+fail_create_workqueue:
+ return ret;
+}
+
+static void __exit vs_transport_axon_exit(void)
+{
+ platform_driver_unregister(&transport_axon_driver);
+
+ rcu_barrier();
+
+ if (mbuf_cache)
+ kmem_cache_destroy(mbuf_cache);
+ mbuf_cache = NULL;
+
+ if (work_queue)
+ destroy_workqueue(work_queue);
+}
+
+module_init(vs_transport_axon_init);
+module_exit(vs_transport_axon_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 37e2b8211006..716d26441747 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1465,9 +1465,9 @@ submit_and_realloc:
bio = NULL;
goto set_error_page;
}
+ if (bio_encrypted)
+ fscrypt_set_ice_dun(inode, bio, dun);
}
- if (bio_encrypted)
- fscrypt_set_ice_dun(inode, bio, dun);
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
diff --git a/include/Kbuild b/include/Kbuild
index bab1145bc7a7..9205b04e5087 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,2 +1,6 @@
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below
+
+ifneq ($(VSERVICES_SUPPORT), "")
+header-y += vservices/
+endif
diff --git a/include/asm-generic/okl4_virq.h b/include/asm-generic/okl4_virq.h
new file mode 100644
index 000000000000..2eca110fa14d
--- /dev/null
+++ b/include/asm-generic/okl4_virq.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-generic/okl4_virq.h
+ *
+ * Copyright (c) 2017 General Dynamics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OKL4_VIRQ_H__
+#define __OKL4_VIRQ_H__
+
+#include <linux/irq.h>
+#include <microvisor/microvisor.h>
+
+static inline okl4_virq_flags_t okl4_get_virq_payload(unsigned int irq)
+{
+ struct irq_data *irqd = irq_get_irq_data(irq);
+
+ if (WARN_ON_ONCE(!irqd))
+ return 0;
+
+ return _okl4_sys_interrupt_get_payload(irqd_to_hwirq(irqd)).payload;
+}
+
+#endif
diff --git a/include/linux/Kbuild.vservices b/include/linux/Kbuild.vservices
new file mode 100644
index 000000000000..392f559f9fde
--- /dev/null
+++ b/include/linux/Kbuild.vservices
@@ -0,0 +1,3 @@
+#
+# Virtual Services headers which need to be exported for user-space
+#
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index af0b4346687b..b75c22f7bab8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -103,6 +103,7 @@ struct request {
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
sector_t __sector; /* sector cursor */
+ u64 __dun; /* dun for UFS */
struct bio *bio;
struct bio *biotail;
@@ -866,6 +867,11 @@ static inline sector_t blk_rq_pos(const struct request *rq)
return rq->__sector;
}
+static inline sector_t blk_rq_dun(const struct request *rq)
+{
+ return rq->__dun;
+}
+
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->__data_len;
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e7116d..e030a68ead7e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page,
list_add(&page->lru, &lruvec->lists[lru]);
}
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
+{
+ update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c28c28a1147..815d0f41e288 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -245,8 +245,6 @@ struct lruvec {
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a41244fe58d0..2097f882c5cf 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -313,6 +313,10 @@ struct tty_struct {
wait_queue_head_t write_wait;
wait_queue_head_t read_wait;
struct work_struct hangup_work;
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ int delayed_work;
+ struct delayed_work echo_delayed_work;
+#endif
void *disc_data;
void *driver_data;
spinlock_t files_lock; /* protects tty_files list */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 797100e10010..1a001fc43499 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -43,7 +43,7 @@ enum writeback_sync_modes {
*/
enum wb_reason {
WB_REASON_BACKGROUND,
- WB_REASON_TRY_TO_FREE_PAGES,
+ WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,
diff --git a/include/microvisor/kernel/microvisor.h b/include/microvisor/kernel/microvisor.h
new file mode 100644
index 000000000000..1a30d1fe18d0
--- /dev/null
+++ b/include/microvisor/kernel/microvisor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+
+#ifndef __AUTO__MICROVISOR_H__
+#define __AUTO__MICROVISOR_H__
+
+/** SDK Major number */
+#define OKL4_SDK_VERSION_MAJOR 5
+/** SDK Minor number */
+#define OKL4_SDK_VERSION_MINOR 3
+/**
+ * If defined, indicates this is an internal development version.
+ * In this case, OKL4_SDK_VERSION_RELEASE == -1
+ */
+#define OKL4_SDK_VERSION_DEVELOPMENT 1
+/** SDK Release (revision) number */
+#define OKL4_SDK_VERSION_RELEASE (-1)
+/** SDK Maintenance number. Indicates the maintenance sequence revision. */
+#define OKL4_SDK_VERSION_MAINTENANCE 0
+
+
+/** @addtogroup lib_microvisor_helpers Microvisor Helpers
+ * @{
+ */
+
+/** Common C and ASM defines. */
+
+/** OKL4 Kernel supports a Virtual CPU (vCPU) interface. */
+#define OKL4_VCPU_SUPPORT
+
+
+/** OKL4 Kernel vCPU API supports SMP guest cells. */
+#define OKL4_VCPU_SMP_SUPPORT
+
+
+/** @} */
+#endif /* __AUTO__MICROVISOR_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/offsets.h b/include/microvisor/kernel/offsets.h
new file mode 100644
index 000000000000..9517acf554e1
--- /dev/null
+++ b/include/microvisor/kernel/offsets.h
@@ -0,0 +1,1534 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+#ifndef __AUTO__MICROVISOR_OFFSETS_H__
+#define __AUTO__MICROVISOR_OFFSETS_H__
+
+#if defined(ASSEMBLY)
+/* LWEE structure's type offsets */
+
+/**
+ * Offsets for struct okl4_atomic_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_REGISTER_SIZE) */
+#define OKL4_STRUCT_ATOMIC_REGISTER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_REGISTER_VALUE) */
+#define OKL4_OFS_ATOMIC_REGISTER_VALUE (0)
+/**
+ * Offsets for struct okl4_atomic_uint16
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT16_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT16_SIZE (2)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT16_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT16_VALUE (0)
+/**
+ * Offsets for struct okl4_atomic_uint32
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT32_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT32_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT32_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT32_VALUE (0)
+/**
+ * Offsets for struct okl4_atomic_uint64
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT64_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT64_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT64_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT64_VALUE (0)
+/**
+ * Offsets for struct okl4_atomic_uint8
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT8_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT8_SIZE (1)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT8_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT8_VALUE (0)
+/**
+ * Offsets for struct okl4_axon_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_DATA_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_KCAP) */
+#define OKL4_OFS_AXON_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_SEGMENT) */
+#define OKL4_OFS_AXON_DATA_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_VIRQ) */
+#define OKL4_OFS_AXON_DATA_VIRQ (8)
+/**
+ * Offsets for struct okl4_axon_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_EP_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_EP_DATA_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX) */
+#define OKL4_OFS_AXON_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX) */
+#define OKL4_OFS_AXON_EP_DATA_TX (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_TX_VIRQ (20)
+/**
+ * Offsets for struct okl4_axon_queue
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRIES) */
+#define OKL4_OFS_AXON_QUEUE_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_KPTR) */
+#define OKL4_OFS_AXON_QUEUE_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_UPTR) */
+#define OKL4_OFS_AXON_QUEUE_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING0_2) */
+#define OKL4_OFS_AXON_QUEUE___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING1_3) */
+#define OKL4_OFS_AXON_QUEUE___PADDING1_3 (11)
+/**
+ * Offsets for struct okl4_axon_queue_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_INFO) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_INFO (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE (16)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7 (23)
+/**
+ * Offsets for struct okl4_axon_rx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_RX_SIZE) */
+#define OKL4_STRUCT_AXON_RX_SIZE (56)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES) */
+#define OKL4_OFS_AXON_RX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0) */
+#define OKL4_OFS_AXON_RX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1) */
+#define OKL4_OFS_AXON_RX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3 (47)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_0) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_0 (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_1) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_1 (50)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_2) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_2 (52)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_3) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_3 (54)
+/**
+ * Offsets for struct okl4_axon_tx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_TX_SIZE) */
+#define OKL4_STRUCT_AXON_TX_SIZE (48)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES) */
+#define OKL4_OFS_AXON_TX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0) */
+#define OKL4_OFS_AXON_TX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1) */
+#define OKL4_OFS_AXON_TX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3 (47)
+/**
+ * Offsets for struct okl4_range_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_RANGE_ITEM_SIZE) */
+#define OKL4_STRUCT_RANGE_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_BASE) */
+#define OKL4_OFS_RANGE_ITEM_BASE (0)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_SIZE) */
+#define OKL4_OFS_RANGE_ITEM_SIZE (8)
+/**
+ * Offsets for struct okl4_virtmem_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_ITEM_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE (8)
+/**
+ * Offsets for struct okl4_cell_management_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE (104)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA (24)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE (32)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU (40)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4 (44)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5 (45)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6 (46)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7 (47)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME (48)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP (56)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ (60)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX (64)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4 (68)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5 (69)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6 (70)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7 (71)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS (72)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS (80)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE (88)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP (89)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED (90)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED (91)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE (92)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5 (93)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6 (94)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7 (95)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS (96)
+/**
+ * Offsets for struct okl4_cell_management
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEMS (8)
+/**
+ * Offsets for struct okl4_segment_mapping
+ **/
+/*lint -esym(621, OKL4_STRUCT_SEGMENT_MAPPING_SIZE) */
+#define OKL4_STRUCT_SEGMENT_MAPPING_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_SIZE) */
+#define OKL4_OFS_SEGMENT_MAPPING_SIZE (8)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR (16)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_CAP) */
+#define OKL4_OFS_SEGMENT_MAPPING_CAP (24)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_DEVICE) */
+#define OKL4_OFS_SEGMENT_MAPPING_DEVICE (28)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_OWNED) */
+#define OKL4_OFS_SEGMENT_MAPPING_OWNED (29)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING0_6) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING0_6 (30)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING1_7) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING1_7 (31)
+/**
+ * Offsets for struct okl4_cell_management_segments
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS (8)
+/**
+ * Offsets for struct okl4_cell_management_vcpus
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS (4)
+/**
+ * Offsets for struct _okl4_env_hdr
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_HDR_SIZE) */
+#define _OKL4_STRUCT_ENV_HDR_SIZE (4)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_HDR_COUNT (2)
+/**
+ * Offsets for struct _okl4_env_item
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_ITEM_SIZE) */
+#define _OKL4_STRUCT_ENV_ITEM_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_NAME) */
+#define _OKL4_OFS_ENV_ITEM_NAME (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_ITEM) */
+#define _OKL4_OFS_ENV_ITEM_ITEM (8)
+/**
+ * Offsets for struct _okl4_env
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_SIZE) */
+#define _OKL4_STRUCT_ENV_SIZE (8)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR) */
+#define _OKL4_OFS_ENV_ENV_HDR (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_ENV_HDR_COUNT (2)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING0_4) */
+#define _OKL4_OFS_ENV___PADDING0_4 (4)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING1_5) */
+#define _OKL4_OFS_ENV___PADDING1_5 (5)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING2_6) */
+#define _OKL4_OFS_ENV___PADDING2_6 (6)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING3_7) */
+#define _OKL4_OFS_ENV___PADDING3_7 (7)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_ITEM) */
+#define _OKL4_OFS_ENV_ENV_ITEM (8)
+/**
+ * Offsets for struct okl4_env_access_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_CELL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NAME) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY) */
+#define OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY (12)
+/**
+ * Offsets for struct okl4_env_access_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX (28)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS (32)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS (36)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME (40)
+/**
+ * Offsets for struct okl4_env_access_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_CELLS (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES (16)
+/**
+ * Offsets for struct okl4_env_args
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ARGS_SIZE) */
+#define OKL4_STRUCT_ENV_ARGS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGC) */
+#define OKL4_OFS_ENV_ARGS_ARGC (0)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING0_4) */
+#define OKL4_OFS_ENV_ARGS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING1_5) */
+#define OKL4_OFS_ENV_ARGS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING2_6) */
+#define OKL4_OFS_ENV_ARGS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING3_7) */
+#define OKL4_OFS_ENV_ARGS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGV) */
+#define OKL4_OFS_ENV_ARGS_ARGV (8)
+/**
+ * Offsets for struct okl4_env_interrupt_device_map
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES (4)
+/**
+ * Offsets for struct okl4_interrupt
+ **/
+/*lint -esym(621, OKL4_STRUCT_INTERRUPT_SIZE) */
+#define OKL4_STRUCT_INTERRUPT_SIZE (4)
+/*lint -esym(621, OKL4_OFS_INTERRUPT_KCAP) */
+#define OKL4_OFS_INTERRUPT_KCAP (0)
+/**
+ * Offsets for struct okl4_env_interrupt_handle
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP (4)
+/**
+ * Offsets for struct okl4_env_interrupt_list
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT (16)
+/**
+ * Offsets for struct okl4_env_profile_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CELL_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_0) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_0 (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_1) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_1 (1)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_2) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_2 (2)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_3) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_3 (3)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_8) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_8 (8)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_9) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_9 (9)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_10) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_10 (10)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_11) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_11 (11)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_12) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_12 (12)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_13) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_13 (13)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_14) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_14 (14)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_15) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_15 (15)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_16) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_16 (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_17) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_17 (17)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_18) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_18 (18)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_19) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_19 (19)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_20) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_20 (20)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_21) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_21 (21)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_22) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_22 (22)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_23) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_23 (23)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_24) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_24 (24)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_25) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_25 (25)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_26) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_26 (26)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_27) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_27 (27)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_28) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_28 (28)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_29) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_29 (29)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_30) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_30 (30)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_31) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_31 (31)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES (32)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4 (36)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5 (37)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6 (38)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7 (39)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_CORE) */
+#define OKL4_OFS_ENV_PROFILE_CELL_CORE (40)
+/**
+ * Offsets for struct okl4_env_profile_cpu
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CPU_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CPU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CPU_CAP) */
+#define OKL4_OFS_ENV_PROFILE_CPU_CAP (0)
+/**
+ * Offsets for struct okl4_env_profile_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_CELLS) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_CELLS (8)
+/**
+ * Offsets for struct okl4_env_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_BASE) */
+#define OKL4_OFS_ENV_SEGMENT_BASE (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_SIZE) */
+#define OKL4_OFS_ENV_SEGMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_CAP_ID) */
+#define OKL4_OFS_ENV_SEGMENT_CAP_ID (16)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_RWX) */
+#define OKL4_OFS_ENV_SEGMENT_RWX (20)
+/**
+ * Offsets for struct okl4_env_segment_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS (8)
+/**
+ * Offsets for struct okl4_firmware_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE (32)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_FILESZ) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_FILESZ (16)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF (24)
+/**
+ * Offsets for struct okl4_firmware_segments_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS (8)
+/**
+ * Offsets for struct okl4_kmmu
+ **/
+/*lint -esym(621, OKL4_STRUCT_KMMU_SIZE) */
+#define OKL4_STRUCT_KMMU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_KMMU_KCAP) */
+#define OKL4_OFS_KMMU_KCAP (0)
+/**
+ * Offsets for struct okl4_ksp_user_agent
+ **/
+/*lint -esym(621, OKL4_STRUCT_KSP_USER_AGENT_SIZE) */
+#define OKL4_STRUCT_KSP_USER_AGENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_KCAP) */
+#define OKL4_OFS_KSP_USER_AGENT_KCAP (0)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_VIRQ) */
+#define OKL4_OFS_KSP_USER_AGENT_VIRQ (4)
+/**
+ * Offsets for struct okl4_pipe_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_KCAP) */
+#define OKL4_OFS_PIPE_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_VIRQ) */
+#define OKL4_OFS_PIPE_DATA_VIRQ (4)
+/**
+ * Offsets for struct okl4_pipe_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_EP_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_EP_DATA_SIZE (16)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX) */
+#define OKL4_OFS_PIPE_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_VIRQ (4)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX) */
+#define OKL4_OFS_PIPE_EP_DATA_TX (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_KCAP (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_VIRQ (12)
+/**
+ * Offsets for struct okl4_link
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINK_SIZE) */
+#define OKL4_STRUCT_LINK_SIZE (80)
+/*lint -esym(621, OKL4_OFS_LINK_NAME) */
+#define OKL4_OFS_LINK_NAME (0)
+/*lint -esym(621, OKL4_OFS_LINK_OPAQUE) */
+#define OKL4_OFS_LINK_OPAQUE (8)
+/*lint -esym(621, OKL4_OFS_LINK_PARTNER_NAME) */
+#define OKL4_OFS_LINK_PARTNER_NAME (16)
+/*lint -esym(621, OKL4_OFS_LINK_ROLE) */
+#define OKL4_OFS_LINK_ROLE (24)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING0_4) */
+#define OKL4_OFS_LINK___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING1_5) */
+#define OKL4_OFS_LINK___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING2_6) */
+#define OKL4_OFS_LINK___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING3_7) */
+#define OKL4_OFS_LINK___PADDING3_7 (31)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT) */
+#define OKL4_OFS_LINK_TRANSPORT (32)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT_TYPE) */
+#define OKL4_OFS_LINK_TRANSPORT_TYPE (72)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING4_4) */
+#define OKL4_OFS_LINK___PADDING4_4 (76)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING5_5) */
+#define OKL4_OFS_LINK___PADDING5_5 (77)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING6_6) */
+#define OKL4_OFS_LINK___PADDING6_6 (78)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING7_7) */
+#define OKL4_OFS_LINK___PADDING7_7 (79)
+/**
+ * Offsets for struct okl4_links
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINKS_SIZE) */
+#define OKL4_STRUCT_LINKS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_LINKS_NUM_LINKS) */
+#define OKL4_OFS_LINKS_NUM_LINKS (0)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING0_4) */
+#define OKL4_OFS_LINKS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING1_5) */
+#define OKL4_OFS_LINKS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING2_6) */
+#define OKL4_OFS_LINKS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING3_7) */
+#define OKL4_OFS_LINKS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_LINKS_LINKS) */
+#define OKL4_OFS_LINKS_LINKS (8)
+/**
+ * Offsets for struct okl4_machine_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_MACHINE_INFO_SIZE) */
+#define OKL4_STRUCT_MACHINE_INFO_SIZE (24)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE (0)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_NUM_CPUS) */
+#define OKL4_OFS_MACHINE_INFO_NUM_CPUS (16)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING0_4) */
+#define OKL4_OFS_MACHINE_INFO___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING1_5) */
+#define OKL4_OFS_MACHINE_INFO___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING2_6) */
+#define OKL4_OFS_MACHINE_INFO___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING3_7) */
+#define OKL4_OFS_MACHINE_INFO___PADDING3_7 (23)
+/**
+ * Offsets for struct okl4_merged_physpool
+ **/
+/*lint -esym(621, OKL4_STRUCT_MERGED_PHYSPOOL_SIZE) */
+#define OKL4_STRUCT_MERGED_PHYSPOOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR) */
+#define OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS (8)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS (16)
+/**
+ * Offsets for struct okl4_microvisor_timer
+ **/
+/*lint -esym(621, OKL4_STRUCT_MICROVISOR_TIMER_SIZE) */
+#define OKL4_STRUCT_MICROVISOR_TIMER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_KCAP) */
+#define OKL4_OFS_MICROVISOR_TIMER_KCAP (0)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_VIRQ) */
+#define OKL4_OFS_MICROVISOR_TIMER_VIRQ (4)
+/**
+ * Offsets for struct okl4_cpu_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_CPU_REGISTERS_SIZE) */
+#define OKL4_STRUCT_CPU_REGISTERS_SIZE (448)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X) */
+#define OKL4_OFS_CPU_REGISTERS_X (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_0) */
+#define OKL4_OFS_CPU_REGISTERS_X_0 (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_1) */
+#define OKL4_OFS_CPU_REGISTERS_X_1 (8)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_2) */
+#define OKL4_OFS_CPU_REGISTERS_X_2 (16)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_3) */
+#define OKL4_OFS_CPU_REGISTERS_X_3 (24)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_4) */
+#define OKL4_OFS_CPU_REGISTERS_X_4 (32)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_5) */
+#define OKL4_OFS_CPU_REGISTERS_X_5 (40)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_6) */
+#define OKL4_OFS_CPU_REGISTERS_X_6 (48)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_7) */
+#define OKL4_OFS_CPU_REGISTERS_X_7 (56)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_8) */
+#define OKL4_OFS_CPU_REGISTERS_X_8 (64)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_9) */
+#define OKL4_OFS_CPU_REGISTERS_X_9 (72)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_10) */
+#define OKL4_OFS_CPU_REGISTERS_X_10 (80)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_11) */
+#define OKL4_OFS_CPU_REGISTERS_X_11 (88)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_12) */
+#define OKL4_OFS_CPU_REGISTERS_X_12 (96)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_13) */
+#define OKL4_OFS_CPU_REGISTERS_X_13 (104)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_14) */
+#define OKL4_OFS_CPU_REGISTERS_X_14 (112)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_15) */
+#define OKL4_OFS_CPU_REGISTERS_X_15 (120)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_16) */
+#define OKL4_OFS_CPU_REGISTERS_X_16 (128)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_17) */
+#define OKL4_OFS_CPU_REGISTERS_X_17 (136)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_18) */
+#define OKL4_OFS_CPU_REGISTERS_X_18 (144)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_19) */
+#define OKL4_OFS_CPU_REGISTERS_X_19 (152)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_20) */
+#define OKL4_OFS_CPU_REGISTERS_X_20 (160)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_21) */
+#define OKL4_OFS_CPU_REGISTERS_X_21 (168)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_22) */
+#define OKL4_OFS_CPU_REGISTERS_X_22 (176)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_23) */
+#define OKL4_OFS_CPU_REGISTERS_X_23 (184)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_24) */
+#define OKL4_OFS_CPU_REGISTERS_X_24 (192)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_25) */
+#define OKL4_OFS_CPU_REGISTERS_X_25 (200)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_26) */
+#define OKL4_OFS_CPU_REGISTERS_X_26 (208)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_27) */
+#define OKL4_OFS_CPU_REGISTERS_X_27 (216)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_28) */
+#define OKL4_OFS_CPU_REGISTERS_X_28 (224)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_29) */
+#define OKL4_OFS_CPU_REGISTERS_X_29 (232)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_30) */
+#define OKL4_OFS_CPU_REGISTERS_X_30 (240)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL0 (248)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IP) */
+#define OKL4_OFS_CPU_REGISTERS_IP (256)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPSR) */
+#define OKL4_OFS_CPU_REGISTERS_CPSR (264)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING0_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING0_4 (268)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING1_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING1_5 (269)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING2_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING2_6 (270)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING3_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING3_7 (271)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL1 (272)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ELR_EL1 (280)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_EL1 (288)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_ABT) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_ABT (292)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_UND) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_UND (296)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_IRQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_IRQ (300)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_FIQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_FIQ (304)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CSSELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CSSELR_EL1 (308)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SCTLR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SCTLR_EL1 (312)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPACR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CPACR_EL1 (316)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR0_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR0_EL1 (320)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR1_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR1_EL1 (328)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TCR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TCR_EL1 (336)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_DACR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_DACR32_EL2 (344)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IFSR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_IFSR32_EL2 (348)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ESR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ESR_EL1 (352)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING4_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING4_4 (356)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING5_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING5_5 (357)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING6_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING6_6 (358)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING7_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING7_7 (359)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_FAR_EL1 (360)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_PAR_EL1 (368)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_MAIR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_MAIR_EL1 (376)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_VBAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_VBAR_EL1 (384)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1 (392)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING8_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING8_4 (396)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING9_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING9_5 (397)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING10_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING10_6 (398)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING11_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING11_7 (399)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL1 (400)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0 (408)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL0 (416)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCR_EL0 (424)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING12_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING12_4 (428)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING13_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING13_5 (429)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING14_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING14_6 (430)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING15_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING15_7 (431)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0 (432)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2 (440)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1 (444)
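+/*
+ * Illustrative sketch (hypothetical, for clarity only): an assembly
+ * register-save sequence could use the 8-byte-spaced offsets above to
+ * store general-purpose registers into an okl4_cpu_registers area, e.g.:
+ *
+ *     stp  x0, x1, [x2, #OKL4_OFS_CPU_REGISTERS_X_0]    // x0 -> +0, x1 -> +8
+ *     str  x30,    [x2, #OKL4_OFS_CPU_REGISTERS_X_30]   // x30 -> +240
+ */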
+/**
+ * Offsets for struct okl4_schedule_profile_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE) */
+#define OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP (0)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME (8)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES (16)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS (20)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS (24)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS (28)
+/**
+ * Offsets for struct okl4_shared_buffer
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFER_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFER_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_CAP) */
+#define OKL4_OFS_SHARED_BUFFER_CAP (24)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING3_7 (31)
+/**
+ * Offsets for struct okl4_shared_buffers_array
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7 (15)
+/**
+ * Offsets for struct _okl4_tracebuffer_buffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4 (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5 (13)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6 (14)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7 (15)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET (32)
+/**
+ * Offsets for struct okl4_tracebuffer_env
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEBUFFER_ENV_SIZE) */
+#define OKL4_STRUCT_TRACEBUFFER_ENV_SIZE (24)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRQ) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRQ (16)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7 (23)
+/**
+ * Offsets for struct _okl4_tracebuffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_VERSION) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_VERSION (4)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ID) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ID (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER (28)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER (32)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS (36)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS (40)
+/**
+ * Offsets for struct okl4_tracepoint_entry_base
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION (8)
+/**
+ * Offsets for struct okl4_tracepoint_unpacked_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION (8)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA (12)
+/**
+ * Offsets for struct okl4_vclient_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCLIENT_INFO_SIZE) */
+#define OKL4_STRUCT_VCLIENT_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ (20)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_OPAQUE) */
+#define OKL4_OFS_VCLIENT_INFO_OPAQUE (24)
+/**
+ * Offsets for struct okl4_vcpu_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_ENTRY_SIZE) */
+#define OKL4_STRUCT_VCPU_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_VCPU) */
+#define OKL4_OFS_VCPU_ENTRY_VCPU (0)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IPI) */
+#define OKL4_OFS_VCPU_ENTRY_IPI (4)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IRQ) */
+#define OKL4_OFS_VCPU_ENTRY_IRQ (8)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING0_4) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING1_5) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING2_6) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING3_7) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_STACK_POINTER) */
+#define OKL4_OFS_VCPU_ENTRY_STACK_POINTER (16)
+/**
+ * Offsets for struct okl4_vcpu_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_TABLE_SIZE) */
+#define OKL4_STRUCT_VCPU_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_NUM_VCPUS) */
+#define OKL4_OFS_VCPU_TABLE_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING0_4) */
+#define OKL4_OFS_VCPU_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING1_5) */
+#define OKL4_OFS_VCPU_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING2_6) */
+#define OKL4_OFS_VCPU_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING3_7) */
+#define OKL4_OFS_VCPU_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_VCPU) */
+#define OKL4_OFS_VCPU_TABLE_VCPU (8)
+/**
+ * Offsets for struct okl4_vfp_ctrl_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPSR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPSR (0)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPCR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPCR (4)
+/**
+ * Offsets for struct okl4_vfp_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTER_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTER_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES) */
+#define OKL4_OFS_VFP_REGISTER___BYTES (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_0) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_1) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_1 (1)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_2) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_2 (2)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_3) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_3 (3)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_4) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_4 (4)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_5) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_5 (5)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_6) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_6 (6)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_7) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_7 (7)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_8) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_8 (8)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_9) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_9 (9)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_10) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_10 (10)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_11) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_11 (11)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_12) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_12 (12)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_13) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_13 (13)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_14) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_14 (14)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_15) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_15 (15)
+/**
+ * Offsets for struct okl4_vfp_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTERS_SIZE (528)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V0) */
+#define OKL4_OFS_VFP_REGISTERS_V0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V1) */
+#define OKL4_OFS_VFP_REGISTERS_V1 (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V2) */
+#define OKL4_OFS_VFP_REGISTERS_V2 (32)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V3) */
+#define OKL4_OFS_VFP_REGISTERS_V3 (48)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V4) */
+#define OKL4_OFS_VFP_REGISTERS_V4 (64)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V5) */
+#define OKL4_OFS_VFP_REGISTERS_V5 (80)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V6) */
+#define OKL4_OFS_VFP_REGISTERS_V6 (96)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V7) */
+#define OKL4_OFS_VFP_REGISTERS_V7 (112)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V8) */
+#define OKL4_OFS_VFP_REGISTERS_V8 (128)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V9) */
+#define OKL4_OFS_VFP_REGISTERS_V9 (144)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V10) */
+#define OKL4_OFS_VFP_REGISTERS_V10 (160)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V11) */
+#define OKL4_OFS_VFP_REGISTERS_V11 (176)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V12) */
+#define OKL4_OFS_VFP_REGISTERS_V12 (192)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V13) */
+#define OKL4_OFS_VFP_REGISTERS_V13 (208)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V14) */
+#define OKL4_OFS_VFP_REGISTERS_V14 (224)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V15) */
+#define OKL4_OFS_VFP_REGISTERS_V15 (240)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V16) */
+#define OKL4_OFS_VFP_REGISTERS_V16 (256)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V17) */
+#define OKL4_OFS_VFP_REGISTERS_V17 (272)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V18) */
+#define OKL4_OFS_VFP_REGISTERS_V18 (288)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V19) */
+#define OKL4_OFS_VFP_REGISTERS_V19 (304)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V20) */
+#define OKL4_OFS_VFP_REGISTERS_V20 (320)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V21) */
+#define OKL4_OFS_VFP_REGISTERS_V21 (336)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V22) */
+#define OKL4_OFS_VFP_REGISTERS_V22 (352)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V23) */
+#define OKL4_OFS_VFP_REGISTERS_V23 (368)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V24) */
+#define OKL4_OFS_VFP_REGISTERS_V24 (384)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V25) */
+#define OKL4_OFS_VFP_REGISTERS_V25 (400)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V26) */
+#define OKL4_OFS_VFP_REGISTERS_V26 (416)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V27) */
+#define OKL4_OFS_VFP_REGISTERS_V27 (432)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V28) */
+#define OKL4_OFS_VFP_REGISTERS_V28 (448)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V29) */
+#define OKL4_OFS_VFP_REGISTERS_V29 (464)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V30) */
+#define OKL4_OFS_VFP_REGISTERS_V30 (480)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V31) */
+#define OKL4_OFS_VFP_REGISTERS_V31 (496)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR (516)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING0_8) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING0_8 (520)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING1_9) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING1_9 (521)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING2_10) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING2_10 (522)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING3_11) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING3_11 (523)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING4_12) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING4_12 (524)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING5_13) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING5_13 (525)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING6_14) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING6_14 (526)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING7_15) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING7_15 (527)
+/**
+ * Offsets for struct okl4_virtmem_pool
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_POOL_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_POOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE (8)
+/**
+ * Offsets for struct okl4_virtual_interrupt_lines
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE) */
+#define OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES (0)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES (8)
+/**
+ * Offsets for struct okl4_vserver_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVER_INFO_SIZE) */
+#define OKL4_STRUCT_VSERVER_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_DATA) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_DATA (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES (8)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_NUM_CLIENTS) */
+#define OKL4_OFS_VSERVER_INFO_NUM_CLIENTS (24)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO___PADDING3_7 (31)
+/**
+ * Offsets for struct okl4_vservices_service_descriptor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED (16)
+/**
+ * Offsets for struct okl4_vservices_transport_microvisor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE (120)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1 (1)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2 (2)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3 (3)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4 (76)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5 (77)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6 (78)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7 (79)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES (80)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4 (92)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5 (93)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6 (94)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7 (95)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES (96)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES (104)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4 (108)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5 (109)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6 (110)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7 (111)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES (112)
+/**
+ * Offsets for struct okl4_vservices_transports
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS (8)
+
+#endif /* ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_OFFSETS_H__ */
+
diff --git a/include/microvisor/kernel/syscalls.h b/include/microvisor/kernel/syscalls.h
new file mode 100644
index 000000000000..fdc2c0d0e5f4
--- /dev/null
+++ b/include/microvisor/kernel/syscalls.h
@@ -0,0 +1,6114 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+#ifndef __AUTO__USER_SYSCALLS_H__
+#define __AUTO__USER_SYSCALLS_H__
+
+/**
+ * @cond no_doc
+ */
+#if defined(ASSEMBLY)
+#define __hvc_str(x) x
+#else
+#define _hvc_str(x) #x
+#define __hvc_str(x) _hvc_str(x)
+#endif
+#if (defined(__GNUC__) && !defined(__clang__)) && \
+ (__GNUC__ < 4 || ((__GNUC__ == 4) && (__GNUC_MINOR__ < 5)))
+#if defined(__thumb2__)
+#define hvc(i) __hvc_str(.hword 0xf7e0 | (i & 0xf); .hword 0x8000 | (i >> 4) @ HVC)
+#else
+#define hvc(i) __hvc_str(.word 0xe1400070 | (i & 0xf) | (i >> 4 << 8) @ HVC)
+#endif
+#else
+#if defined(__ARM_EABI__)
+#if defined(ASSEMBLY) && !defined(__clang__)
+ .arch_extension virt
+#elif !defined(__clang__)
+__asm__(
+ ".arch_extension virt\n"
+);
+#endif
+#endif
+#define hvc(i) __hvc_str(hvc i)
+#endif
+/**
+ * @endcond
+ */
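+
+/*
+ * Illustrative note (not part of the generated interface): on current GCC and
+ * Clang toolchains the hvc() macro simply stringifies its argument, so
+ * hvc(5120) pastes "hvc 5120" into the inline-assembly templates below, e.g.
+ *
+ *     __asm__ __volatile__("" hvc(5120) "\n\t" : : : "memory");
+ *
+ * expands to an asm statement containing "hvc 5120\n\t". The .hword/.word
+ * branches above hand-encode the same instruction for pre-4.5 GCC, which
+ * cannot emit the HVC mnemonic directly.
+ */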
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_OK OKL4_ERROR_OK
+
+/** @} */
+
+/*
+ * Syscall prototypes.
+ */
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_PROCESS_RECV
+ *
+ * @param axon_id
+ * @param transfer_limit
+ *
+ * @retval error
+ * @retval send_empty
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+ struct _okl4_sys_axon_process_recv_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)(transfer_limit & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((transfer_limit >> 32) & 0xffffffff);
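+    /* 64-bit arguments such as transfer_limit are split into two 32-bit
+     * halves on AArch32: low word in r1, high word in r2. */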
+ __asm__ __volatile__(
+ ""hvc(5184)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ result.send_empty = (okl4_bool_t)(r1);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+ struct _okl4_sys_axon_process_recv_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)transfer_limit;
+ __asm__ __volatile__(
+ "" hvc(5184) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.send_empty = (okl4_bool_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_HALTED
+ *
+ * @param axon_id
+ * @param halted
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)halted;
+ __asm__ __volatile__(
+ ""hvc(5186)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)halted;
+ __asm__ __volatile__(
+ "" hvc(5186) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+ okl4_lsize_t size)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)(base & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)(size & 0xffffffff);
+ register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5187)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+ okl4_lsize_t size)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+ __asm__ __volatile__(
+ "" hvc(5187) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)(queue & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5188)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+ __asm__ __volatile__(
+ "" hvc(5188) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+ okl4_laddr_t segment_base)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+ register uint32_t r2 asm("r2") = (uint32_t)(segment_base & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5189)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+ okl4_laddr_t segment_base)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+ __asm__ __volatile__(
+ "" hvc(5189) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+ okl4_lsize_t size)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)(base & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)(size & 0xffffffff);
+ register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5190)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+ okl4_lsize_t size)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+ __asm__ __volatile__(
+ "" hvc(5190) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)(queue & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5191)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+ __asm__ __volatile__(
+ "" hvc(5191) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+ okl4_laddr_t segment_base)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+ register uint32_t r2 asm("r2") = (uint32_t)(segment_base & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5192)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+ okl4_laddr_t segment_base)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+ __asm__ __volatile__(
+ "" hvc(5192) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_TRIGGER_SEND
+ *
+ * @param axon_id
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+ __asm__ __volatile__(
+ ""hvc(5185)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+ __asm__ __volatile__(
+ "" hvc(5185) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Acknowledge the delivery of an interrupt.
+ *
+ * @details
+ * This API returns the number and source of the highest-priority
+ * enabled,
+ * pending and inactive interrupt that is targeted at the calling vCPU
+ * and has higher priority than the calling vCPU's running group
+ * priority.
+ *
+ * The returned interrupt is marked as active, and will not be returned
+ * again
+ * by this function until @ref okl4_sys_interrupt_eoi is invoked
+ * specifying the
+ * same interrupt number and source. The vCPU's running interrupt
+ * priority is
+ * raised to the priority of the returned interrupt. This will typically
+ * result
+ * in the de-assertion of the vCPU's virtual IRQ line.
+ *
+ * If no such interrupt exists, interrupt number 1023 is returned. If
+ * the
+ * returned interrupt number is 16 or greater, the source ID is 0;
+ * otherwise it
+ * is the vCPU ID of the vCPU that raised the interrupt (which is always
+ * in the
+ * same Cell as the caller).
+ *
+ * @note Invoking this API is equivalent to reading from the GIC CPU
+ * Interface's Interrupt Acknowledge Register (\p GICC_IAR).
+ *
+ *
+ * @retval irq
+ * An interrupt line number for the virtual GIC.
+ * @retval source
+ * The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+ struct _okl4_sys_interrupt_ack_return result;
+
+ register uint32_t r0 asm("r0");
+ register uint32_t r1 asm("r1");
+ __asm__ __volatile__(
+ ""hvc(5128)"\n\t"
+ : "=r"(r0), "=r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ result.irq = (okl4_interrupt_number_t)(r0);
+ result.source = (uint8_t)(r1);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+ struct _okl4_sys_interrupt_ack_return result;
+
+ register okl4_register_t x0 asm("x0");
+ register okl4_register_t x1 asm("x1");
+ __asm__ __volatile__(
+ "" hvc(5128) "\n\t"
+ : "=r"(x0), "=r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.irq = (okl4_interrupt_number_t)(x0);
+ result.source = (uint8_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Register a vCPU as the handler of an interrupt.
+ *
+ * @details
+ * The Microvisor virtual GIC API permits an interrupt source to be
+ * dynamically
+ * assigned to a specific IRQ number in a Cell or vCPU. An interrupt can
+ * only
+ * be assigned to one IRQ number, and one Cell or vCPU, at a time. This
+ * operation attaches the interrupt to a vCPU as a private interrupt.
+ *
+ * Interrupt sources are addressed using capabilities. This operation,
+ * given
+ * a capability for an interrupt that is not currently attached to any
+ * handler,
+ * can attach the interrupt at a given unused IRQ number. If the IRQ
+ * number
+ * is between 16 and 31 (the GIC Private Peripheral Interrupt range), it
+ * will
+ * be attached to the specified vCPU; if it is between 32 and 1019 (the
+ * GIC
+ * Shared Peripheral Interrupt range), it will return an error.
+ *
+ * @note The Software Generated Interrupt range, from 0 to 15, is
+ * reserved
+ * and cannot be used to attach interrupt source capabilities.
+ *
+ * @note In most cases, interrupt sources are attached at system
+ * construction
+ * time by the OK Tool. It is not normally necessary to attach an
+ * interrupt
+ * source before using it.
+ *
+ * @param vcpu_cap
+ * A virtual CPU capability.
+ * @param irq_cap
+ * A virtual interrupt capability.
+ * @param irq_num
+ * An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+ okl4_interrupt_number_t irq_num)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu_cap;
+ register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+ register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+ __asm__ __volatile__(
+ ""hvc(5134)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+ okl4_interrupt_number_t irq_num)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu_cap;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+ __asm__ __volatile__(
+ "" hvc(5134) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Register a Cell (domain) as the handler of an interrupt.
+ *
+ * @details
+ * The Microvisor virtual GIC API permits an interrupt source to be
+ * dynamically
+ * assigned to a specific IRQ number in a Cell or vCPU. An interrupt can
+ * only
+ * be assigned to one IRQ number, and one Cell or vCPU, at a time. This
+ * operation attaches the interrupt to a Cell as a shared interrupt.
+ *
+ * Interrupt sources are addressed using capabilities. This operation,
+ * given
+ * a capability for an interrupt that is not currently attached to any
+ * handler,
+ * can attach the interrupt at a given unused IRQ number. If the IRQ
+ * number
+ * is between 0 and 31 (the GIC SGI or Private Peripheral Interrupt
+ * range), it
+ * will return an error; if it is between 32 and 1019 (the GIC
+ * Shared Peripheral Interrupt range), it will be attached to the
+ * specified
+ * Cell.
+ *
+ * @note In most cases, interrupt sources are attached at system
+ * construction
+ * time by the OK Tool. It is not normally necessary to attach an
+ * interrupt
+ * source before using it.
+ *
+ * @param domain_cap
+ * A domain capability.
+ * @param irq_cap
+ * A virtual interrupt capability.
+ * @param irq_num
+ * An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+ okl4_interrupt_number_t irq_num)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)domain_cap;
+ register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+ register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+ __asm__ __volatile__(
+ ""hvc(5135)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+ okl4_interrupt_number_t irq_num)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)domain_cap;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+ __asm__ __volatile__(
+ "" hvc(5135) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
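+/*
+ * Illustrative note (not part of the generated interface): the two attach
+ * calls partition the GIC interrupt number space as described above:
+ *
+ *     irq_num  0-15   : SGIs, reserved - cannot be attached
+ *     irq_num 16-31   : PPIs - _okl4_sys_interrupt_attach_private(vcpu_cap, irq_cap, irq_num)
+ *     irq_num 32-1019 : SPIs - _okl4_sys_interrupt_attach_shared(domain_cap, irq_cap, irq_num)
+ */
+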
+/**
+ *
+ * @brief Unregister an interrupt.
+ *
+ * @details
+ * Detach the given interrupt source from its registered handler. The
+ * interrupt
+ * will be deactivated and disabled, and will not be delivered again
+ * until it
+ * is reattached. However, if it is configured in edge triggering mode,
+ * its
+ * pending state will be preserved.
+ *
+ * @param irq_cap
+ * A virtual interrupt capability.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq_cap;
+ __asm__ __volatile__(
+ ""hvc(5136)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq_cap;
+ __asm__ __volatile__(
+ "" hvc(5136) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable the interrupt distributor.
+ *
+ * @details
+ * This API enables the interrupt distributor, in the same form as
+ * writing to the enable bit in \p GICD_CTLR.
+ *
+ * @param enable
+ * A boolean value for GIC distributor enable.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)enable;
+ __asm__ __volatile__(
+ ""hvc(5133)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+ __asm__ __volatile__(
+ "" hvc(5133) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal the end of the interrupt handling routine.
+ *
+ * @details
+ * This API informs the virtual GIC that handling for a given interrupt
+ * has
+ * completed. It marks the interrupt as inactive, and decreases the
+ * running
+ * interrupt priority of the calling vCPU. This may cause immediate
+ * delivery of
+ * another interrupt, possibly with the same number, if one is enabled
+ * and
+ * pending.
+ *
+ * The specified interrupt number and source must match the active
+ * interrupt
+ * that was most recently returned by an @ref okl4_sys_interrupt_ack
+ * invocation. If multiple interrupts have been acknowledged and not yet
+ * ended,
+ * they must be ended in the reversed order of their acknowledgement.
+ *
+ * @note Invoking this API is equivalent to writing to the GIC CPU
+ * Interface's End of Interrupt Register (\p GICC_EOIR), with \p EOImode
+ * set to 0 in \p GICC_CTLR.
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ * @param source
+ * The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ register uint32_t r1 asm("r1") = (uint32_t)source;
+ __asm__ __volatile__(
+ ""hvc(5129)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)source;
+ __asm__ __volatile__(
+ "" hvc(5129) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
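+/*
+ * Illustrative usage sketch (not part of the generated interface): a guest's
+ * IRQ entry path typically pairs the acknowledge and end-of-interrupt calls,
+ * ending interrupts in the reverse order of acknowledgement when they nest.
+ *
+ *     struct _okl4_sys_interrupt_ack_return ack = _okl4_sys_interrupt_ack();
+ *     if (ack.irq < 1020U) {                  // 1023 indicates nothing deliverable
+ *         handle_guest_irq(ack.irq);          // hypothetical guest dispatch routine
+ *         (void)_okl4_sys_interrupt_eoi(ack.irq, ack.source);
+ *     }
+ */
+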
+/**
+ *
+ * @brief Retrieve the highest-priority pending interrupt.
+ *
+ * @details
+ * This API returns the number and source of the highest-priority
+ * enabled,
+ * pending and inactive interrupt that is targeted at the calling vCPU
+ * and has higher priority than the calling vCPU's running group
+ * priority.
+ *
+ * If no such interrupt exists, interrupt number 1023 is returned. If
+ * the
+ * returned interrupt number is 16 or greater, the source ID is 0;
+ * otherwise it
+ * is the vCPU ID of the vCPU that raised the interrupt (which is always
+ * in the
+ * same Cell as the caller).
+ *
+ * @note Invoking this API is equivalent to reading from the GIC CPU
+ * Interface's Highest Priority Pending Interrupt Register (\p
+ * GICC_HPPIR).
+ *
+ *
+ * @retval irq
+ * An interrupt line number for the virtual GIC.
+ * @retval source
+ * The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+ struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+ register uint32_t r0 asm("r0");
+ register uint32_t r1 asm("r1");
+ __asm__ __volatile__(
+ ""hvc(5137)"\n\t"
+ : "=r"(r0), "=r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ result.irq = (okl4_interrupt_number_t)(r0);
+ result.source = (uint8_t)(r1);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+ struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+ register okl4_register_t x0 asm("x0");
+ register okl4_register_t x1 asm("x1");
+ __asm__ __volatile__(
+ "" hvc(5137) "\n\t"
+ : "=r"(x0), "=r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.irq = (okl4_interrupt_number_t)(x0);
+ result.source = (uint8_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Fetch the payload flags of a virtual interrupt.
+ *
+ * @details
+ * This fetches and clears the accumulated payload flags for a virtual
+ * interrupt that has been raised by the Microvisor, or by a vCPU
+ * invoking
+ * the @ref okl4_sys_vinterrupt_raise API.
+ *
+ * If the virtual interrupt is configured for level triggering, clearing
+ * the
+ * accumulated flags by calling this function will also clear the
+ * pending state
+ * of the interrupt.
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ * The resulting error value.
+ * @retval payload
+ * Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp payload_tmp;
+ struct _okl4_sys_interrupt_get_payload_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ register uint32_t r1 asm("r1");
+ register uint32_t r2 asm("r2");
+ __asm__ __volatile__(
+ ""hvc(5132)"\n\t"
+ : "=r"(r1), "=r"(r2), "+r"(r0)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ payload_tmp.words.lo = r1;
+ payload_tmp.words.hi = r2;
+ result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+ struct _okl4_sys_interrupt_get_payload_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ register okl4_register_t x1 asm("x1");
+ __asm__ __volatile__(
+ "" hvc(5132) "\n\t"
+ : "=r"(x1), "+r"(x0)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.payload = (okl4_virq_flags_t)(x1);
+ return result;
+}
+
+#endif
+
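+/*
+ * Illustrative usage sketch (not part of the generated interface): draining
+ * the accumulated flags of a virtual interrupt, `virq` being a caller-supplied
+ * line number.
+ *
+ *     struct _okl4_sys_interrupt_get_payload_return p =
+ *             _okl4_sys_interrupt_get_payload(virq);
+ *     if (p.error == OKL4_OK) {
+ *         okl4_virq_flags_t flags = p.payload;    // each set bit is an accumulated flag
+ *     }
+ */
+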
+/**
+ *
+ * @brief Query the number of supported CPUs and interrupt lines.
+ *
+ * @details
+ * This API returns the number of CPUs and interrupt lines supported by
+ * the
+ * virtual interrupt controller, in the same form as is found in the GIC
+ * Distributor's Interrupt Controller Type Register (\p GICD_TYPER), in
+ * the \p CPUNumber and \p ITLinesNumber fields.
+ *
+ *
+ * @retval cpunumber
+ * The number of supported target CPUs, minus 1.
+ * @retval itnumber
+ * The number of supported groups of 32 interrupt lines, minus 1.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+ struct _okl4_sys_interrupt_limits_return result;
+
+ register uint32_t r0 asm("r0");
+ register uint32_t r1 asm("r1");
+ __asm__ __volatile__(
+ ""hvc(5138)"\n\t"
+ : "=r"(r0), "=r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ result.cpunumber = (okl4_count_t)(r0);
+ result.itnumber = (okl4_count_t)(r1);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+ struct _okl4_sys_interrupt_limits_return result;
+
+ register okl4_register_t x0 asm("x0");
+ register okl4_register_t x1 asm("x1");
+ __asm__ __volatile__(
+ "" hvc(5138) "\n\t"
+ : "=r"(x0), "=r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.cpunumber = (okl4_count_t)(x0);
+ result.itnumber = (okl4_count_t)(x1);
+ return result;
+}
+
+#endif
+
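+/*
+ * Illustrative note (not part of the generated interface): converting the
+ * GICD_TYPER-style return values into plain counts.
+ *
+ *     struct _okl4_sys_interrupt_limits_return lim = _okl4_sys_interrupt_limits();
+ *     okl4_count_t num_cpus  = lim.cpunumber + 1U;          // supported target CPUs
+ *     okl4_count_t num_lines = 32U * (lim.itnumber + 1U);   // supported interrupt lines
+ */
+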
+/**
+ *
+ * @brief Disable delivery of an interrupt.
+ *
+ * @details
+ * This prevents future delivery of the specified interrupt. It does not
+ * affect any currently active delivery (that is, end-of-interrupt must
+ * still be called). It also does not affect the pending state, so it
+ * cannot
+ * cause loss of edge-triggered interrupts.
+ *
+ * @note Invoking this API is equivalent to writing a single bit to one
+ * of the
+ * GIC Distributor's Interrupt Clear-Enable Registers (\p
+ * GICD_ICENABLERn).
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ __asm__ __volatile__(
+ ""hvc(5130)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ __asm__ __volatile__(
+ "" hvc(5130) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Raise a Software-Generated Interrupt.
+ *
+ * @details
+ * This allows a Software-Generated Interrupt (with interrupt number
+ * between
+ * 0 and 15) to be raised, targeted at a specified set of vCPUs within
+ * the
+ * same Cell. No capability is required, but interrupts cannot be raised
+ * to
+ * other Cells with this API.
+ *
+ * @note Invoking this API is equivalent to writing to the GIC
+ * Distributor's
+ * Software Generated Interrupt Register (\p GICD_SGIR).
+ *
+ * @note This API is distinct from the @ref okl4_sys_vinterrupt_raise
+ * API,
+ * which raises a virtual interrupt source which may communicate across
+ * Cell boundaries, and requires an explicit capability.
+ *
+ * @param sgir
+ * A description of the Software-Generated Interrupt to raise.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)sgir;
+ __asm__ __volatile__(
+ ""hvc(5145)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)sgir;
+ __asm__ __volatile__(
+ "" hvc(5145) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the interrupt priority binary point for the calling vCPU.
+ *
+ * @details
+ * The GIC splits IRQ priority values into two subfields: the group
+ * priority
+ * and the subpriority. The binary point is the index of the most
+ * significant
+ * bit of the subpriority (that is, one less than the number of
+ * subpriority
+ * bits).
+ *
+ * An interrupt can preempt another active interrupt only if its group
+ * priority
+ * is higher than the running group priority; the subpriority is ignored
+ * for
+ * this comparison. The subpriority is used to determine which of two
+ * equal
+ * priority interrupts will be delivered first.
+ *
+ * @note Invoking this API is equivalent to writing to the GIC CPU
+ * Interface's Binary Point Register (\p GICC_BPR).
+ *
+ * @param binary_point
+ * The number of bits in the subpriority field, minus 1.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)binary_point;
+ __asm__ __volatile__(
+ ""hvc(5139)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)binary_point;
+ __asm__ __volatile__(
+ "" hvc(5139) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
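+/*
+ * Worked example (not part of the generated interface): with binary_point = 5,
+ * the subpriority occupies bits [5:0] and the group priority bits [7:6].
+ * Priorities 0x40 and 0x50 then share group priority 0b01, so neither can
+ * preempt the other, while a priority 0x00 interrupt can preempt both.
+ */
+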
+/**
+ *
+ * @brief Change the configuration of an interrupt.
+ *
+ * @details
+ * This sets the triggering type of a specified interrupt to either
+ * edge or level triggering.
+ *
+ * The specified interrupt must be disabled.
+ *
+ * @note Some interrupt sources only support one triggering type. In
+ * this case,
+ * calling this API for the interrupt will have no effect.
+ *
+ * @note Invoking this API is equivalent to writing a single two-bit
+ * field of
+ * one of the GIC Distributor's Interrupt Configuration Registers (\p
+ * GICD_ICFGRn).
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ * @param icfgr
+ * The configuration bits for the interrupt line.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+ okl4_gicd_icfgr_t icfgr)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ register uint32_t r1 asm("r1") = (uint32_t)icfgr;
+ __asm__ __volatile__(
+ ""hvc(5140)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+ okl4_gicd_icfgr_t icfgr)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)icfgr;
+ __asm__ __volatile__(
+ "" hvc(5140) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
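+/*
+ * Illustrative sequence (not part of the generated interface): because the
+ * interrupt must be disabled while its trigger type is changed, callers
+ * typically bracket the call with mask/unmask; `irq` and `icfgr` are
+ * caller-supplied.
+ *
+ *     (void)_okl4_sys_interrupt_mask(irq);
+ *     (void)_okl4_sys_interrupt_set_config(irq, icfgr);
+ *     (void)_okl4_sys_interrupt_unmask(irq);
+ */
+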
+/**
+ *
+ * @brief Enable or disable the signaling of interrupts to the vCPU.
+ *
+ * @details
+ * Enable or disable the signaling of interrupts by the virtual CPU
+ * interface
+ * to the connected vCPU.
+ *
+ * @note Interrupt signalling is initially disabled, as required by the
+ * GIC
+ * API specification. This API must therefore be invoked at least once
+ * before
+ * any interrupts will be delivered.
+ *
+ * @note Invoking this API is equivalent to writing to the GIC CPU
+ * Interface's Control Register (\p GICC_CTLR) using the "GICv1 without
+ * Security Extensions or Non-Secure" format, which contains only a
+ * single
+ * enable bit.
+ *
+ * @param enable
+ *    A boolean value for GIC CPU interface enable.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)enable;
+ __asm__ __volatile__(
+ ""hvc(5141)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+ __asm__ __volatile__(
+ "" hvc(5141) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Change the delivery priority of an interrupt.
+ *
+ * @details
+ * This changes the delivery priority of an interrupt. It has no
+ * immediate
+ * effect on currently active interrupts, but will take effect once the
+ * interrupt is deactivated.
+ *
+ * @note The number of significant bits in this value is
+ * implementation-defined. In this configuration, 4 significant priority
+ * bits are implemented. The most significant bit is always at the high
+ * end
+ * of the priority byte; that is, at bit 7.
+ *
+ * @note Smaller values represent higher priority. The highest possible
+ * priority is 0; the lowest possible priority has all implemented bits
+ * set,
+ * and in this implementation is currently 0xf0.
+ *
+ * @note Invoking this API is equivalent to writing a single byte of one
+ * of the
+ * GIC Distributor's Interrupt Priority Registers (\p GICD_IPRIORITYn).
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ * @param priority
+ * A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ register uint32_t r1 asm("r1") = (uint32_t)priority;
+ __asm__ __volatile__(
+ ""hvc(5142)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)priority;
+ __asm__ __volatile__(
+ "" hvc(5142) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
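+/*
+ * Illustrative note (not part of the generated interface): with 4 implemented
+ * priority bits at the top of the byte, the distinct usable values are
+ * 0x00 (highest), 0x10, 0x20, ... 0xf0 (lowest); the low-order bits of the
+ * priority byte are ignored.
+ */
+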
+/**
+ *
+ * @brief Set the minimum interrupt priority of the calling vCPU.
+ *
+ * @details
+ * This API sets the calling vCPU's minimum running interrupt priority.
+ * Interrupts will only be delivered if they have priority higher than
+ * this
+ * value.
+ *
+ * @note Higher priority corresponds to a lower priority value; i.e.,
+ * the
+ * highest priority value is 0.
+ *
+ * @note The priority mask is initially set to 0, which prevents all
+ * interrupt
+ * delivery, as required by the GIC API specification. This API must
+ * therefore
+ * be invoked at least once before any interrupts will be delivered.
+ *
+ * @note Invoking this API is equivalent to writing to the GIC CPU
+ * Interface's Interrupt Priority Mask Register (\p GICC_PMR).
+ *
+ * @param priority_mask
+ * A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)priority_mask;
+ __asm__ __volatile__(
+ ""hvc(5143)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)priority_mask;
+ __asm__ __volatile__(
+ "" hvc(5143) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
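+/*
+ * Illustrative bring-up sketch (not part of the generated interface): both the
+ * CPU interface enable and the priority mask start in a "no delivery" state,
+ * so a guest typically performs a sequence like the following before unmasking
+ * individual interrupt lines:
+ *
+ *     (void)_okl4_sys_interrupt_dist_enable(1);           // distributor enable (GICD_CTLR)
+ *     (void)_okl4_sys_interrupt_set_control(1);           // CPU interface enable (GICC_CTLR)
+ *     (void)_okl4_sys_interrupt_set_priority_mask(0xf0);  // admit all priorities above the lowest level
+ */
+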
+/**
+ *
+ * @brief Change the delivery targets of a shared interrupt.
+ *
+ * @details
+ * This sets the subset of a Cell's vCPUs to which the specified shared
+ * interrupt (with an interrupt number between 32 and 1019) can be
+ * delivered.
+ * The target vCPUs are specified by an 8-bit bitfield. Note that no more
+ * than 8 targets are supported by the GIC API, so vCPUs with IDs of 8 or
+ * greater will never receive shared interrupts.
+ *
+ * @note The GIC API does not specify how or when the implementation
+ * selects a
+ * target for interrupt delivery. Most hardware implementations deliver
+ * to
+ * all possible targets simultaneously, and then cancel all but the
+ * first to
+ * be acknowledged. In the interests of efficiency, the OKL4 Microvisor
+ * does
+ * not implement this behaviour; instead, it chooses an arbitrary target
+ * when
+ * the interrupt first becomes deliverable.
+ *
+ * @note Invoking this API is equivalent to writing a single byte of one
+ * of the
+ * GIC Distributor's Interrupt Targets Registers (\p GICD_ITARGETSRn).
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ * @param cpu_mask
+ * Bitmask of vCPU IDs.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ register uint32_t r1 asm("r1") = (uint32_t)cpu_mask;
+ __asm__ __volatile__(
+ ""hvc(5144)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)cpu_mask;
+ __asm__ __volatile__(
+ "" hvc(5144) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
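+
+/*
+ * Illustrative sketch (guarded by the hypothetical
+ * OKL4_SYSCALL_USAGE_EXAMPLES macro, so it is not built by default):
+ * routing a shared interrupt to the first two vCPUs of the Cell.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_route_shared_irq(okl4_interrupt_number_t irq)
+{
+    /* Bit n of the mask selects vCPU ID n; only IDs 0-7 can be
+     * addressed. Here vCPUs 0 and 1 are permitted targets. */
+    uint8_t cpu_mask = (uint8_t)((1U << 0) | (1U << 1));
+
+    return _okl4_sys_interrupt_set_targets(irq, cpu_mask);
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */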
+
+/**
+ *
+ * @brief Enable delivery of an interrupt.
+ *
+ * @details
+ * This permits delivery of the specified interrupt, once it is pending
+ * and inactive and has sufficiently high priority.
+ *
+ * @note Invoking this API is equivalent to writing a single bit to one
+ * of the GIC Distributor's Interrupt Set-Enable Registers
+ * (\p GICD_ISENABLERn).
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ __asm__ __volatile__(
+ ""hvc(5131)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ __asm__ __volatile__(
+ "" hvc(5131) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enter the kernel interactive debugger.
+ *
+ * @details
+ * This is available on a debug build of the kernel; otherwise the
+ * operation is a no-op.
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+ __asm__ __volatile__(
+ ""hvc(5120)"\n\t"
+ :
+ :
+ : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+ );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+ __asm__ __volatile__(
+ "" hvc(5120) "\n\t"
+ :
+ :
+ : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the debug name of the addressed kernel object.
+ *
+ * @details
+ * The debug version of the Microvisor kernel supports naming of kernel
+ * objects to aid debugging. The object names are visible to external
+ * debuggers such as a JTAG tool, as well as the in-built interactive
+ * kernel debugger.
+ *
+ * The target object may be any Microvisor object for which the caller
+ * has a capability with the master rights.
+ *
+ * Debug names may be up to 16 characters long, with four characters
+ * stored per \p name[x] argument in little-endian order (on a 32-bit
+ * machine).
+ *
+ * @param object
+ * The target kernel object id.
+ * @param name0
+ * Characters 0-3 of the debug name.
+ * @param name1
+ * Characters 4-7 of the debug name.
+ * @param name2
+ * Characters 8-11 of the debug name.
+ * @param name3
+ * Characters 12-15 of the debug name.
+ *
+ * @retval error
+ * Resulting error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+ , uint32_t name2, uint32_t name3)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)object;
+ register uint32_t r1 asm("r1") = (uint32_t)name0;
+ register uint32_t r2 asm("r2") = (uint32_t)name1;
+ register uint32_t r3 asm("r3") = (uint32_t)name2;
+ register uint32_t r4 asm("r4") = (uint32_t)name3;
+ __asm__ __volatile__(
+ ""hvc(5121)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+ , uint32_t name2, uint32_t name3)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)object;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)name0;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)name1;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)name2;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)name3;
+ __asm__ __volatile__(
+ "" hvc(5121) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+ :
+ : "cc", "memory", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
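+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard,
+ * not built by default): packing a C string into the four little-endian
+ * name words, four characters per word, as described above.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_name_object(okl4_kcap_t object, const char *name)
+{
+    uint32_t words[4] = { 0U, 0U, 0U, 0U };
+    unsigned int i;
+
+    /* Copy at most 16 characters; unused bytes stay zero. */
+    for (i = 0U; i < 16U && name[i] != '\0'; i++) {
+        words[i / 4U] |= (uint32_t)(unsigned char)name[i] << (8U * (i % 4U));
+    }
+
+    return _okl4_sys_kdb_set_object_name(object, words[0], words[1],
+            words[2], words[3]);
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */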
+
+/**
+ *
+ * @brief Call a kernel support package (KSP) defined interface.
+ *
+ * @details
+ * The KSP procedure call allows the caller to interact with
+ * customer-specific functions provided by the kernel support package.
+ * The caller must possess a capability with the appropriate rights to a
+ * KSP agent in order to call this interface.
+ *
+ * The remaining parameters provided are passed directly to the KSP
+ * without any inspection.
+ *
+ * The KSP can return an error code and up to three return words.
+ *
+ * @param agent
+ * The target KSP agent
+ * @param operation
+ * The operation to be performed
+ * @param arg0
+ * An argument for the operation
+ * @param arg1
+ * An argument for the operation
+ * @param arg2
+ * An argument for the operation
+ * @param arg3
+ * An argument for the operation
+ *
+ * @retval error
+ * The resulting error
+ * @retval ret0
+ * A return value for the operation
+ * @retval ret1
+ * A return value for the operation
+ * @retval ret2
+ * A return value for the operation
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+ okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+ okl4_ksp_arg_t arg3)
+{
+ struct _okl4_sys_ksp_procedure_call_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)agent;
+ register uint32_t r1 asm("r1") = (uint32_t)operation;
+ register uint32_t r2 asm("r2") = (uint32_t)arg0;
+ register uint32_t r3 asm("r3") = (uint32_t)arg1;
+ register uint32_t r4 asm("r4") = (uint32_t)arg2;
+ register uint32_t r5 asm("r5") = (uint32_t)arg3;
+ __asm__ __volatile__(
+ ""hvc(5197)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+ :
+ : "cc", "memory"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ result.ret0 = (okl4_ksp_arg_t)(r1);
+ result.ret1 = (okl4_ksp_arg_t)(r2);
+ result.ret2 = (okl4_ksp_arg_t)(r3);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+ okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+ okl4_ksp_arg_t arg3)
+{
+ struct _okl4_sys_ksp_procedure_call_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)agent;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)operation;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)arg0;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)arg1;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)arg2;
+ register okl4_register_t x5 asm("x5") = (okl4_register_t)arg3;
+ __asm__ __volatile__(
+ "" hvc(5197) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+ :
+ : "cc", "memory", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.ret0 = (okl4_ksp_arg_t)(x1);
+ result.ret1 = (okl4_ksp_arg_t)(x2);
+ result.ret2 = (okl4_ksp_arg_t)(x3);
+ return result;
+}
+
+#endif
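+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard):
+ * invoking a KSP agent. The operation code and arguments below are
+ * placeholders; their meaning is defined entirely by the KSP in use.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_ksp_call(okl4_kcap_t agent)
+{
+    struct _okl4_sys_ksp_procedure_call_return r;
+
+    r = _okl4_sys_ksp_procedure_call(agent, (okl4_ksp_arg_t)0,
+            (okl4_ksp_arg_t)0, (okl4_ksp_arg_t)0, (okl4_ksp_arg_t)0,
+            (okl4_ksp_arg_t)0);
+
+    /* r.ret0 .. r.ret2 carry the KSP-defined return words. */
+    (void)r.ret0; (void)r.ret1; (void)r.ret2;
+    return r.error;
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */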
+
+/**
+ *
+ * @brief Attach a segment to an MMU.
+ *
+ * @details
+ * Before any mappings based on a segment can be established in the MMU's
+ * address space, the segment must be attached to the MMU. Attaching a
+ * segment serves to reference count the segment, preventing
+ * modifications to the segment being made.
+ *
+ * A segment may be attached to an MMU multiple times, at the same or
+ * different index. Each time a segment is attached to an MMU, the
+ * attachment reference count is incremented.
+ *
+ * Attaching segments to an MMU is also important for VMMU objects, in
+ * that the segment attachment index is used as a segment reference in
+ * the virtual page table format.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param segment_id
+ * The target segment id.
+ * @param index
+ * Index into the MMU's segment attachment table.
+ * @param perms
+ * Mapping permissions.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+ okl4_count_t index, okl4_page_perms_t perms)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+ register uint32_t r2 asm("r2") = (uint32_t)index;
+ register uint32_t r3 asm("r3") = (uint32_t)perms;
+ __asm__ __volatile__(
+ ""hvc(5152)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+ okl4_count_t index, okl4_page_perms_t perms)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)perms;
+ __asm__ __volatile__(
+ "" hvc(5152) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+ :
+ : "cc", "memory", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Detach a segment from an MMU.
+ *
+ * @details
+ * A segment can be detached from an MMU or vMMU, causing its reference
+ * count to decrease. When the reference count reaches zero, the
+ * attachment is removed and all mappings in the MMU object relating to
+ * the segment are removed.
+ *
+ * The detach-segment operation is potentially a long running operation,
+ * especially if invoked on a vMMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param index
+ * Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)index;
+ __asm__ __volatile__(
+ ""hvc(5153)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)index;
+ __asm__ __volatile__(
+ "" hvc(5153) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
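+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard):
+ * typical attach/use/detach pairing for a segment. OKL4_ERROR_OK is
+ * assumed to be the success value defined elsewhere in these headers.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_with_segment_attached(okl4_kcap_t mmu, okl4_kcap_t segment,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    okl4_error_t err;
+
+    /* Attach before creating any mappings derived from the segment. */
+    err = _okl4_sys_mmu_attach_segment(mmu, segment, index, perms);
+    if (err != OKL4_ERROR_OK) {
+        return err;
+    }
+
+    /* ... map and use pages that reference attachment `index` here ... */
+
+    /* Detaching drops the reference count; once it reaches zero, the
+     * attachment and its remaining mappings are removed. */
+    return _okl4_sys_mmu_detach_segment(mmu, index);
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */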
+
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ * @details
+ * This causes the kernel to remove all mappings covering the specified
+ * virtual address range.
+ *
+ * @note The size of the range must be a multiple of 1MB and the
+ * starting virtual address must be 1MB aligned.
+ * There is no support for flushing at a finer granularity.
+ * If a fine grained flush is required, the caller should use the
+ * @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ * The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param vaddr
+ * The starting virtual address of the range.
+ * (Must be 1MB aligned)
+ * @param size
+ * Size of the range. (Must be a multiple of 1MB)
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_lsize_tr_t size)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+ register uint32_t r2 asm("r2") = (uint32_t)size;
+ __asm__ __volatile__(
+ ""hvc(5154)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_lsize_tr_t size)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+ __asm__ __volatile__(
+ "" hvc(5154) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
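+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard):
+ * rounding an arbitrary range out to the 1MB granularity required by
+ * the flush-range operation.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_flush_rounded_range(okl4_kcap_t mmu, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    const okl4_lsize_tr_t one_mb = (okl4_lsize_tr_t)1 << 20;
+    okl4_laddr_tr_t start;
+    okl4_lsize_tr_t length;
+
+    /* Round the start down and the end up to 1MB boundaries. */
+    start = vaddr & ~(okl4_laddr_tr_t)(one_mb - 1);
+    length = (okl4_lsize_tr_t)(((vaddr + size + (one_mb - 1)) &
+            ~(okl4_laddr_tr_t)(one_mb - 1)) - start);
+
+    return _okl4_sys_mmu_flush_range(mmu, start, length);
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */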
+
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ * @details
+ * This causes the kernel to remove all mappings covering the specified
+ * virtual address range.
+ *
+ * @note The size of the range must be a multiple of 1MB and the
+ * starting virtual address must be 1MB aligned.
+ * There is no support for flushing at a finer granularity.
+ * If a fine grained flush is required, the caller should use the
+ * @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ * The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param laddr_pn
+ * Logical address page-number of the mapping.
+ * @param count_pn
+ * The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_lsize_pn_t count_pn)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+ register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+ __asm__ __volatile__(
+ ""hvc(5155)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_lsize_pn_t count_pn)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+ __asm__ __volatile__(
+ "" hvc(5155) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ * @details
+ * This operation performs a lookup in the MMU's pagetable for a mapping
+ * derived from a specified segment.
+ *
+ * If a mapping is found that is derived from the specified segment, the
+ * operation will return the segment offset, size and the page attributes
+ * associated with the mapping.
+ *
+ * If a segment_index value of OKL4_KCAP_INVALID is specified, the
+ * operation will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param vaddr
+ * Virtual address of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ * Resulting error.
+ * @retval offset
+ * Offset into the segment.
+ * @retval size
+ * Size of the mapping, in bytes. Size will be one of the supported
+ * machine page-sizes. If a segment search was performed, the lower
+ * 10 bits of size contain the returned segment-index.
+ * @retval page_attr
+ * Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp size_tmp;
+ struct _okl4_sys_mmu_lookup_page_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3");
+ register uint32_t r4 asm("r4");
+ __asm__ __volatile__(
+ ""hvc(5156)"\n\t"
+ : "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ result.offset = (okl4_psize_tr_t)(r1);
+ size_tmp.words.lo = r2;
+ size_tmp.words.hi = r3;
+ result.size = (okl4_mmu_lookup_size_t)(size_tmp.val);
+ result.page_attr = (_okl4_page_attribute_t)(r4);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index)
+{
+ struct _okl4_sys_mmu_lookup_page_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3");
+ __asm__ __volatile__(
+ "" hvc(5156) "\n\t"
+ : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.offset = (okl4_psize_tr_t)(x1);
+ result.size = (okl4_mmu_lookup_size_t)(x2);
+ result.page_attr = (_okl4_page_attribute_t)(x3);
+ return result;
+}
+
+#endif
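+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard;
+ * OKL4_ERROR_OK is assumed to be the success value): looking up a
+ * virtual address with a segment search and decoding the segment index
+ * from the low 10 bits of the returned size.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_lookup_mapping(okl4_kcap_t mmu, okl4_laddr_tr_t vaddr)
+{
+    struct _okl4_sys_mmu_lookup_page_return r;
+
+    /* Passing OKL4_KCAP_INVALID as the segment index requests a search
+     * across the MMU's attached segments. */
+    r = _okl4_sys_mmu_lookup_page(mmu, vaddr, (okl4_count_t)OKL4_KCAP_INVALID);
+    if (r.error == OKL4_ERROR_OK) {
+        okl4_count_t seg = (okl4_count_t)(r.size & 0x3ffU);
+        okl4_mmu_lookup_size_t page_size =
+                r.size & ~(okl4_mmu_lookup_size_t)0x3ffU;
+
+        /* seg is the matching attachment index; page_size, r.offset and
+         * r.page_attr describe the mapping itself. */
+        (void)seg; (void)page_size; (void)r.offset; (void)r.page_attr;
+    }
+    return r.error;
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */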
+
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ * @details
+ * This operation performs a lookup in the MMU's pagetable for a mapping
+ * derived from a specified segment.
+ *
+ * If a mapping is found that is derived from the specified segment, the
+ * operation will return the segment offset, size and the page attributes
+ * associated with the mapping.
+ *
+ * If a segment_index value of OKL4_KCAP_INVALID is specified, the
+ * operation will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param laddr_pn
+ * Logical address page-number of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ *
+ * @retval segment_index
+ * Index into the MMU's segment attachment table, or error.
+ * @retval offset_pn
+ * Offset into the segment in units of page numbers.
+ * @retval count_pn
+ * The number of consecutive pages to map/unmap.
+ * @retval page_attr
+ * Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index)
+{
+ struct _okl4_sys_mmu_lookup_pn_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3");
+ __asm__ __volatile__(
+ ""hvc(5157)"\n\t"
+ : "=r"(r3), "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ result.segment_index = (okl4_mmu_lookup_index_t)(r0);
+ result.offset_pn = (okl4_psize_pn_t)(r1);
+ result.count_pn = (okl4_lsize_pn_t)(r2);
+ result.page_attr = (_okl4_page_attribute_t)(r3);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index)
+{
+ struct _okl4_sys_mmu_lookup_pn_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3");
+ __asm__ __volatile__(
+ "" hvc(5157) "\n\t"
+ : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.segment_index = (okl4_mmu_lookup_index_t)(x0);
+ result.offset_pn = (okl4_psize_pn_t)(x1);
+ result.count_pn = (okl4_lsize_pn_t)(x2);
+ result.page_attr = (_okl4_page_attribute_t)(x3);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ * @details
+ * This operation installs a new mapping into the MMU at the specified
+ * virtual address. The mapping's physical address is determined from the
+ * specified segment and offset, and the mapping's size and attributes
+ * are provided in \p size and \p page_attr.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param vaddr
+ * Virtual address of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ * @param offset
+ * Offset into the segment.
+ * @param size
+ * Size of the mapping, in bytes.
+ * @param page_attr
+ * Mapping attributes.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+ , _okl4_page_attribute_t page_attr)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3") = (uint32_t)offset;
+ register uint32_t r4 asm("r4") = (uint32_t)size;
+ register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+ __asm__ __volatile__(
+ ""hvc(5158)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+ :
+ : "cc", "memory"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+ , _okl4_page_attribute_t page_attr)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)offset;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)size;
+ register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+ __asm__ __volatile__(
+ "" hvc(5158) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+ :
+ : "cc", "memory", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
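+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard):
+ * installing a single mapping from an already-attached segment. The 4KB
+ * size is an assumption; any supported machine page size may be used.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_map_one_page(okl4_kcap_t mmu, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset,
+        _okl4_page_attribute_t attr)
+{
+    /* The segment must already be attached at `segment_index`; the
+     * physical address is the segment base plus `offset`. */
+    return _okl4_sys_mmu_map_page(mmu, vaddr, segment_index, offset,
+            (okl4_lsize_tr_t)4096, attr);
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */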
+
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ * @details
+ * This operation installs a new mapping into the MMU at the specified
+ * virtual address. The mapping's physical address is determined from the
+ * specified segment and offset, and the mapping's size and attributes
+ * are provided in \p size and \p page_attr.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param laddr_pn
+ * Logical address page-number of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ * @param segment_offset_pn
+ * Offset into the segment in units of page numbers.
+ * @param count_pn
+ * The number of consecutive pages to map/unmap.
+ * @param page_attr
+ * Mapping attributes.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+ okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3") = (uint32_t)segment_offset_pn;
+ register uint32_t r4 asm("r4") = (uint32_t)count_pn;
+ register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+ __asm__ __volatile__(
+ ""hvc(5159)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+ :
+ : "cc", "memory"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+ okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)segment_offset_pn;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)count_pn;
+ register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+ __asm__ __volatile__(
+ "" hvc(5159) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+ :
+ : "cc", "memory", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ * @details
+ * This operation removes a mapping from the MMU at the specified virtual
+ * address. The size and address specified must match the size and base
+ * address of the mapping being removed.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param vaddr
+ * Virtual address of the mapping.
+ * @param size
+ * Size of the mapping, in bytes.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_lsize_tr_t size)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+ register uint32_t r2 asm("r2") = (uint32_t)size;
+ __asm__ __volatile__(
+ ""hvc(5160)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_lsize_tr_t size)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+ __asm__ __volatile__(
+ "" hvc(5160) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ * @details
+ * This operation removes a mapping from the MMU at the specified virtual
+ * address. The size and address specified must match the size and base
+ * address of the mapping being removed.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param laddr_pn
+ * Logical address page-number of the mapping.
+ * @param count_pn
+ * The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_lsize_pn_t count_pn)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+ register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+ __asm__ __volatile__(
+ ""hvc(5161)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_lsize_pn_t count_pn)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+ __asm__ __volatile__(
+ "" hvc(5161) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param vaddr
+ * Virtual address of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ * @param size
+ * Size of the mapping, in bytes.
+ * @param attrs
+ * Mapping cache attributes.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index, okl4_lsize_tr_t size,
+ okl4_page_cache_t attrs)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3") = (uint32_t)size;
+ register uint32_t r4 asm("r4") = (uint32_t)attrs;
+ __asm__ __volatile__(
+ ""hvc(5162)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index, okl4_lsize_tr_t size,
+ okl4_page_cache_t attrs)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+ __asm__ __volatile__(
+ "" hvc(5162) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+ :
+ : "cc", "memory", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param vaddr
+ * Virtual address of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ * @param size
+ * Size of the mapping, in bytes.
+ * @param perms
+ * Mapping permissions.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index, okl4_lsize_tr_t size,
+ okl4_page_perms_t perms)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3") = (uint32_t)size;
+ register uint32_t r4 asm("r4") = (uint32_t)perms;
+ __asm__ __volatile__(
+ ""hvc(5163)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+ okl4_count_t segment_index, okl4_lsize_tr_t size,
+ okl4_page_perms_t perms)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+ __asm__ __volatile__(
+ "" hvc(5163) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+ :
+ : "cc", "memory", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param laddr_pn
+ * Logical address page-number of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ * @param count_pn
+ * The number of consecutive pages to map/unmap.
+ * @param attrs
+ * Mapping cache attributes.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+ okl4_page_cache_t attrs)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+ register uint32_t r4 asm("r4") = (uint32_t)attrs;
+ __asm__ __volatile__(
+ ""hvc(5164)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+ okl4_page_cache_t attrs)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+ __asm__ __volatile__(
+ "" hvc(5164) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+ :
+ : "cc", "memory", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ * The target MMU id.
+ * @param laddr_pn
+ * Logical address page-number of the mapping.
+ * @param segment_index
+ * Index into the MMU's segment attachment table.
+ * @param count_pn
+ * The number of consecutive pages to map/unmap.
+ * @param perms
+ * Mapping permissions.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+ okl4_page_perms_t perms)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+ register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+ register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+ register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+ register uint32_t r4 asm("r4") = (uint32_t)perms;
+ __asm__ __volatile__(
+ ""hvc(5165)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+ okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+ okl4_page_perms_t perms)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+ __asm__ __volatile__(
+ "" hvc(5165) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+ :
+ : "cc", "memory", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * A NULL system-call for latency measurement.
+ *
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+ register uint32_t r0 asm("r0");
+ __asm__ __volatile__(
+ ""hvc(5198)"\n\t"
+ : "=r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+ register okl4_register_t x0 asm("x0");
+ __asm__ __volatile__(
+ "" hvc(5198) "\n\t"
+ : "=r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
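+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard):
+ * issuing the NULL system call in a loop to estimate hypercall
+ * round-trip cost; pair it with a platform timestamp source (not shown).
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static void
+example_measure_hypercall_latency(unsigned int iterations)
+{
+    unsigned int i;
+
+    for (i = 0U; i < iterations; i++) {
+        (void)_okl4_sys_performance_null_syscall();
+    }
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */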
+
+/**
+ *
+ * Control a pipe, including reset, ready and halt functionality.
+ *
+ * @param pipe_id
+ * The capability identifier of the pipe.
+ * @param control
+ * The state control argument.
+ *
+ * @retval error
+ * The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+ register uint32_t r1 asm("r1") = (uint32_t)control;
+ __asm__ __volatile__(
+ ""hvc(5146)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)control;
+ __asm__ __volatile__(
+ "" hvc(5146) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Receive a message from a microvisor pipe.
+ *
+ * @param pipe_id
+ * The capability identifier of the pipe.
+ * @param buf_size
+ * Size of the receive buffer.
+ * @param data
+ * Pointer to receive buffer.
+ *
+ * @retval error
+ * The returned error code.
+ * @retval size
+ * Size of the received message.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp size_tmp;
+ struct _okl4_sys_pipe_recv_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+ register uint32_t r1 asm("r1") = (uint32_t)buf_size;
+ register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+ __asm__ __volatile__(
+ ""hvc(5147)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ size_tmp.words.lo = r1;
+ size_tmp.words.hi = r2;
+ result.size = (okl4_ksize_t)(size_tmp.val);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+ struct _okl4_sys_pipe_recv_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)buf_size;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+ __asm__ __volatile__(
+ "" hvc(5147) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.size = (okl4_ksize_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * Send a message to a microvisor pipe.
+ *
+ * @param pipe_id
+ * The capability identifier of the pipe.
+ * @param size
+ * Size of the message to send.
+ * @param data
+ * Pointer to the message payload to send.
+ *
+ * @retval error
+ * The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+ register uint32_t r1 asm("r1") = (uint32_t)size;
+ register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+ __asm__ __volatile__(
+ ""hvc(5148)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)size;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+ __asm__ __volatile__(
+ "" hvc(5148) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
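+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard;
+ * OKL4_ERROR_OK is assumed to be the success value): receiving a
+ * message from a pipe and sending the same payload back.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static okl4_error_t
+example_pipe_echo(okl4_kcap_t pipe)
+{
+    uint8_t buf[64];
+    struct _okl4_sys_pipe_recv_return r;
+
+    /* Receive up to sizeof(buf) bytes. */
+    r = _okl4_sys_pipe_recv(pipe, (okl4_vsize_t)sizeof(buf), buf);
+    if (r.error != OKL4_ERROR_OK) {
+        return r.error;
+    }
+
+    /* Echo the received payload back on the same pipe. */
+    return _okl4_sys_pipe_send(pipe, (okl4_vsize_t)r.size, buf);
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */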
+
+/**
+ *
+ * @brief Waive the current vCPU's priority.
+ *
+ * @details
+ * This operation allows a vCPU to change its waived priority. A vCPU has
+ * both a base priority and its current priority.
+ *
+ * The base priority is the statically assigned maximum priority that a
+ * vCPU has been given. The current priority is the priority used for
+ * system scheduling and is limited to the range of zero to the base
+ * priority.
+ *
+ * The `waive-priority` operation allows a vCPU to set its current
+ * priority and is normally used to reduce its current priority. This
+ * allows a vCPU to perform work at a lower system priority, and supports
+ * the interleaved scheduling feature.
+ *
+ * A vCPU's priority is restored to its base priority whenever an
+ * interrupt that has the vCPU registered as its handler is raised. This
+ * allows interrupt handling and guest operating systems to return to the
+ * base priority to potentially do higher priority work.
+ *
+ * After calling this interface an immediate reschedule will be
+ * performed.
+ *
+ * @param priority
+ * New vCPU priority.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)priority;
+ __asm__ __volatile__(
+ ""hvc(5151)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)priority;
+ __asm__ __volatile__(
+ "" hvc(5151) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
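+
+/*
+ * Illustrative sketch (hypothetical OKL4_SYSCALL_USAGE_EXAMPLES guard):
+ * waiving priority before background work. The base priority is
+ * restored automatically when a registered interrupt is next raised.
+ */
+#if defined(OKL4_SYSCALL_USAGE_EXAMPLES)
+static void
+example_do_background_work(void)
+{
+    /* Drop the current priority to the minimum; an immediate reschedule
+     * follows the call. */
+    (void)_okl4_sys_priority_waive((okl4_priority_t)0);
+
+    /* ... perform low-priority work here ... */
+}
+#endif /* OKL4_SYSCALL_USAGE_EXAMPLES */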
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ *
+ * @retval reg_w0
+ * @retval reg_w1
+ * @retval reg_w2
+ * @retval reg_w3
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+ okl4_register_and_set_t reg_and_set)
+{
+ struct _okl4_sys_remote_get_register_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+ register uint32_t r2 asm("r2");
+ register uint32_t r3 asm("r3");
+ register uint32_t r4 asm("r4");
+ __asm__ __volatile__(
+ ""hvc(5200)"\n\t"
+ : "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ result.reg_w0 = (uint32_t)(r0);
+ result.reg_w1 = (uint32_t)(r1);
+ result.reg_w2 = (uint32_t)(r2);
+ result.reg_w3 = (uint32_t)(r3);
+ result.error = (okl4_error_t)(r4);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+ okl4_register_and_set_t reg_and_set)
+{
+ struct _okl4_sys_remote_get_register_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+ register okl4_register_t x2 asm("x2");
+ register okl4_register_t x3 asm("x3");
+ register okl4_register_t x4 asm("x4");
+ __asm__ __volatile__(
+ "" hvc(5200) "\n\t"
+ : "=r"(x2), "=r"(x3), "=r"(x4), "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x5", "x6", "x7"
+ );
+
+
+ result.reg_w0 = (uint32_t)(x0);
+ result.reg_w1 = (uint32_t)(x1);
+ result.reg_w2 = (uint32_t)(x2);
+ result.reg_w3 = (uint32_t)(x3);
+ result.error = (okl4_error_t)(x4);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+ void *regs)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)set;
+ register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+ __asm__ __volatile__(
+ ""hvc(5201)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+ void *regs)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+ __asm__ __volatile__(
+ "" hvc(5201) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_READ_MEMORY32
+ *
+ * @param target
+ * @param address
+ *
+ * @retval data
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+ struct _okl4_sys_remote_read_memory32_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)(address & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5202)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.data = (uint32_t)(r0);
+ result.error = (okl4_error_t)(r1);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+ struct _okl4_sys_remote_read_memory32_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+ __asm__ __volatile__(
+ "" hvc(5202) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.data = (uint32_t)(x0);
+ result.error = (okl4_error_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ * @param reg_w0
+ * @param reg_w1
+ * @param reg_w2
+ * @param reg_w3
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+ okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+ uint32_t reg_w2, uint32_t reg_w3)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+ register uint32_t r2 asm("r2") = (uint32_t)reg_w0;
+ register uint32_t r3 asm("r3") = (uint32_t)reg_w1;
+ register uint32_t r4 asm("r4") = (uint32_t)reg_w2;
+ register uint32_t r5 asm("r5") = (uint32_t)reg_w3;
+ __asm__ __volatile__(
+ ""hvc(5203)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+ :
+ : "cc", "memory"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+ okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+ uint32_t reg_w2, uint32_t reg_w3)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)reg_w0;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)reg_w1;
+ register okl4_register_t x4 asm("x4") = (okl4_register_t)reg_w2;
+ register okl4_register_t x5 asm("x5") = (okl4_register_t)reg_w3;
+ __asm__ __volatile__(
+ "" hvc(5203) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+ :
+ : "cc", "memory", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+ void *regs)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)set;
+ register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+ __asm__ __volatile__(
+ ""hvc(5204)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+ void *regs)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+ __asm__ __volatile__(
+ "" hvc(5204) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_WRITE_MEMORY32
+ *
+ * @param target
+ * @param address
+ * @param data
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+ uint32_t data)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)(address & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)data;
+ __asm__ __volatile__(
+ ""hvc(5205)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+ uint32_t data)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)data;
+ __asm__ __volatile__(
+ "" hvc(5205) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Retrieve suspend status.
+ *
+ * @param scheduler_id
+ * The scheduler capability identifier.
+ *
+ * @retval error
+ * Resulting error.
+ * @retval power_suspend_version
+ * The power suspend versioning number.
+ * @retval power_suspend_running_count
+ * The number of running power_suspend watched vCPUs.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+ struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+ register uint32_t r1 asm("r1");
+ register uint32_t r2 asm("r2");
+ __asm__ __volatile__(
+ ""hvc(5206)"\n\t"
+ : "=r"(r1), "=r"(r2), "+r"(r0)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ result.power_suspend_version = (uint32_t)(r1);
+ result.power_suspend_running_count = (uint32_t)(r2);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+ struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+ register okl4_register_t x1 asm("x1");
+ register okl4_register_t x2 asm("x2");
+ __asm__ __volatile__(
+ "" hvc(5206) "\n\t"
+ : "=r"(x1), "=r"(x2), "+r"(x0)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.power_suspend_version = (uint32_t)(x1);
+ result.power_suspend_running_count = (uint32_t)(x2);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * Register a vCPU for suspend count tracking.
+ *
+ * @param scheduler_id
+ * The scheduler capability identifier.
+ * @param vcpu_id
+ * The target vCPU capability identifier.
+ * @param watch
+ * Whether to register or unregister the vCPU.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+ okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vcpu_id;
+ register uint32_t r2 asm("r2") = (uint32_t)watch;
+ __asm__ __volatile__(
+ ""hvc(5207)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+ okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vcpu_id;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)watch;
+ __asm__ __volatile__(
+ "" hvc(5207) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Disable profiling of a physical CPU.
+ *
+ * @param phys_cpu
+ * The physical CPU capability id.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+ __asm__ __volatile__(
+ ""hvc(5168)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+ __asm__ __volatile__(
+ "" hvc(5168) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a physical CPU.
+ *
+ * This operation enables profiling of physical CPU related properties
+ * such as core usage and context switch count.
+ *
+ * @param phys_cpu
+ * The physical CPU capability id.
+ *
+ * @retval error
+ * Resulting error.
+ * @retval timestamp
+ * The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp timestamp_tmp;
+ struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+ register uint32_t r1 asm("r1");
+ register uint32_t r2 asm("r2");
+ __asm__ __volatile__(
+ ""hvc(5169)"\n\t"
+ : "=r"(r1), "=r"(r2), "+r"(r0)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ timestamp_tmp.words.lo = r1;
+ timestamp_tmp.words.hi = r2;
+ result.timestamp = (uint64_t)(timestamp_tmp.val);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+ struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+ register okl4_register_t x1 asm("x1");
+ __asm__ __volatile__(
+ "" hvc(5169) "\n\t"
+ : "=r"(x1), "+r"(x0)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.timestamp = (uint64_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a physical CPU core.
+ *
+ * @details
+ *    This operation returns a set of profiling data relating to a physical
+ *    CPU. A timestamp of the current system time in units of microseconds
+ *    is recorded during the operation. The remaining data fields indicate
+ *    runtime and number of events since the last invocation of this
+ *    operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics
+ *    to zero.
+ *
+ * @par profile data
+ * For a physical CPU, the returned data is:
+ * - \p cpu_time: Idle time of the CPU in microseconds.
+ * - \p context_switches: Number of context switches on this core.
+ * - \p enabled: True if profiling is enabled on this CPU.
+ *
+ * @param phys_cpu
+ * The physical CPU capability id.
+ * @param profile
+ * `return by reference`. Profiling data.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
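+/*
+ * Illustrative usage (editor's sketch). phys_cpu_cap is a placeholder
+ * capability, OKL4_ERROR_OK is assumed to be the success value, and the
+ * okl4_schedule_profile_data members are assumed to match the fields
+ * documented above (cpu_time, context_switches, enabled):
+ *
+ *     struct _okl4_sys_schedule_profile_cpu_enable_return en;
+ *     struct okl4_schedule_profile_data data;
+ *     okl4_error_t err;
+ *
+ *     en = _okl4_sys_schedule_profile_cpu_enable(phys_cpu_cap);
+ *     // ... let the system run for the measurement interval ...
+ *     err = _okl4_sys_schedule_profile_cpu_get_data(phys_cpu_cap, &data);
+ *     if (en.error == OKL4_ERROR_OK && err == OKL4_ERROR_OK) {
+ *         // data.cpu_time is the idle time in microseconds and
+ *         // data.context_switches counts context switches; the kernel
+ *         // resets the metrics to zero on each successful retrieval.
+ *     }
+ */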
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+ struct okl4_schedule_profile_data *profile)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+ register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+ __asm__ __volatile__(
+ ""hvc(5170)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+ struct okl4_schedule_profile_data *profile)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+ __asm__ __volatile__(
+ "" hvc(5170) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Disable profiling of a vCPU.
+ *
+ * @param vcpu
+ * The target vCPU id.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ __asm__ __volatile__(
+ ""hvc(5171)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ __asm__ __volatile__(
+ "" hvc(5171) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a vCPU.
+ *
+ * This operation enables profiling of vCPU related properties such as
+ * execution time and context switch count.
+ *
+ * @param vcpu
+ * The target vCPU id.
+ *
+ * @retval error
+ * Resulting error.
+ * @retval timestamp
+ * The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp timestamp_tmp;
+ struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ register uint32_t r1 asm("r1");
+ register uint32_t r2 asm("r2");
+ __asm__ __volatile__(
+ ""hvc(5172)"\n\t"
+ : "=r"(r1), "=r"(r2), "+r"(r0)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ timestamp_tmp.words.lo = r1;
+ timestamp_tmp.words.hi = r2;
+ result.timestamp = (uint64_t)(timestamp_tmp.val);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+ struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ register okl4_register_t x1 asm("x1");
+ __asm__ __volatile__(
+ "" hvc(5172) "\n\t"
+ : "=r"(x1), "+r"(x0)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.timestamp = (uint64_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a vCPU.
+ *
+ * @details
+ *    This operation returns a set of profiling data relating to a vCPU.
+ *    A timestamp of the current system time in units of microseconds is
+ *    recorded during the operation. The remaining data fields indicate
+ *    runtime and number of events since the last invocation of this
+ *    operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics
+ *    to zero.
+ *
+ * @par profile data
+ * For a vCPU, the returned data is:
+ * - \p cpu_time: Execution time of the vCPU in microseconds.
+ * - \p context_switches: Number of context switches.
+ * - \p cpu_migrations: Number of migrations between physical CPUs.
+ *    - \p enabled: True if profiling is enabled on this vCPU.
+ *
+ * @param vcpu
+ * The target vCPU id.
+ * @param profile
+ * `return by reference`. Profiling data.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+ struct okl4_schedule_profile_data *profile)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+ __asm__ __volatile__(
+ ""hvc(5173)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+ struct okl4_schedule_profile_data *profile)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+ __asm__ __volatile__(
+ "" hvc(5173) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: SCHEDULER_SUSPEND
+ *
+ * @param scheduler_id
+ * @param power_state
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+ okl4_power_state_t power_state)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+ register uint32_t r1 asm("r1") = (uint32_t)power_state;
+ __asm__ __volatile__(
+ ""hvc(5150)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+ okl4_power_state_t power_state)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)power_state;
+ __asm__ __volatile__(
+ "" hvc(5150) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Cancel an active timeout on a specified timer.
+ *
+ * @details
+ *    This operation cancels an active timeout on a specified timer. The
+ *    operation returns the time that was remaining on the cancelled
+ *    timeout. If there was not an active timeout, the operation returns an
+ *    error.
+ *
+ *    The returned remaining time is formatted in the requested units from
+ *    the \p flags argument.
+ *
+ *    The operation will also return the \p old_flags field indicating
+ *    whether the cancelled timeout was periodic or one-shot and whether it
+ *    was an absolute or relative timeout.
+ *
+ * @par flags
+ *      - If the \p units flag is set, the remaining time is returned in
+ *      units of timer ticks. The length of a timer tick is KSP defined and
+ *      may be obtained with the @ref _okl4_sys_timer_get_resolution
+ *      operation.
+ *      - If the \p units flag is not set, the remaining time is returned in
+ *      nanoseconds.
+ *
+ * @par old_flags
+ * - If the \p periodic flag is set, the cancelled timeout was periodic.
+ * - If the \p periodic flag is not set, the cancelled timeout was
+ * one-shot.
+ * - If the \p absolute flag is set, the cancelled timeout was an
+ * absolute time.
+ * - If the \p absolute flag is not set, the cancelled timeout was a
+ * relative time.
+ *
+ * @param timer
+ * The target timer capability.
+ * @param flags
+ * Flags for the requested operation.
+ *
+ * @retval remaining
+ * Time that was remaining on the cancelled timeout.
+ * @retval old_flags
+ * Flags relating to the cancelled timeout.
+ * @retval error
+ * Resulting error.
+ *
+ */
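+/*
+ * Illustrative usage (editor's sketch). timer_cap is a placeholder
+ * capability; passing a zero flags value to request nanosecond units is an
+ * inference from the flag descriptions above, and OKL4_ERROR_OK is assumed
+ * to be the success value:
+ *
+ *     struct _okl4_sys_timer_cancel_return c;
+ *
+ *     c = _okl4_sys_timer_cancel(timer_cap, (okl4_timer_flags_t)0);
+ *     if (c.error == OKL4_ERROR_OK) {
+ *         // c.remaining is the time that was left, in nanoseconds, and
+ *         // c.old_flags records whether the cancelled timeout was
+ *         // periodic or one-shot, absolute or relative.
+ *     } else {
+ *         // No timeout was active (or the capability was invalid).
+ *     }
+ */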
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp remaining_tmp;
+ struct _okl4_sys_timer_cancel_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)timer;
+ register uint32_t r1 asm("r1") = (uint32_t)flags;
+ register uint32_t r2 asm("r2");
+ register uint32_t r3 asm("r3");
+ __asm__ __volatile__(
+ ""hvc(5176)"\n\t"
+ : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ remaining_tmp.words.lo = r0;
+ remaining_tmp.words.hi = r1;
+ result.remaining = (uint64_t)(remaining_tmp.val);
+ result.old_flags = (okl4_timer_flags_t)(r2);
+ result.error = (okl4_error_t)(r3);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+ struct _okl4_sys_timer_cancel_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+ register okl4_register_t x2 asm("x2");
+ __asm__ __volatile__(
+ "" hvc(5176) "\n\t"
+ : "=r"(x2), "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.remaining = (uint64_t)(x0);
+ result.old_flags = (okl4_timer_flags_t)(x1);
+ result.error = (okl4_error_t)(x2);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Query the timer frequency and obtain time conversion constants.
+ *
+ * @details
+ *    This operation returns the timer frequency and the conversion
+ *    constants that may be used to convert between units of nanoseconds
+ *    and units of ticks.
+ *
+ *    The timer frequency is returned as a 64-bit value in units of
+ *    micro-hertz (1000000 = 1Hz). The timer resolution (or period) can be
+ *    calculated from the frequency.
+ *
+ *    The time conversion constants are returned as values \p a and \p b,
+ *    which can be used for unit conversions as follows:
+ *    - ns = (ticks * \p a) / \p b
+ *    - ticks = (ns * \p b) / \p a
+ *
+ * @note
+ *    The constants are provided by the KSP module and are designed to be
+ *    used for simple overflow-free computation using 64-bit arithmetic,
+ *    covering time values from 0 to 2 years.
+ *
+ * @param timer
+ * The target timer capability.
+ *
+ * @retval tick_freq
+ * The timer frequency [in units of micro-hertz].
+ * @retval a
+ * Ticks to nanoseconds conversion multiplier.
+ * @retval b
+ * Ticks to nanoseconds conversion divisor.
+ * @retval error
+ * Resulting error.
+ *
+ */
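+/*
+ * Illustrative conversions (editor's sketch). timer_cap is a placeholder
+ * capability and OKL4_ERROR_OK is assumed to be the success value:
+ *
+ *     struct _okl4_sys_timer_get_resolution_return res;
+ *
+ *     res = _okl4_sys_timer_get_resolution(timer_cap);
+ *     if (res.error == OKL4_ERROR_OK) {
+ *         // tick_freq is in micro-hertz, so the tick period in
+ *         // nanoseconds is 10^15 / tick_freq.
+ *         uint64_t period_ns = 1000000000000000ULL / res.tick_freq;
+ *
+ *         // Convert 100 ticks to nanoseconds and back using the
+ *         // KSP-provided constants, exactly as documented above.
+ *         uint64_t ns    = (100ULL * res.a) / res.b;
+ *         uint64_t ticks = (ns * res.b) / res.a;
+ *     }
+ */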
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp tick_freq_tmp;
+ struct _okl4_sys_timer_get_resolution_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)timer;
+ register uint32_t r1 asm("r1");
+ register uint32_t r2 asm("r2");
+ register uint32_t r3 asm("r3");
+ register uint32_t r4 asm("r4");
+ __asm__ __volatile__(
+ ""hvc(5177)"\n\t"
+ : "=r"(r1), "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ tick_freq_tmp.words.lo = r0;
+ tick_freq_tmp.words.hi = r1;
+ result.tick_freq = (uint64_t)(tick_freq_tmp.val);
+ result.a = (uint32_t)(r2);
+ result.b = (uint32_t)(r3);
+ result.error = (okl4_error_t)(r4);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+ struct _okl4_sys_timer_get_resolution_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+ register okl4_register_t x1 asm("x1");
+ register okl4_register_t x2 asm("x2");
+ register okl4_register_t x3 asm("x3");
+ __asm__ __volatile__(
+ "" hvc(5177) "\n\t"
+ : "=r"(x1), "=r"(x2), "=r"(x3), "+r"(x0)
+ :
+ : "cc", "memory", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.tick_freq = (uint64_t)(x0);
+ result.a = (uint32_t)(x1);
+ result.b = (uint32_t)(x2);
+ result.error = (okl4_error_t)(x3);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Query the current system time.
+ *
+ * @details
+ * This operation returns the current absolute system time. The \p flags
+ * argument is used to specify the desired units for the return value.
+ *
+ * - Absolute time is based on an arbitrary time zero, defined to be at
+ * or before the time of boot.
+ *
+ * @par flags
+ * - If the \p units flag is set, the time is returned in units
+ * of timer ticks. The length of a timer tick is KSP defined and may
+ * be obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ * - If the \p units flag is not set, the time is returned in
+ * terms of nanoseconds.
+ *
+ * @param timer
+ * The target timer capability.
+ * @param flags
+ * Flags for the requested operation.
+ *
+ * @retval time
+ * The current system time.
+ * @retval error
+ * Resulting error.
+ *
+ */
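+/*
+ * Illustrative usage (editor's sketch). timer_cap is a placeholder; a zero
+ * flags value is assumed to leave the units flag clear and therefore
+ * select nanoseconds, per the description above:
+ *
+ *     struct _okl4_sys_timer_get_time_return now;
+ *
+ *     now = _okl4_sys_timer_get_time(timer_cap, (okl4_timer_flags_t)0);
+ *     if (now.error == OKL4_ERROR_OK) {
+ *         // now.time is the absolute system time in nanoseconds.
+ *     }
+ */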
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp time_tmp;
+ struct _okl4_sys_timer_get_time_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)timer;
+ register uint32_t r1 asm("r1") = (uint32_t)flags;
+ register uint32_t r2 asm("r2");
+ __asm__ __volatile__(
+ ""hvc(5178)"\n\t"
+ : "=r"(r2), "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ time_tmp.words.lo = r0;
+ time_tmp.words.hi = r1;
+ result.time = (uint64_t)(time_tmp.val);
+ result.error = (okl4_error_t)(r2);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+ struct _okl4_sys_timer_get_time_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+ __asm__ __volatile__(
+ "" hvc(5178) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.time = (uint64_t)(x0);
+ result.error = (okl4_error_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Query a timer about an active timeout.
+ *
+ * @details
+ * The operation queries a timer about an active timeout. If there is no
+ * active timeout, this operation returns an error.
+ *
+ *    If the timer has an active timeout, this operation returns the
+ *    remaining time and the flags associated with the timeout. The
+ *    remaining time is returned in the requested units from the \p flags
+ *    argument.
+ *
+ *    The operation also returns the \p active_flags field indicating
+ *    whether the active timeout is periodic or one-shot and whether it is
+ *    an absolute or relative timeout.
+ *
+ * @par flags
+ *      - If the \p units flag is set, the remaining time is returned in
+ *      units of timer ticks. The length of a timer tick is KSP defined and
+ *      may be obtained with the @ref _okl4_sys_timer_get_resolution
+ *      operation.
+ *      - If the \p units flag is not set, the remaining time is returned in
+ *      units of nanoseconds.
+ *
+ * @par active_flags
+ * - If the \p periodic flag is set, the timeout is periodic.
+ * - If the \p periodic flag is not set, the timeout is one-shot.
+ * - If the \p absolute flag is set, the timeout is an absolute time.
+ * - If the \p absolute flag is not set, the timeout is a relative time.
+ *
+ * @param timer
+ * The target timer capability.
+ * @param flags
+ * Flags for the requested operation.
+ *
+ * @retval remaining
+ * Time remaining before the next timeout.
+ * @retval active_flags
+ * Flags relating to the active timeout.
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp remaining_tmp;
+ struct _okl4_sys_timer_query_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)timer;
+ register uint32_t r1 asm("r1") = (uint32_t)flags;
+ register uint32_t r2 asm("r2");
+ register uint32_t r3 asm("r3");
+ __asm__ __volatile__(
+ ""hvc(5179)"\n\t"
+ : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ remaining_tmp.words.lo = r0;
+ remaining_tmp.words.hi = r1;
+ result.remaining = (uint64_t)(remaining_tmp.val);
+ result.active_flags = (okl4_timer_flags_t)(r2);
+ result.error = (okl4_error_t)(r3);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+ struct _okl4_sys_timer_query_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+ register okl4_register_t x2 asm("x2");
+ __asm__ __volatile__(
+ "" hvc(5179) "\n\t"
+ : "=r"(x2), "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.remaining = (uint64_t)(x0);
+ result.active_flags = (okl4_timer_flags_t)(x1);
+ result.error = (okl4_error_t)(x2);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a timer with a specified timeout.
+ *
+ * @details
+ *    This operation optionally resets then starts a timer with a new
+ *    timeout. The specified timeout may be an `absolute` or `relative`
+ *    time, may be `one-shot` or `periodic`, and may be specified in units
+ *    of nanoseconds or ticks.
+ *
+ * @par flags
+ *      - If the \p absolute flag is set, the timeout is treated as an
+ *      absolute time based on an arbitrary time zero, defined to be at or
+ *      before the time of boot.
+ *      - If the \p absolute flag is not set, the timeout is treated as a
+ *      relative time, a specified amount of time into the future (e.g.
+ *      10ms from now).
+ *      - If the \p periodic flag is set, the timeout is treated as a
+ *      periodic timeout that repeats with a period equal to the specified
+ *      timeout.
+ *      - If the \p periodic flag is not set, the timeout is treated as a
+ *      one-shot timeout that expires at the specified time and does not
+ *      repeat.
+ *      - If the \p units flag is set, the timeout is specified in units of
+ *      timer ticks. The length of a timer tick is KSP defined and may be
+ *      obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *      - If the \p units flag is not set, the timeout is specified in units
+ *      of nanoseconds.
+ *      - The \p reload flag allows an active timeout to be cancelled and the
+ *      new timeout to be programmed into the timer.
+ *
+ * @param timer
+ * The target timer capability.
+ * @param timeout
+ * The timeout value.
+ * @param flags
+ * Flags for the requested operation.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
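+/*
+ * Illustrative usage (editor's sketch). timer_cap is a placeholder;
+ * leaving every flag clear is assumed to request a relative, one-shot
+ * timeout specified in nanoseconds, per the flag descriptions above:
+ *
+ *     okl4_error_t err;
+ *
+ *     // Request a one-shot timeout 10ms (10,000,000 ns) from now.
+ *     err = _okl4_sys_timer_start(timer_cap, 10000000ULL,
+ *             (okl4_timer_flags_t)0);
+ *     if (err != OKL4_ERROR_OK) {
+ *         // A timeout may already be active; cancel it first, or set the
+ *         // reload flag to replace it.
+ *     }
+ */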
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+ okl4_timer_flags_t flags)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)timer;
+ register uint32_t r1 asm("r1") = (uint32_t)(timeout & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((timeout >> 32) & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)flags;
+ __asm__ __volatile__(
+ ""hvc(5180)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+ okl4_timer_flags_t flags)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)timeout;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)flags;
+ __asm__ __volatile__(
+ "" hvc(5180) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: TRACEBUFFER_SYNC
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+ __asm__ __volatile__(
+ ""hvc(5199)"\n\t"
+ :
+ :
+ : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+ );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+ __asm__ __volatile__(
+ "" hvc(5199) "\n\t"
+ :
+ :
+ : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Reset a vCPU.
+ *
+ * @details
+ * This operation resets a vCPU to its boot state.
+ *
+ * @param vcpu
+ * The target vCPU capability.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ __asm__ __volatile__(
+ ""hvc(5122)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ __asm__ __volatile__(
+ "" hvc(5122) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a vCPU executing.
+ *
+ * @details
+ *    This operation starts a stopped vCPU at an optionally specified
+ *    instruction pointer. If the instruction pointer is not set, the
+ *    value from the previous stop is preserved.
+ *
+ * @param vcpu
+ * The target vCPU capability.
+ * @param set_ip
+ * Should the instruction pointer be set.
+ * @param ip
+ * Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
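+/*
+ * Illustrative usage (editor's sketch). vcpu_cap and guest_entry are
+ * placeholders; guest_entry stands for an entry point meaningful to the
+ * target vCPU:
+ *
+ *     okl4_error_t err;
+ *
+ *     // Start the stopped vCPU at an explicit instruction pointer.
+ *     err = _okl4_sys_vcpu_start(vcpu_cap, (okl4_bool_t)1,
+ *             (void *)guest_entry);
+ *
+ *     // Or resume from wherever the vCPU previously stopped.
+ *     err = _okl4_sys_vcpu_start(vcpu_cap, (okl4_bool_t)0, NULL);
+ */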
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ register uint32_t r1 asm("r1") = (uint32_t)set_ip;
+ register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)ip;
+ __asm__ __volatile__(
+ ""hvc(5123)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)set_ip;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)ip;
+ __asm__ __volatile__(
+ "" hvc(5123) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Stop a vCPU executing.
+ *
+ * @details
+ *    This operation stops a vCPU's execution until it is next restarted.
+ *
+ * @param vcpu
+ * The target vCPU capability.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ __asm__ __volatile__(
+ ""hvc(5124)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ __asm__ __volatile__(
+ "" hvc(5124) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Switch a vCPU's execution mode between 32-bit and 64-bit.
+ *
+ * @details
+ *    This operation resets a vCPU to its boot state, switches between
+ *    32-bit and 64-bit modes, and restarts execution at the specified
+ *    address. The start address must be valid in the vCPU's initial
+ *    address space, which may not be the same as the caller's address
+ *    space.
+ *
+ * @param vcpu
+ * The target vCPU capability.
+ * @param to_64bit
+ *    The vCPU will reset in 64-bit mode if true; otherwise, in 32-bit mode.
+ * @param set_ip
+ * Should the instruction pointer be set.
+ * @param ip
+ * Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ * Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+ okl4_bool_t set_ip, void *ip)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+ register uint32_t r1 asm("r1") = (uint32_t)to_64bit;
+ register uint32_t r2 asm("r2") = (uint32_t)set_ip;
+ register uint32_t r3 asm("r3") = (uint32_t)(uintptr_t)ip;
+ __asm__ __volatile__(
+ ""hvc(5125)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+ :
+ : "cc", "memory", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+ okl4_bool_t set_ip, void *ip)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)to_64bit;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)set_ip;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)(uintptr_t)ip;
+ __asm__ __volatile__(
+ "" hvc(5125) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+ :
+ : "cc", "memory", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal a synchronization event.
+ *
+ * @details
+ *    This operation sets the wakeup flags for all vCPUs in the caller's
+ *    domain. If any vCPUs in the domain are waiting due to a pending
+ *    `sync_wfe` operation, they will be released from the wait. The OKL4
+ *    scheduler will then determine which vCPUs should execute first based
+ *    on their priority.
+ *
+ *    This `sync_sev` operation is non-blocking and is used to signal other
+ *    vCPUs about some user-defined event. A typical use of this operation
+ *    is to signal the release of a spinlock to other waiting vCPUs.
+ *
+ * @see _okl4_sys_vcpu_sync_wfe
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+ __asm__ __volatile__(
+ ""hvc(5126)"\n\t"
+ :
+ :
+ : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+ );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+ __asm__ __volatile__(
+ "" hvc(5126) "\n\t"
+ :
+ :
+ : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Wait for a synchronization event.
+ *
+ * @details
+ *    This operation is used to defer the execution of a vCPU while it is
+ *    waiting for an event. This operation is non-blocking, in that if no
+ *    other vCPUs in the system are runnable, the operation will complete
+ *    and the vCPU is not blocked. The `sync_wfe` operation uses the
+ *    \p holder argument as a hint identifying the vCPU the caller is
+ *    waiting on.
+ *
+ *    This operation first determines whether there is a pending wakeup
+ *    flag set for the calling vCPU. If the flag is set, the operation
+ *    clears the flag and returns immediately. If the caller has provided a
+ *    valid \p holder id, and the holder is currently executing on a
+ *    different physical core, the operation again returns immediately.
+ *
+ *    In all other cases, the Microvisor records that the vCPU is waiting
+ *    and reduces the vCPU's priority temporarily to the lowest priority in
+ *    the system. The scheduler is then invoked to rebalance the system.
+ *
+ *    A waiting vCPU will continue execution and return from the `sync_wfe`
+ *    operation as soon as no higher priority vCPUs in the system are
+ *    available for scheduling, or a wake-up event is signalled by another
+ *    vCPU in the same domain.
+ *
+ * @par holder
+ *    The holder identifier may be a valid capability to another vCPU, or
+ *    an invalid id. If the provided id is valid, it is used as a hint to
+ *    the Microvisor that the caller is waiting on the specified vCPU. The
+ *    `vcpu_sync` API is optimized for short spinlock type use-cases and
+ *    will therefore allow the caller to continue execution without
+ *    waiting, if the target \p holder vCPU is presently running on another
+ *    physical core. This is done to reduce latency with the expectation
+ *    that the holder vCPU will soon release the lock.
+ *
+ * @see _okl4_sys_vcpu_sync_sev
+ *
+ * @param holder
+ * Capability of the vCPU to wait for, or an invalid designator.
+ *
+ *
+ */
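+/*
+ * Illustrative spinlock sketch (editor's example of the use-case described
+ * above, not an OKL4-provided lock). The lock structure, its owner field
+ * and the use of GCC __atomic builtins are all placeholders for whatever
+ * the guest actually uses:
+ *
+ *     struct demo_lock {
+ *         uint32_t    locked;  // 0 = free, 1 = held
+ *         okl4_kcap_t owner;   // cap of the last holder (or an invalid id)
+ *     };
+ *
+ *     static void demo_lock_acquire(struct demo_lock *l, okl4_kcap_t self)
+ *     {
+ *         while (__atomic_exchange_n(&l->locked, 1U, __ATOMIC_ACQUIRE)) {
+ *             // Tell the Microvisor which vCPU we are waiting on; this
+ *             // returns immediately if that vCPU is running on another
+ *             // physical core, or once sync_sev signals a wakeup.
+ *             _okl4_sys_vcpu_sync_wfe(l->owner);
+ *         }
+ *         l->owner = self;
+ *     }
+ *
+ *     static void demo_lock_release(struct demo_lock *l)
+ *     {
+ *         __atomic_store_n(&l->locked, 0U, __ATOMIC_RELEASE);
+ *         // Wake any vCPUs in this domain waiting in sync_wfe.
+ *         _okl4_sys_vcpu_sync_sev();
+ *     }
+ */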
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)holder;
+ __asm__ __volatile__(
+ ""hvc(5127)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)holder;
+ __asm__ __volatile__(
+ "" hvc(5127) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Atomically fetch an interrupt payload and raise a virtual interrupt.
+ *
+ * @details
+ *    This API is equivalent to atomically calling @ref
+ *    sys_interrupt_get_payload and @ref sys_vinterrupt_modify. Typically,
+ *    the specified virtual interrupt will be one that is not attached to
+ *    the specified virtual interrupt source, but this is not enforced. If
+ *    only one virtual interrupt source is affected, then the @ref
+ *    sys_interrupt_get_payload phase will occur first.
+ *
+ *    Certain communication protocols must perform this sequence of
+ *    operations atomically in order to maintain consistency. Other than
+ *    being atomic, this is no different to invoking the two component
+ *    operations separately.
+ *
+ * @param irq
+ * An interrupt line number for the virtual GIC.
+ * @param virqline
+ * A virtual interrupt line capability.
+ * @param mask
+ * A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ * A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ * The resulting error value.
+ * @retval payload
+ * Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+ okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+ typedef union {
+ struct uint64 {
+ uint32_t lo;
+ uint32_t hi;
+ } words;
+ uint64_t val;
+ } okl4_uint64_tmp;
+ okl4_uint64_tmp payload_tmp;
+ struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+ register uint32_t r0 asm("r0") = (uint32_t)irq;
+ register uint32_t r1 asm("r1") = (uint32_t)virqline;
+ register uint32_t r2 asm("r2") = (uint32_t)(mask & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)((mask >> 32) & 0xffffffff);
+ register uint32_t r4 asm("r4") = (uint32_t)(payload & 0xffffffff);
+ register uint32_t r5 asm("r5") = (uint32_t)((payload >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5194)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+ :
+ : "cc", "memory"
+ );
+
+
+ result.error = (okl4_error_t)(r0);
+ payload_tmp.words.lo = r1;
+ payload_tmp.words.hi = r2;
+ result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+ return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+ okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+ struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)virqline;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)mask;
+ register okl4_register_t x3 asm("x3") = (okl4_register_t)payload;
+ __asm__ __volatile__(
+ "" hvc(5194) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+ :
+ : "cc", "memory", "x4", "x5", "x6", "x7"
+ );
+
+
+ result.error = (okl4_error_t)(x0);
+ result.payload = (okl4_virq_flags_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Raise a virtual interrupt, and modify the payload flags.
+ *
+ * @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *    source. A virtual interrupt source object is distinct from a virtual
+ *    interrupt. A virtual interrupt source is always linked to a virtual
+ *    interrupt, but the reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *    may be fetched by the recipient of the interrupt. An interrupt
+ *    payload is a @ref okl4_word_t sized array of flags, packed into a
+ *    single word. Flags are cleared whenever the interrupt recipient
+ *    fetches the payload with the @ref okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-modify API allows the caller to pass in a new set of
+ *    flags in the \p payload field, and a set of flags to keep from the
+ *    previous payload in the \p mask field. If the interrupt has
+ *    previously been raised and not yet delivered, the flags accumulate
+ *    with a mask; that is, each flag is the boolean OR of the specified
+ *    value with the boolean AND of its previous value and the mask.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *    an invocation of this API is counted as a single edge; this triggers
+ *    interrupt delivery if the interrupt is not already pending,
+ *    irrespective of the payload. If the interrupt is configured for level
+ *    triggering, then its pending state is the boolean OR of its payload
+ *    flags after any specified flags are cleared or raised; at least one
+ *    flag must be set in the new payload to permit delivery of a
+ *    level-triggered interrupt.
+ *
+ * @param virqline
+ * A virtual interrupt line capability.
+ * @param mask
+ * A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ * A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
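+/*
+ * Illustrative usage (editor's sketch). virqline_cap and the flag values
+ * are placeholders. Per the description above, if an undelivered payload
+ * `old` is already pending, the pending payload becomes
+ * (old & mask) | payload:
+ *
+ *     okl4_error_t err;
+ *
+ *     // Clear bit 0 of any pending payload and raise bit 1.
+ *     err = _okl4_sys_vinterrupt_modify(virqline_cap,
+ *             ~(okl4_virq_flags_t)1U,  // mask: keep everything except bit 0
+ *             (okl4_virq_flags_t)2U);  // payload: set bit 1
+ */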
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+ okl4_virq_flags_t payload)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)virqline;
+ register uint32_t r1 asm("r1") = (uint32_t)(mask & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((mask >> 32) & 0xffffffff);
+ register uint32_t r3 asm("r3") = (uint32_t)(payload & 0xffffffff);
+ register uint32_t r4 asm("r4") = (uint32_t)((payload >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5195)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+ :
+ : "cc", "memory", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+ okl4_virq_flags_t payload)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)mask;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)payload;
+ __asm__ __volatile__(
+ "" hvc(5195) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Raise a virtual interrupt, setting specified payload flags.
+ *
+ * @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *    source. A virtual interrupt source object is distinct from a virtual
+ *    interrupt. A virtual interrupt source is always linked to a virtual
+ *    interrupt, but the reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *    may be fetched by the recipient of the interrupt. An interrupt
+ *    payload is a @ref okl4_word_t sized array of flags, packed into a
+ *    single word. Flags are cleared whenever the interrupt recipient
+ *    fetches the payload with the @ref okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-raise API allows the caller to pass in a new set of
+ *    flags in the \p payload field. If the interrupt has previously been
+ *    raised and not yet delivered, the flags accumulate; that is, each
+ *    flag is the boolean OR of its previous value and the specified value.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *    an invocation of this API is counted as a single edge; this triggers
+ *    interrupt delivery if the interrupt is not already pending,
+ *    irrespective of the payload. If the interrupt is configured for level
+ *    triggering, then its pending state is the boolean OR of its payload
+ *    flags after any specified flags are raised; at least one flag must be
+ *    set in the new payload to permit delivery of a level-triggered
+ *    interrupt.
+ *
+ * @note Invoking this API is equivalent to invoking the @ref
+ *    okl4_sys_vinterrupt_modify API with all bits set in the \p mask
+ *    value.
+ *
+ * @note This API is distinct from the @ref okl4_sys_interrupt_raise API,
+ *    which raises a local software-generated interrupt without requiring
+ *    an explicit capability.
+ *
+ * @param virqline
+ * A virtual interrupt line capability.
+ * @param payload
+ * A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ * The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)virqline;
+ register uint32_t r1 asm("r1") = (uint32_t)(payload & 0xffffffff);
+ register uint32_t r2 asm("r2") = (uint32_t)((payload >> 32) & 0xffffffff);
+ __asm__ __volatile__(
+ ""hvc(5196)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)payload;
+ __asm__ __volatile__(
+ "" hvc(5196) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+
+/*lint -restore */
+
+#endif /* !ASSEMBLY */
+
+/*
+ * Assembly system call prototypes / numbers.
+ */
+
+/** @addtogroup lib_microvisor_syscall_numbers Microvisor System Call Numbers
+ * @{
+ */
+#define OKL4_SYSCALL_AXON_PROCESS_RECV 5184
+
+#define OKL4_SYSCALL_AXON_SET_HALTED 5186
+
+#define OKL4_SYSCALL_AXON_SET_RECV_AREA 5187
+
+#define OKL4_SYSCALL_AXON_SET_RECV_QUEUE 5188
+
+#define OKL4_SYSCALL_AXON_SET_RECV_SEGMENT 5189
+
+#define OKL4_SYSCALL_AXON_SET_SEND_AREA 5190
+
+#define OKL4_SYSCALL_AXON_SET_SEND_QUEUE 5191
+
+#define OKL4_SYSCALL_AXON_SET_SEND_SEGMENT 5192
+
+#define OKL4_SYSCALL_AXON_TRIGGER_SEND 5185
+
+#define OKL4_SYSCALL_INTERRUPT_ACK 5128
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_PRIVATE 5134
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_SHARED 5135
+
+#define OKL4_SYSCALL_INTERRUPT_DETACH 5136
+
+#define OKL4_SYSCALL_INTERRUPT_DIST_ENABLE 5133
+
+#define OKL4_SYSCALL_INTERRUPT_EOI 5129
+
+#define OKL4_SYSCALL_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING 5137
+
+#define OKL4_SYSCALL_INTERRUPT_GET_PAYLOAD 5132
+
+#define OKL4_SYSCALL_INTERRUPT_LIMITS 5138
+
+#define OKL4_SYSCALL_INTERRUPT_MASK 5130
+
+#define OKL4_SYSCALL_INTERRUPT_RAISE 5145
+
+#define OKL4_SYSCALL_INTERRUPT_SET_BINARY_POINT 5139
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONFIG 5140
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONTROL 5141
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY 5142
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY_MASK 5143
+
+#define OKL4_SYSCALL_INTERRUPT_SET_TARGETS 5144
+
+#define OKL4_SYSCALL_INTERRUPT_UNMASK 5131
+
+#define OKL4_SYSCALL_KDB_INTERACT 5120
+
+#define OKL4_SYSCALL_KDB_SET_OBJECT_NAME 5121
+
+#define OKL4_SYSCALL_KSP_PROCEDURE_CALL 5197
+
+#define OKL4_SYSCALL_MMU_ATTACH_SEGMENT 5152
+
+#define OKL4_SYSCALL_MMU_DETACH_SEGMENT 5153
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE 5154
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE_PN 5155
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PAGE 5156
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PN 5157
+
+#define OKL4_SYSCALL_MMU_MAP_PAGE 5158
+
+#define OKL4_SYSCALL_MMU_MAP_PN 5159
+
+#define OKL4_SYSCALL_MMU_UNMAP_PAGE 5160
+
+#define OKL4_SYSCALL_MMU_UNMAP_PN 5161
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_ATTRS 5162
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_PERMS 5163
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_ATTRS 5164
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_PERMS 5165
+
+#define OKL4_SYSCALL_PERFORMANCE_NULL_SYSCALL 5198
+
+#define OKL4_SYSCALL_PIPE_CONTROL 5146
+
+#define OKL4_SYSCALL_PIPE_RECV 5147
+
+#define OKL4_SYSCALL_PIPE_SEND 5148
+
+#define OKL4_SYSCALL_PRIORITY_WAIVE 5151
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTER 5200
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTERS 5201
+
+#define OKL4_SYSCALL_REMOTE_READ_MEMORY32 5202
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTER 5203
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTERS 5204
+
+#define OKL4_SYSCALL_REMOTE_WRITE_MEMORY32 5205
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_STATUS_SUSPENDED 5206
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_WATCH_SUSPENDED 5207
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_DISABLE 5168
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_ENABLE 5169
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_GET_DATA 5170
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_DISABLE 5171
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_ENABLE 5172
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_GET_DATA 5173
+
+#define OKL4_SYSCALL_SCHEDULER_SUSPEND 5150
+
+#define OKL4_SYSCALL_TIMER_CANCEL 5176
+
+#define OKL4_SYSCALL_TIMER_GET_RESOLUTION 5177
+
+#define OKL4_SYSCALL_TIMER_GET_TIME 5178
+
+#define OKL4_SYSCALL_TIMER_QUERY 5179
+
+#define OKL4_SYSCALL_TIMER_START 5180
+
+#define OKL4_SYSCALL_TRACEBUFFER_SYNC 5199
+
+#define OKL4_SYSCALL_VCPU_RESET 5122
+
+#define OKL4_SYSCALL_VCPU_START 5123
+
+#define OKL4_SYSCALL_VCPU_STOP 5124
+
+#define OKL4_SYSCALL_VCPU_SWITCH_MODE 5125
+
+#define OKL4_SYSCALL_VCPU_SYNC_SEV 5126
+
+#define OKL4_SYSCALL_VCPU_SYNC_WFE 5127
+
+#define OKL4_SYSCALL_VINTERRUPT_CLEAR_AND_RAISE 5194
+
+#define OKL4_SYSCALL_VINTERRUPT_MODIFY 5195
+
+#define OKL4_SYSCALL_VINTERRUPT_RAISE 5196
+
+/** @} */
+#undef hvc
+
+#if defined(_definitions_for_linters)
+/* Ignore lint identifier clashes for syscall names. */
+/*lint -esym(621, _okl4_sys_axon_process_recv) */
+/*lint -esym(621, _okl4_sys_axon_set_halted) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_area) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_segment) */
+/*lint -esym(621, _okl4_sys_axon_set_send_area) */
+/*lint -esym(621, _okl4_sys_axon_set_send_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_send_segment) */
+/*lint -esym(621, _okl4_sys_axon_trigger_send) */
+/*lint -esym(621, _okl4_sys_interrupt_ack) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_private) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_shared) */
+/*lint -esym(621, _okl4_sys_interrupt_detach) */
+/*lint -esym(621, _okl4_sys_interrupt_dist_enable) */
+/*lint -esym(621, _okl4_sys_interrupt_eoi) */
+/*lint -esym(621, _okl4_sys_interrupt_get_highest_priority_pending) */
+/*lint -esym(621, _okl4_sys_interrupt_get_payload) */
+/*lint -esym(621, _okl4_sys_interrupt_limits) */
+/*lint -esym(621, _okl4_sys_interrupt_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_raise) */
+/*lint -esym(621, _okl4_sys_interrupt_set_binary_point) */
+/*lint -esym(621, _okl4_sys_interrupt_set_config) */
+/*lint -esym(621, _okl4_sys_interrupt_set_control) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_set_targets) */
+/*lint -esym(621, _okl4_sys_interrupt_unmask) */
+/*lint -esym(621, _okl4_sys_kdb_interact) */
+/*lint -esym(621, _okl4_sys_kdb_set_object_name) */
+/*lint -esym(621, _okl4_sys_ksp_procedure_call) */
+/*lint -esym(621, _okl4_sys_mmu_attach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_detach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range_pn) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_page) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_pn) */
+/*lint -esym(621, _okl4_sys_mmu_map_page) */
+/*lint -esym(621, _okl4_sys_mmu_map_pn) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_page) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_pn) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_perms) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_perms) */
+/*lint -esym(621, _okl4_sys_performance_null_syscall) */
+/*lint -esym(621, _okl4_sys_pipe_control) */
+/*lint -esym(621, _okl4_sys_pipe_recv) */
+/*lint -esym(621, _okl4_sys_pipe_send) */
+/*lint -esym(621, _okl4_sys_priority_waive) */
+/*lint -esym(621, _okl4_sys_remote_get_register) */
+/*lint -esym(621, _okl4_sys_remote_get_registers) */
+/*lint -esym(621, _okl4_sys_remote_read_memory32) */
+/*lint -esym(621, _okl4_sys_remote_set_register) */
+/*lint -esym(621, _okl4_sys_remote_set_registers) */
+/*lint -esym(621, _okl4_sys_remote_write_memory32) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_status_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_watch_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_get_data) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_get_data) */
+/*lint -esym(621, _okl4_sys_scheduler_suspend) */
+/*lint -esym(621, _okl4_sys_timer_cancel) */
+/*lint -esym(621, _okl4_sys_timer_get_resolution) */
+/*lint -esym(621, _okl4_sys_timer_get_time) */
+/*lint -esym(621, _okl4_sys_timer_query) */
+/*lint -esym(621, _okl4_sys_timer_start) */
+/*lint -esym(621, _okl4_sys_tracebuffer_sync) */
+/*lint -esym(621, _okl4_sys_vcpu_reset) */
+/*lint -esym(621, _okl4_sys_vcpu_start) */
+/*lint -esym(621, _okl4_sys_vcpu_stop) */
+/*lint -esym(621, _okl4_sys_vcpu_switch_mode) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_sev) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_wfe) */
+/*lint -esym(621, _okl4_sys_vinterrupt_clear_and_raise) */
+/*lint -esym(621, _okl4_sys_vinterrupt_modify) */
+/*lint -esym(621, _okl4_sys_vinterrupt_raise) */
+#endif
+#endif /* __AUTO__USER_SYSCALLS_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/types.h b/include/microvisor/kernel/types.h
new file mode 100644
index 000000000000..c87285c776af
--- /dev/null
+++ b/include/microvisor/kernel/types.h
@@ -0,0 +1,16064 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+/** @addtogroup lib_microvisor_types Microvisor Types
+ * @{
+ */
+#ifndef __AUTO__MICROVISOR_TYPES_H__
+#define __AUTO__MICROVISOR_TYPES_H__
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_DEFAULT_PERMS OKL4_PAGE_PERMS_RWX
+#define OKL4_DEFAULT_CACHE_ATTRIBUTES OKL4_PAGE_CACHE_DEFAULT
+
+#if __SIZEOF_POINTER__ != 8
+#define __ptr64(type, name) union { type name; uint64_t _x_##name; }
+#define __ptr64_array(type, name) union { type val; uint64_t _x; } name
+#else
+#define __ptr64(type, name) type name
+#define __ptr64_array(type, name) type name
+#endif
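+/*
+ * Editor's note: on targets where pointers are narrower than 64 bits, the
+ * __ptr64 helpers wrap a pointer field in an anonymous union with a
+ * uint64_t so the field still occupies 64 bits, keeping structure layouts
+ * identical to the 64-bit view. The structure below is a hypothetical
+ * example, not an OKL4 type:
+ *
+ *     struct demo_shared {
+ *         __ptr64(void *, base);   // accessed as .base on either width
+ *         okl4_bool_t valid;
+ *     };
+ */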
+
+/**
+ The `okl4_bool_t` type represents a standard boolean value. Valid values are
+ restricted to @ref OKL4_TRUE and @ref OKL4_FALSE.
+*/
+
+typedef _Bool okl4_bool_t;
+
+
+
+
+
+
+
+
+/**
+ - BITS 7..0 - @ref OKL4_MASK_AFF0_ARM_MPIDR
+ - BITS 15..8 - @ref OKL4_MASK_AFF1_ARM_MPIDR
+ - BITS 23..16 - @ref OKL4_MASK_AFF2_ARM_MPIDR
+ - BIT 24 - @ref OKL4_MASK_MT_ARM_MPIDR
+ - BIT 30 - @ref OKL4_MASK_U_ARM_MPIDR
+ - BIT 31 - @ref OKL4_MASK_MP_ARM_MPIDR
+ - BITS 39..32 - @ref OKL4_MASK_AFF3_ARM_MPIDR
+*/
+
+/*lint -esym(621, okl4_arm_mpidr_t) */
+typedef uint64_t okl4_arm_mpidr_t;
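+/*
+ * Illustrative accessor use (editor's sketch). raw_mpidr is a placeholder
+ * for an MPIDR value obtained elsewhere; the getters declared below
+ * extract the affinity fields:
+ *
+ *     okl4_arm_mpidr_t mpidr = okl4_arm_mpidr_cast(raw_mpidr,
+ *             (okl4_bool_t)0);
+ *     uint64_t aff0 = okl4_arm_mpidr_getaff0(&mpidr);  // bits 7..0
+ *     uint64_t aff1 = okl4_arm_mpidr_getaff1(&mpidr);  // bits 15..8
+ */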
+
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3);
+
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt);
+
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u);
+
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF0_MASK) */
+#define OKL4_ARM_MPIDR_AFF0_MASK ((okl4_arm_mpidr_t)255U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_MASK_AFF0_ARM_MPIDR ((okl4_arm_mpidr_t)255U)
+/*lint -esym(621, OKL4_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF1_MASK) */
+#define OKL4_ARM_MPIDR_AFF1_MASK ((okl4_arm_mpidr_t)255U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_MASK_AFF1_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF2_MASK) */
+#define OKL4_ARM_MPIDR_AFF2_MASK ((okl4_arm_mpidr_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_MASK_AFF2_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_MT_MASK) */
+#define OKL4_ARM_MPIDR_MT_MASK ((okl4_arm_mpidr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MT_ARM_MPIDR) */
+#define OKL4_MASK_MT_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_U_MASK) */
+#define OKL4_ARM_MPIDR_U_MASK ((okl4_arm_mpidr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_U_ARM_MPIDR) */
+#define OKL4_MASK_U_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_U_ARM_MPIDR) */
+#define OKL4_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_WIDTH_U_ARM_MPIDR) */
+#define OKL4_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_MP_MASK) */
+#define OKL4_ARM_MPIDR_MP_MASK ((okl4_arm_mpidr_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MP_ARM_MPIDR) */
+#define OKL4_MASK_MP_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF3_MASK) */
+#define OKL4_ARM_MPIDR_AFF3_MASK ((okl4_arm_mpidr_t)255U << 32) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_MASK_AFF3_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 32)
+/*lint -esym(621, OKL4_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF3_ARM_MPIDR (8)
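+
+/**
+ * Usage note (illustrative only, not part of the generated API): the MASK and
+ * SHIFT macros above can be combined to extract a field from a raw MPIDR
+ * value without going through the accessor functions. The variable names and
+ * the sample value below are hypothetical.
+ *
+ * @code
+ * uint64_t raw = 0x0000000080000102ULL;  // made-up value: Aff1 = 1, Aff0 = 2
+ * uint64_t aff1 =
+ *         (raw & OKL4_MASK_AFF1_ARM_MPIDR) >> OKL4_SHIFT_AFF1_ARM_MPIDR;
+ * @endcode
+ */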
+
+
+/*lint -sem(okl4_arm_mpidr_getaff0, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x)
+{
+ uint64_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint64_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff0, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff0) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)_aff0;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff1, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x)
+{
+ uint64_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 8;
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint64_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff1, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff1) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 8;
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)_aff1;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff2, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x)
+{
+ uint64_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 16;
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint64_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff2, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff2) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 16;
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)_aff2;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 24;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setmt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setmt) */
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 24;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_mt;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getu, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 30;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setu, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setu) */
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 30;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_u;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmp, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 31;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_getaff3, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x)
+{
+ uint64_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 32;
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint64_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff3, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff3) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 32;
+ uint64_t field : 8;
+ } bits;
+ okl4_arm_mpidr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)_aff3;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x)
+{
+ *x = (okl4_arm_mpidr_t)2147483648U; /* MP bit (bit 31) set */
+}
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force)
+{
+ okl4_arm_mpidr_t x = (okl4_arm_mpidr_t)p;
+ if (force) {
+ x &= ~(okl4_arm_mpidr_t)0x80000000U;
+ x |= (okl4_arm_mpidr_t)0x80000000U; /* x.mp */
+ }
+ return x;
+}
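+
+/**
+ * Illustrative sketch (not part of the generated API): composing an MPIDR
+ * value with the accessors defined above. The cluster/core variable names
+ * and values are hypothetical.
+ *
+ * @code
+ * okl4_arm_mpidr_t mpidr;
+ *
+ * okl4_arm_mpidr_init(&mpidr);           // MP bit set, all other fields zero
+ * okl4_arm_mpidr_setaff1(&mpidr, 1U);    // cluster 1
+ * okl4_arm_mpidr_setaff0(&mpidr, 2U);    // CPU 2 within the cluster
+ *
+ * uint64_t cluster = okl4_arm_mpidr_getaff1(&mpidr);
+ * uint64_t core    = okl4_arm_mpidr_getaff0(&mpidr);
+ * @endcode
+ */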
+
+
+
+
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON ((uint32_t)(3735928559U)) /* 0xdeadbeef */
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF ((uint32_t)(0xffffffffU))
+
+
+
+
+typedef uint32_t okl4_arm_psci_function_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION ((okl4_arm_psci_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND ((okl4_arm_psci_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_OFF ((okl4_arm_psci_function_t)0x2U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_ON ((okl4_arm_psci_function_t)0x3U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO ((okl4_arm_psci_function_t)0x4U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE ((okl4_arm_psci_function_t)0x5U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE ((okl4_arm_psci_function_t)0x6U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU ((okl4_arm_psci_function_t)0x7U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF ((okl4_arm_psci_function_t)0x8U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET ((okl4_arm_psci_function_t)0x9U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES ((okl4_arm_psci_function_t)0xaU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE ((okl4_arm_psci_function_t)0xbU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND ((okl4_arm_psci_function_t)0xcU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE ((okl4_arm_psci_function_t)0xdU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND ((okl4_arm_psci_function_t)0xeU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE ((okl4_arm_psci_function_t)0xfU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY ((okl4_arm_psci_function_t)0x10U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT ((okl4_arm_psci_function_t)0x11U)
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_CPU_OFF) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_CPU_ON) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT));
+}
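+
+/**
+ * Illustrative sketch (not part of the generated API): validating a PSCI
+ * function identifier before acting on it. smc_function_id is a hypothetical
+ * input value.
+ *
+ * @code
+ * okl4_arm_psci_function_t fn = (okl4_arm_psci_function_t)smc_function_id;
+ *
+ * if (!okl4_arm_psci_function_is_element_of(fn)) {
+ *     // Not a known PSCI function; reject the request.
+ *     return OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED;
+ * }
+ * @endcode
+ */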
+
+
+
+typedef uint32_t okl4_arm_psci_result_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ARM_PSCI_RESULT_SUCCESS ((okl4_arm_psci_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS ((okl4_arm_psci_result_t)0xfffffff7U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ARM_PSCI_RESULT_DISABLED ((okl4_arm_psci_result_t)0xfffffff8U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ARM_PSCI_RESULT_NOT_PRESENT ((okl4_arm_psci_result_t)0xfffffff9U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE ((okl4_arm_psci_result_t)0xfffffffaU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ARM_PSCI_RESULT_ON_PENDING ((okl4_arm_psci_result_t)0xfffffffbU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ARM_PSCI_RESULT_ALREADY_ON ((okl4_arm_psci_result_t)0xfffffffcU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ARM_PSCI_RESULT_DENIED ((okl4_arm_psci_result_t)0xfffffffdU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS ((okl4_arm_psci_result_t)0xfffffffeU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED ((okl4_arm_psci_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_ARM_PSCI_RESULT_SUCCESS) ||
+ (var == OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) ||
+ (var == OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) ||
+ (var == OKL4_ARM_PSCI_RESULT_DENIED) ||
+ (var == OKL4_ARM_PSCI_RESULT_ALREADY_ON) ||
+ (var == OKL4_ARM_PSCI_RESULT_ON_PENDING) ||
+ (var == OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) ||
+ (var == OKL4_ARM_PSCI_RESULT_NOT_PRESENT) ||
+ (var == OKL4_ARM_PSCI_RESULT_DISABLED) ||
+ (var == OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS));
+}
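+
+/**
+ * Illustrative sketch (not part of the generated API): checking whether a raw
+ * 32-bit return value maps onto a known PSCI result code. raw_result is a
+ * hypothetical input value, and the fallback chosen here is only an example.
+ *
+ * @code
+ * okl4_arm_psci_result_t res = (okl4_arm_psci_result_t)raw_result;
+ *
+ * if (!okl4_arm_psci_result_is_element_of(res)) {
+ *     // Unrecognised result code; treat it as an internal failure.
+ *     res = OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE;
+ * }
+ * @endcode
+ */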
+
+
+/**
+ The `okl4_arm_psci_suspend_state_t` type represents a PSCI CPU_SUSPEND
+ power-state argument. Its fields are laid out as follows:
+
+ - BITS 15..0 - @ref OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE
+ - BIT 16 - @ref OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE
+ - BITS 25..24 - @ref OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE
+*/
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_t) */
+typedef uint32_t okl4_arm_psci_suspend_state_t;
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU ((okl4_arm_psci_suspend_state_t)(0U))
+
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK ((okl4_arm_psci_suspend_state_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK ((okl4_arm_psci_suspend_state_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK ((okl4_arm_psci_suspend_state_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/*lint -sem(okl4_arm_psci_suspend_state_getstateid, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ okl4_arm_psci_suspend_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setstateid, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setstateid) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ okl4_arm_psci_suspend_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_state_id;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerdown, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_psci_suspend_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerdown, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerdown) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_psci_suspend_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_power_down;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerlevel, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ uint32_t field : 2;
+ } bits;
+ okl4_arm_psci_suspend_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerlevel, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerlevel) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ uint32_t field : 2;
+ } bits;
+ okl4_arm_psci_suspend_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_power_level;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x)
+{
+ *x = (okl4_arm_psci_suspend_state_t)0U;
+}
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_arm_psci_suspend_state_t x = (okl4_arm_psci_suspend_state_t)p;
+ (void)force;
+ return x;
+}
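+
+/**
+ * Illustrative sketch (not part of the generated API): building a CPU_SUSPEND
+ * power-state argument that requests a power-down state at the CPU power
+ * level. The state-id value 3 is a made-up, platform-specific example.
+ *
+ * @code
+ * okl4_arm_psci_suspend_state_t state;
+ *
+ * okl4_arm_psci_suspend_state_init(&state);
+ * okl4_arm_psci_suspend_state_setstateid(&state, 3U);
+ * okl4_arm_psci_suspend_state_setpowerdown(&state, OKL4_TRUE);
+ * okl4_arm_psci_suspend_state_setpowerlevel(&state,
+ *         OKL4_ARM_PSCI_POWER_LEVEL_CPU);
+ * @endcode
+ */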
+
+
+
+/**
+ The `okl4_arm_sctlr_t` type represents the ARM System Control Register
+ (SCTLR). Its fields are laid out as follows:
+
+ - BIT 0 - @ref OKL4_MASK_MMU_ENABLE_ARM_SCTLR
+ - BIT 1 - @ref OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR
+ - BIT 2 - @ref OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR
+ - BIT 3 - @ref OKL4_MASK_STACK_ALIGN_ARM_SCTLR
+ - BIT 4 - @ref OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR
+ - BIT 5 - @ref OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR
+ - BIT 6 - @ref OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR
+ - BIT 7 - @ref OKL4_MASK_IT_DISABLE_ARM_SCTLR
+ - BIT 8 - @ref OKL4_MASK_SETEND_DISABLE_ARM_SCTLR
+ - BIT 9 - @ref OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR
+ - BIT 11 - @ref OKL4_MASK_RESERVED11_ARM_SCTLR
+ - BIT 12 - @ref OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR
+ - BIT 13 - @ref OKL4_MASK_VECTORS_BIT_ARM_SCTLR
+ - BIT 14 - @ref OKL4_MASK_DCACHE_ZERO_ARM_SCTLR
+ - BIT 15 - @ref OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR
+ - BIT 16 - @ref OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR
+ - BIT 18 - @ref OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR
+ - BIT 19 - @ref OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR
+ - BIT 20 - @ref OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR
+ - BIT 22 - @ref OKL4_MASK_RESERVED22_ARM_SCTLR
+ - BIT 23 - @ref OKL4_MASK_RESERVED23_ARM_SCTLR
+ - BIT 24 - @ref OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR
+ - BIT 25 - @ref OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR
+ - BIT 28 - @ref OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR
+ - BIT 29 - @ref OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR
+ - BIT 30 - @ref OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR
+*/
+
+/*lint -esym(621, okl4_arm_sctlr_t) */
+typedef uint32_t okl4_arm_sctlr_t;
+
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe);
+
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit);
+
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0);
+
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access);
+
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero);
+
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type);
+
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc);
+
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x);
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force);
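+
+/**
+ * Illustrative sketch (not part of the generated API): constructing and
+ * querying an SCTLR image with the accessors declared above. The variable
+ * name is hypothetical.
+ *
+ * @code
+ * okl4_arm_sctlr_t sctlr;
+ *
+ * okl4_arm_sctlr_init(&sctlr);
+ * okl4_arm_sctlr_setmmuenable(&sctlr, OKL4_TRUE);
+ * okl4_arm_sctlr_setdatacacheenable(&sctlr, OKL4_TRUE);
+ *
+ * if (okl4_arm_sctlr_getmmuenable(&sctlr)) {
+ *     // The MMU enable bit is set in this register image.
+ * }
+ * @endcode
+ */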
+
+
+
+
+/*lint -esym(621, OKL4_ARM_SCTLR_MMU_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_MMU_ENABLE_MASK ((okl4_arm_sctlr_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_MMU_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U)
+/*lint -esym(621, OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_MASK ((okl4_arm_sctlr_t)1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK ((okl4_arm_sctlr_t)1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK) */
+#define OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK ((okl4_arm_sctlr_t)1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 6)
+/*lint -esym(621, OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_IT_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_IT_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_IT_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_SETEND_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_SETEND_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_SETEND_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 8)
+/*lint -esym(621, OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK) */
+#define OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK ((okl4_arm_sctlr_t)1U << 9) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 9)
+/*lint -esym(621, OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED11_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED11_MASK ((okl4_arm_sctlr_t)1U << 11) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED11_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 11)
+/*lint -esym(621, OKL4_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 12) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 12)
+/*lint -esym(621, OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_VECTORS_BIT_MASK) */
+#define OKL4_ARM_SCTLR_VECTORS_BIT_MASK ((okl4_arm_sctlr_t)1U << 13) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_MASK_VECTORS_BIT_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 13)
+/*lint -esym(621, OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DCACHE_ZERO_MASK) */
+#define OKL4_ARM_SCTLR_DCACHE_ZERO_MASK ((okl4_arm_sctlr_t)1U << 14) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_MASK_DCACHE_ZERO_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 14)
+/*lint -esym(621, OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK) */
+#define OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK ((okl4_arm_sctlr_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK ((okl4_arm_sctlr_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK ((okl4_arm_sctlr_t)1U << 18) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 18)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 19) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 19)
+/*lint -esym(621, OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 20) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 20)
+/*lint -esym(621, OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED22_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED22_MASK ((okl4_arm_sctlr_t)1U << 22) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED22_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 22)
+/*lint -esym(621, OKL4_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED23_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED23_MASK ((okl4_arm_sctlr_t)1U << 23) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED23_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 23)
+/*lint -esym(621, OKL4_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 25) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 25)
+/*lint -esym(621, OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 28) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 29) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 29)
+/*lint -esym(621, OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
+
+
+/*lint -sem(okl4_arm_sctlr_getmmuenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setmmuenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setmmuenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_mmu_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getalignmentcheckenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setalignmentcheckenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setalignmentcheckenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_alignment_check_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdatacacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdatacacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdatacacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_data_cache_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 3;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalign) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 3;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_stack_align;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalignel0, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalignel0, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalignel0) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_stack_align_el0;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getcp15barrierenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 5;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setcp15barrierenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setcp15barrierenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 5;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_cp15_barrier_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getoklhcrel2dc, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 6;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setoklhcrel2dc, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setoklhcrel2dc) */
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 6;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_okl_hcr_el2_dc;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getitdisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 7;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setitdisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setitdisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 7;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_it_disable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getsetenddisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setsetenddisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setsetenddisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_setend_disable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusermaskaccess, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 9;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusermaskaccess, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusermaskaccess) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 9;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_user_mask_access;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved11, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 11;
+ uint32_t field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getinstructioncacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 12;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setinstructioncacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setinstructioncacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 12;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_instruction_cache_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getvectorsbit, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 13;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setvectorsbit, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setvectorsbit) */
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 13;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_vectors_bit;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdcachezero, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 14;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdcachezero, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdcachezero) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 14;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_dcache_zero;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusercachetype, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 15;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusercachetype, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusercachetype) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 15;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_user_cache_type;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfi, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfi, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfi) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_no_trap_wfi;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfe, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 18;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfe, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfe) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 18;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_no_trap_wfe;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 19;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 19;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_write_exec_never;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getuserwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 20;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setuserwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setuserwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 20;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_user_write_exec_never;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved22, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 22;
+ uint32_t field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getreserved23, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 23;
+ uint32_t field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getel0endianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setel0endianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setel0endianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_el0_endianness;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getexceptionendianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 25;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setexceptionendianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setexceptionendianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 25;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_exception_endianness;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_gettexremapenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 28;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_settexremapenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_settexremapenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 28;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_tex_remap_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getaccessflagenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 29;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setaccessflagenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setaccessflagenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 29;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_access_flag_enable;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getthumbexceptionenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 30;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setthumbexceptionenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setthumbexceptionenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 30;
+ _Bool field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_thumb_exception_enable;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x)
+{
+ *x = (okl4_arm_sctlr_t)12912928U;
+}
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_arm_sctlr_t x = (okl4_arm_sctlr_t)p;
+ if (force) {
+ x &= ~(okl4_arm_sctlr_t)0x800U;
+ x |= (okl4_arm_sctlr_t)0x800U; /* x.reserved11 */
+ x &= ~(okl4_arm_sctlr_t)0x400000U;
+ x |= (okl4_arm_sctlr_t)0x400000U; /* x.reserved22 */
+ x &= ~(okl4_arm_sctlr_t)0x800000U;
+ x |= (okl4_arm_sctlr_t)0x800000U; /* x.reserved23 */
+ }
+ return x;
+}
+
+
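+/*
+ * Illustrative sketch only (not part of the generated API): the accessors
+ * above can be combined to build an SCTLR value starting from the reset
+ * default supplied by okl4_arm_sctlr_init(). The guard macro
+ * OKL4_ARM_SCTLR_EXAMPLE, the function name and the chosen settings are
+ * assumptions for demonstration.
+ */
+#if defined(OKL4_ARM_SCTLR_EXAMPLE)
+static uint32_t
+example_build_guest_sctlr(void)
+{
+    okl4_arm_sctlr_t sctlr;
+
+    /* Start from the defined reset value (reserved bits already set). */
+    okl4_arm_sctlr_init(&sctlr);
+
+    /* Enable the instruction cache and forbid writable-executable pages. */
+    okl4_arm_sctlr_setinstructioncacheenable(&sctlr, (okl4_bool_t)1);
+    okl4_arm_sctlr_setwriteexecnever(&sctlr, (okl4_bool_t)1);
+
+    /* Fields can be read back with the matching getter. */
+    if (!okl4_arm_sctlr_getinstructioncacheenable(&sctlr)) {
+        /* Not taken here, since the bit was just set above. */
+    }
+
+    return (uint32_t)sctlr;
+}
+#endif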
+
+
+typedef uint32_t okl4_arm_smccc_arch_function_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION ((okl4_arm_smccc_arch_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES ((okl4_arm_smccc_arch_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 ((okl4_arm_smccc_arch_function_t)0x8000U)
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) ||
+ (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) ||
+ (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1));
+}
+
+
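+/*
+ * Illustrative sketch only (not part of the generated API): a guest SMC
+ * dispatcher might validate an incoming ARM SMCCC architectural function
+ * ID with the predicate above before acting on it. The guard macro and
+ * the function name are assumptions.
+ */
+#if defined(OKL4_ARM_SMCCC_EXAMPLE)
+static okl4_bool_t
+example_is_known_arch_function(uint32_t function_id)
+{
+    return okl4_arm_smccc_arch_function_is_element_of(
+            (okl4_arm_smccc_arch_function_t)function_id);
+}
+#endif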
+
+typedef uint32_t okl4_arm_smccc_result_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ARM_SMCCC_RESULT_SUCCESS ((okl4_arm_smccc_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED ((okl4_arm_smccc_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_ARM_SMCCC_RESULT_SUCCESS) ||
+ (var == OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED));
+}
+
+
+/**
+ The `okl4_register_t` type represents an unsigned, machine-native
+ register-sized integer value.
+*/
+
+typedef uint64_t okl4_register_t;
+
+
+
+
+
+typedef okl4_register_t okl4_atomic_raw_register_t;
+
+
+
+
+
+
+
+
+
+typedef uint16_t okl4_atomic_raw_uint16_t;
+
+
+
+
+
+typedef uint32_t okl4_atomic_raw_uint32_t;
+
+
+
+
+
+typedef uint64_t okl4_atomic_raw_uint64_t;
+
+
+
+
+
+
+
+
+
+typedef uint8_t okl4_atomic_raw_uint8_t;
+
+
+
+
+/**
+ The okl4_atomic_register_t type implements a machine-word-sized value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_register {
+ volatile okl4_atomic_raw_register_t value;
+};
+
+
+
+
+
+
+/**
+ The okl4_atomic_register_t type implements a machine-word-sized value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_register okl4_atomic_register_t;
+
+
+
+
+/**
+ The okl4_atomic_uint16_t type implements a 16-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint16 {
+ volatile okl4_atomic_raw_uint16_t value;
+};
+
+
+
+
+
+
+/**
+ The okl4_atomic_uint16_t type implements a 16-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint16 okl4_atomic_uint16_t;
+
+
+
+
+/**
+ The okl4_atomic_uint32_t type implements a 32-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint32 {
+ volatile okl4_atomic_raw_uint32_t value;
+};
+
+
+
+
+
+
+/**
+ The okl4_atomic_uint32_t type implements a 32-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint32 okl4_atomic_uint32_t;
+
+
+
+
+/**
+ The okl4_atomic_uint64_t type implements a 64-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint64 {
+ volatile okl4_atomic_raw_uint64_t value;
+};
+
+
+
+
+
+
+/**
+ The okl4_atomic_uint64_t type implements a 64-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint64 okl4_atomic_uint64_t;
+
+
+
+
+/**
+ The okl4_atomic_uint8_t type implements an 8-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint8 {
+ volatile okl4_atomic_raw_uint8_t value;
+};
+
+
+
+
+
+
+/**
+ The okl4_atomic_uint8_t type implements an 8-bit value
+ that can be operated on using atomic operations. This can be used
+ to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint8 okl4_atomic_uint8_t;
+
+
+
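+/*
+ * Illustrative sketch only (not part of the generated API): the atomic
+ * wrapper structs above only declare suitably sized volatile storage; the
+ * operations on them are provided elsewhere. Here they are approximated
+ * with the GCC/Clang __atomic builtins, which is an assumption of this
+ * sketch, as are the guard macro and function names. A minimal spinlock
+ * over okl4_atomic_uint32_t could look like this:
+ */
+#if defined(OKL4_ATOMIC_EXAMPLE)
+static void
+example_spin_lock(okl4_atomic_uint32_t *lock)
+{
+    /* Spin until the previous value was 0, i.e. this caller took the lock. */
+    while (__atomic_exchange_n(&lock->value, 1U, __ATOMIC_ACQUIRE) != 0U) {
+        /* busy-wait */
+    }
+}
+
+static void
+example_spin_unlock(okl4_atomic_uint32_t *lock)
+{
+    __atomic_store_n(&lock->value, 0U, __ATOMIC_RELEASE);
+}
+#endif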
+
+/**
+ The `okl4_count_t` type represents a natural number of items or
+ iterations. This type is unsigned and cannot represent error values; use
+ `okl4_scount_t` if an error representation is required.
+*/
+
+typedef uint32_t okl4_count_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS ((okl4_count_t)(12U))
+
+/** The maximum limit for the segment index returned in mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK ((okl4_count_t)(1023U))
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS ((okl4_count_t)(256U))
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS ((okl4_count_t)(0xffffffffU))
+
+
+
+/**
+ The `okl4_kcap_t` type represents a kernel object capability identifier
+ (otherwise known as *designator* or *cap*) that addresses a kernel
+ capability. A capability encodes rights to perform particular operations on
+ a kernel object.
+*/
+
+typedef okl4_count_t okl4_kcap_t;
+
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID ((okl4_kcap_t)(0xffffffffU))
+
+
+
+/**
+ The `okl4_interrupt_number_t` type is an index into the interrupt ID
+ space. For platforms with a single simple interrupt controller, this is
+ the physical interrupt number. When there are multiple interrupt
+ controllers, or a large and sparse interrupt ID space, the mapping from
+ this type to the physical interrupt is defined by the KSP.
+*/
+
+typedef okl4_count_t okl4_interrupt_number_t;
+
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ ((okl4_interrupt_number_t)(1023U))
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ ((okl4_interrupt_number_t)(1023U))
+
+
+
+
+typedef okl4_interrupt_number_t okl4_irq_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_axon_data {
+ okl4_kcap_t kcap;
+ okl4_kcap_t segment;
+ okl4_irq_t virq;
+};
+
+
+
+
+/**
+ The `okl4_psize_t` type represents an unsigned integer value which is large
+ enough to represent the size of any physical memory object.
+*/
+
+typedef okl4_register_t okl4_psize_t;
+
+
+
+
+/**
+ The `okl4_lsize_t` type represents an unsigned integer value which is large
+ enough to represent the size of any guest logical memory object.
+*/
+
+typedef okl4_psize_t okl4_lsize_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE ((okl4_lsize_t)(4096U))
+
+
+
+/**
+ The `okl4_laddr_t` type represents an unsigned integer value which is large
+ enough to contain a guest logical address; that is, an address in the
+ input address space of the guest's virtual MMU. This may be larger than
+ the machine's pointer type.
+*/
+
+typedef okl4_lsize_t okl4_laddr_t;
+
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END ((okl4_laddr_t)(17592186044416U))
+
+
+
+/**
+ - BIT 0 - @ref OKL4_MASK_PENDING_AXON_DATA_INFO
+ - BIT 1 - @ref OKL4_MASK_FAILURE_AXON_DATA_INFO
+ - BIT 2 - @ref OKL4_MASK_USR_AXON_DATA_INFO
+ - BITS 63..3 - @ref OKL4_MASK_LADDR_AXON_DATA_INFO
+*/
+
+/*lint -esym(621, okl4_axon_data_info_t) */
+typedef okl4_laddr_t okl4_axon_data_info_t;
+
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending);
+
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure);
+
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr);
+
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr);
+
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x);
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_DATA_INFO_PENDING_MASK) */
+#define OKL4_AXON_DATA_INFO_PENDING_MASK ((okl4_axon_data_info_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_MASK_PENDING_AXON_DATA_INFO ((okl4_axon_data_info_t)1U)
+/*lint -esym(621, OKL4_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_FAILURE_MASK) */
+#define OKL4_AXON_DATA_INFO_FAILURE_MASK ((okl4_axon_data_info_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_MASK_FAILURE_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_USR_MASK) */
+#define OKL4_AXON_DATA_INFO_USR_MASK ((okl4_axon_data_info_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_MASK_USR_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_LADDR_MASK) */
+#define OKL4_AXON_DATA_INFO_LADDR_MASK ((okl4_axon_data_info_t)2305843009213693951U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_MASK_LADDR_AXON_DATA_INFO ((okl4_axon_data_info_t)2305843009213693951U << 3)
+/*lint -esym(621, OKL4_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/*lint -sem(okl4_axon_data_info_getpending, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setpending, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setpending) */
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_pending;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getfailure, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setfailure, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setfailure) */
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_failure;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getusr, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setusr, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setusr) */
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_usr;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getladdr, 1p) */
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x)
+{
+ okl4_laddr_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 3;
+ uint64_t field : 61;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_laddr_t)_conv.bits.field;
+ return (okl4_laddr_t)(field << 3);
+}
+
+/*lint -esym(714, okl4_axon_data_info_setladdr) */
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr)
+{
+ okl4_laddr_t val = _laddr >> 3;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 3;
+ uint64_t field : 61;
+ } bits;
+ okl4_axon_data_info_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)val;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x)
+{
+ *x = (okl4_axon_data_info_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force)
+{
+ okl4_axon_data_info_t x = (okl4_axon_data_info_t)p;
+ (void)force;
+ return x;
+}
+
+
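+/*
+ * Illustrative sketch only (not part of the generated API): an Axon
+ * descriptor packs a buffer's guest logical address together with its
+ * status bits in one 64-bit word. Because the laddr field is stored
+ * pre-shifted by 3 bits, the buffer address must be 8-byte aligned to
+ * round-trip through the setter and getter. The guard macro and function
+ * name are assumptions.
+ */
+#if defined(OKL4_AXON_DATA_INFO_EXAMPLE)
+static okl4_axon_data_info_t
+example_make_pending_descriptor(okl4_laddr_t buf_laddr)
+{
+    okl4_axon_data_info_t info;
+
+    okl4_axon_data_info_init(&info);                        /* all bits clear */
+    okl4_axon_data_info_setladdr(&info, buf_laddr);         /* bits 63..3     */
+    okl4_axon_data_info_setpending(&info, (okl4_bool_t)1);  /* bit 0          */
+    okl4_axon_data_info_setfailure(&info, (okl4_bool_t)0);  /* bit 1          */
+
+    return info;
+}
+#endif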
+
+/**
+
+*/
+
+struct okl4_axon_ep_data {
+ struct okl4_axon_data rx;
+ struct okl4_axon_data tx;
+};
+
+
+
+
+
+
+
+
+
+typedef char _okl4_padding_t;
+
+
+
+
+
+struct okl4_axon_queue {
+ uint32_t queue_offset;
+ uint16_t entries;
+ volatile uint16_t kptr;
+ volatile uint16_t uptr;
+ _okl4_padding_t __padding0_2; /**< Padding 4 */
+ _okl4_padding_t __padding1_3; /**< Padding 4 */
+};
+
+
+
+
+
+
+/**
+ The `okl4_ksize_t` type represents an unsigned integer value which is large
+ enough to represent the size of any kernel-accessible memory object.
+*/
+
+typedef okl4_lsize_t okl4_ksize_t;
+
+
+
+
+
+struct okl4_axon_queue_entry {
+ okl4_axon_data_info_t info;
+ okl4_ksize_t data_size;
+ uint32_t recv_sequence;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+};
+
+
+
+
+
+
+/**
+ - BITS 4..0 - @ref OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE
+ - BITS 12..8 - @ref OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE
+*/
+
+/*lint -esym(621, okl4_axon_queue_size_t) */
+typedef uint16_t okl4_axon_queue_size_t;
+
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order);
+
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order);
+
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x);
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK (okl4_axon_queue_size_t)(31U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U)
+/*lint -esym(621, OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK (okl4_axon_queue_size_t)(31U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U << 8)
+/*lint -esym(621, OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/*lint -sem(okl4_axon_queue_size_getallocorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x)
+{
+ okl4_count_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 5;
+ } bits;
+ okl4_axon_queue_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_count_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setallocorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setallocorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 5;
+ } bits;
+ okl4_axon_queue_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_alloc_order;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_queue_size_getminorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x)
+{
+ okl4_count_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ uint32_t field : 5;
+ } bits;
+ okl4_axon_queue_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_count_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setminorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setminorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ uint32_t field : 5;
+ } bits;
+ okl4_axon_queue_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_min_order;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x)
+{
+ *x = (okl4_axon_queue_size_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force)
+{
+ okl4_axon_queue_size_t x = (okl4_axon_queue_size_t)p;
+ (void)force;
+ return x;
+}
+
+
+
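+/*
+ * Illustrative sketch only (not part of the generated API): a queue size
+ * word carries two 5-bit order fields. The interpretation of the orders as
+ * log2 sizes, the values chosen (16 and 12), the guard macro and the
+ * function name are all assumptions for demonstration.
+ */
+#if defined(OKL4_AXON_QUEUE_SIZE_EXAMPLE)
+static okl4_axon_queue_size_t
+example_make_queue_size(void)
+{
+    okl4_axon_queue_size_t size;
+
+    okl4_axon_queue_size_init(&size);
+    okl4_axon_queue_size_setallocorder(&size, 16U); /* assumed 2^16 bytes */
+    okl4_axon_queue_size_setminorder(&size, 12U);   /* assumed 2^12 bytes */
+
+    return size;
+}
+#endif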
+
+struct okl4_axon_rx {
+ struct okl4_axon_queue queues[4];
+ okl4_axon_queue_size_t queue_sizes[4];
+};
+
+
+
+
+
+
+
+struct okl4_axon_tx {
+ struct okl4_axon_queue queues[4];
+};
+
+
+
+
+
+
+
+typedef okl4_register_t okl4_virq_flags_t;
+
+
+
+
+/**
+ - BIT 0 - @ref OKL4_MASK_READY_AXON_VIRQ_FLAGS
+ - BIT 1 - @ref OKL4_MASK_FAULT_AXON_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_axon_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_axon_virq_flags_t;
+
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready);
+
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault);
+
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x);
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_READY_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_READY_MASK ((okl4_axon_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_READY_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_FAULT_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_FAULT_MASK ((okl4_axon_virq_flags_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_FAULT_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_axon_virq_flags_getready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_axon_virq_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setready) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_axon_virq_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_ready;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_virq_flags_getfault, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_axon_virq_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setfault, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setfault) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_axon_virq_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_fault;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x)
+{
+ *x = (okl4_axon_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+ okl4_axon_virq_flags_t x = (okl4_axon_virq_flags_t)p;
+ (void)force;
+ return x;
+}
+
+
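+/*
+ * Illustrative sketch only (not part of the generated API): a virtual
+ * interrupt payload raised for an Axon can be decoded with the accessors
+ * above. The payload parameter, the handler shape and the guard macro are
+ * assumptions.
+ */
+#if defined(OKL4_AXON_VIRQ_FLAGS_EXAMPLE)
+static void
+example_handle_axon_virq(uint64_t payload)
+{
+    okl4_axon_virq_flags_t flags =
+            okl4_axon_virq_flags_cast(payload, (okl4_bool_t)0);
+
+    if (okl4_axon_virq_flags_getfault(&flags)) {
+        /* The channel has faulted; tear it down or reset it here. */
+    } else if (okl4_axon_virq_flags_getready(&flags)) {
+        /* Data or buffer space is available; kick the queue worker. */
+    }
+}
+#endif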
+
+/**
+ The `okl4_page_cache_t` object represents a set of attributes that
+ controls the caching behaviour of memory page mappings.
+
+ - @ref OKL4_PAGE_CACHE_WRITECOMBINE
+ - @ref OKL4_PAGE_CACHE_DEFAULT
+ - @ref OKL4_PAGE_CACHE_IPC_RX
+ - @ref OKL4_PAGE_CACHE_IPC_TX
+ - @ref OKL4_PAGE_CACHE_TRACEBUFFER
+ - @ref OKL4_PAGE_CACHE_WRITEBACK
+ - @ref OKL4_PAGE_CACHE_IWB_RWA_ONC
+ - @ref OKL4_PAGE_CACHE_WRITETHROUGH
+ - @ref OKL4_PAGE_CACHE_DEVICE_GRE
+ - @ref OKL4_PAGE_CACHE_DEVICE_NGRE
+ - @ref OKL4_PAGE_CACHE_DEVICE
+ - @ref OKL4_PAGE_CACHE_STRONG
+ - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE
+ - @ref OKL4_PAGE_CACHE_HW_MASK
+ - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRE
+ - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGRE
+ - @ref OKL4_PAGE_CACHE_HW_DEVICE_GRE
+ - @ref OKL4_PAGE_CACHE_HW_TWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_NC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_RWA_NSH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_NC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_WB_RWA_OSH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_NC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_HW_WB_RWA_ISH
+ - @ref OKL4_PAGE_CACHE_MAX
+ - @ref OKL4_PAGE_CACHE_INVALID
+*/
+
+typedef okl4_count_t okl4_page_cache_t;
+
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_PAGE_CACHE_WRITECOMBINE ((okl4_page_cache_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEFAULT) */
+#define OKL4_PAGE_CACHE_DEFAULT ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_RX) */
+#define OKL4_PAGE_CACHE_IPC_RX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_TX) */
+#define OKL4_PAGE_CACHE_IPC_TX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_PAGE_CACHE_TRACEBUFFER ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITEBACK) */
+#define OKL4_PAGE_CACHE_WRITEBACK ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_PAGE_CACHE_IWB_RWA_ONC ((okl4_page_cache_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_PAGE_CACHE_WRITETHROUGH ((okl4_page_cache_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_DEVICE_GRE ((okl4_page_cache_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_DEVICE_NGRE ((okl4_page_cache_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE) */
+#define OKL4_PAGE_CACHE_DEVICE ((okl4_page_cache_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_STRONG) */
+#define OKL4_PAGE_CACHE_STRONG ((okl4_page_cache_t)0x7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_MASK) */
+#define OKL4_PAGE_CACHE_HW_MASK ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRE ((okl4_page_cache_t)0x8000004U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGRE ((okl4_page_cache_t)0x8000008U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_GRE ((okl4_page_cache_t)0x800000cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_NSH ((okl4_page_cache_t)0x8000011U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000012U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000013U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH ((okl4_page_cache_t)0x8000014U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000015U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000016U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000017U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000018U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000019U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000021U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_NSH ((okl4_page_cache_t)0x8000022U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000023U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH ((okl4_page_cache_t)0x8000024U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000025U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000026U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000027U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000028U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000029U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000031U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000032U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_NSH ((okl4_page_cache_t)0x8000033U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000034U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000035U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000036U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000037U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000038U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000039U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000041U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH ((okl4_page_cache_t)0x8000042U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH ((okl4_page_cache_t)0x8000043U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_PAGE_CACHE_HW_NC_NSH ((okl4_page_cache_t)0x8000044U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH ((okl4_page_cache_t)0x8000045U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH ((okl4_page_cache_t)0x8000046U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH ((okl4_page_cache_t)0x8000047U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH ((okl4_page_cache_t)0x8000048U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000049U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH ((okl4_page_cache_t)0x800004aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH ((okl4_page_cache_t)0x800004bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH ((okl4_page_cache_t)0x800004cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH ((okl4_page_cache_t)0x800004dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH ((okl4_page_cache_t)0x800004eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH ((okl4_page_cache_t)0x800004fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000051U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000052U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000053U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH ((okl4_page_cache_t)0x8000054U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_NSH ((okl4_page_cache_t)0x8000055U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000056U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000057U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000058U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000059U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000061U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000062U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000063U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH ((okl4_page_cache_t)0x8000064U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000065U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_NSH ((okl4_page_cache_t)0x8000066U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000067U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000068U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000069U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000071U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000072U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000073U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000074U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000075U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000076U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_NSH ((okl4_page_cache_t)0x8000077U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000078U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000079U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000081U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000082U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000083U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH ((okl4_page_cache_t)0x8000084U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000085U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000086U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000087U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_NSH ((okl4_page_cache_t)0x8000088U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000089U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH ((okl4_page_cache_t)0x800008cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x800008dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000091U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000092U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000093U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH ((okl4_page_cache_t)0x8000094U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000095U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000096U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000097U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH ((okl4_page_cache_t)0x8000098U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_NSH ((okl4_page_cache_t)0x8000099U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH ((okl4_page_cache_t)0x800009cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x800009dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH ((okl4_page_cache_t)0x80000a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_NSH ((okl4_page_cache_t)0x80000aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_NSH ((okl4_page_cache_t)0x80000bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH ((okl4_page_cache_t)0x80000c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_NSH ((okl4_page_cache_t)0x80000ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH ((okl4_page_cache_t)0x80000d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_NSH ((okl4_page_cache_t)0x80000ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH ((okl4_page_cache_t)0x80000e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_NSH ((okl4_page_cache_t)0x80000eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_NSH ((okl4_page_cache_t)0x80000ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_OSH ((okl4_page_cache_t)0x8000211U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000212U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000213U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH ((okl4_page_cache_t)0x8000214U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000215U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000216U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000217U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000218U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000219U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000221U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_OSH ((okl4_page_cache_t)0x8000222U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000223U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH ((okl4_page_cache_t)0x8000224U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000225U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000226U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000227U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000228U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000229U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000231U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000232U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_OSH ((okl4_page_cache_t)0x8000233U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000234U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000235U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000236U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000237U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000238U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000239U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000241U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH ((okl4_page_cache_t)0x8000242U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH ((okl4_page_cache_t)0x8000243U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_PAGE_CACHE_HW_NC_OSH ((okl4_page_cache_t)0x8000244U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH ((okl4_page_cache_t)0x8000245U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH ((okl4_page_cache_t)0x8000246U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH ((okl4_page_cache_t)0x8000247U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH ((okl4_page_cache_t)0x8000248U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000249U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH ((okl4_page_cache_t)0x800024aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH ((okl4_page_cache_t)0x800024bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH ((okl4_page_cache_t)0x800024cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH ((okl4_page_cache_t)0x800024dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH ((okl4_page_cache_t)0x800024eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH ((okl4_page_cache_t)0x800024fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000251U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000252U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000253U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH ((okl4_page_cache_t)0x8000254U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_OSH ((okl4_page_cache_t)0x8000255U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000256U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000257U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000258U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000259U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000261U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000262U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000263U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH ((okl4_page_cache_t)0x8000264U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000265U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_OSH ((okl4_page_cache_t)0x8000266U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000267U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000268U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000269U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000271U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000272U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000273U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000274U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000275U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000276U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_OSH ((okl4_page_cache_t)0x8000277U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000278U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000279U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000281U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000282U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000283U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH ((okl4_page_cache_t)0x8000284U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000285U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000286U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000287U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_OSH ((okl4_page_cache_t)0x8000288U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000289U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH ((okl4_page_cache_t)0x800028cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x800028dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000291U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000292U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000293U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH ((okl4_page_cache_t)0x8000294U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000295U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000296U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000297U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH ((okl4_page_cache_t)0x8000298U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_OSH ((okl4_page_cache_t)0x8000299U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH ((okl4_page_cache_t)0x800029cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x800029dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH ((okl4_page_cache_t)0x80002a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_OSH ((okl4_page_cache_t)0x80002aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_OSH ((okl4_page_cache_t)0x80002bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH ((okl4_page_cache_t)0x80002c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_OSH ((okl4_page_cache_t)0x80002ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH ((okl4_page_cache_t)0x80002d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_OSH ((okl4_page_cache_t)0x80002ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH ((okl4_page_cache_t)0x80002e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_OSH ((okl4_page_cache_t)0x80002eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_OSH ((okl4_page_cache_t)0x80002ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_ISH ((okl4_page_cache_t)0x8000311U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000312U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000313U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH ((okl4_page_cache_t)0x8000314U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000315U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000316U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000317U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000318U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000319U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000321U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_ISH ((okl4_page_cache_t)0x8000322U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000323U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH ((okl4_page_cache_t)0x8000324U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000325U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000326U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000327U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000328U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000329U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000331U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000332U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_ISH ((okl4_page_cache_t)0x8000333U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000334U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000335U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000336U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000337U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000338U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000339U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000341U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH ((okl4_page_cache_t)0x8000342U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH ((okl4_page_cache_t)0x8000343U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_PAGE_CACHE_HW_NC_ISH ((okl4_page_cache_t)0x8000344U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH ((okl4_page_cache_t)0x8000345U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH ((okl4_page_cache_t)0x8000346U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH ((okl4_page_cache_t)0x8000347U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH ((okl4_page_cache_t)0x8000348U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000349U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH ((okl4_page_cache_t)0x800034aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH ((okl4_page_cache_t)0x800034bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH ((okl4_page_cache_t)0x800034cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH ((okl4_page_cache_t)0x800034dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH ((okl4_page_cache_t)0x800034eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH ((okl4_page_cache_t)0x800034fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000351U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000352U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000353U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH ((okl4_page_cache_t)0x8000354U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_ISH ((okl4_page_cache_t)0x8000355U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000356U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000357U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000358U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000359U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000361U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000362U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000363U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH ((okl4_page_cache_t)0x8000364U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000365U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_ISH ((okl4_page_cache_t)0x8000366U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000367U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000368U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000369U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000371U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000372U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000373U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000374U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000375U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000376U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_ISH ((okl4_page_cache_t)0x8000377U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000378U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000379U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000381U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000382U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000383U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH ((okl4_page_cache_t)0x8000384U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000385U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000386U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000387U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_ISH ((okl4_page_cache_t)0x8000388U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000389U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH ((okl4_page_cache_t)0x800038cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x800038dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000391U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000392U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000393U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH ((okl4_page_cache_t)0x8000394U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000395U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000396U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000397U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH ((okl4_page_cache_t)0x8000398U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_ISH ((okl4_page_cache_t)0x8000399U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH ((okl4_page_cache_t)0x800039cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x800039dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH ((okl4_page_cache_t)0x80003a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_ISH ((okl4_page_cache_t)0x80003aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_ISH ((okl4_page_cache_t)0x80003bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH ((okl4_page_cache_t)0x80003c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_ISH ((okl4_page_cache_t)0x80003ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH ((okl4_page_cache_t)0x80003d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_ISH ((okl4_page_cache_t)0x80003ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH ((okl4_page_cache_t)0x80003e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_ISH ((okl4_page_cache_t)0x80003eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_ISH ((okl4_page_cache_t)0x80003ffU)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_MAX) */
+#define OKL4_PAGE_CACHE_MAX ((okl4_page_cache_t)0x80003ffU)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_INVALID) */
+#define OKL4_PAGE_CACHE_INVALID ((okl4_page_cache_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var);
+
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_PAGE_CACHE_WRITECOMBINE) ||
+ (var == OKL4_PAGE_CACHE_DEFAULT) ||
+ (var == OKL4_PAGE_CACHE_IPC_RX) ||
+ (var == OKL4_PAGE_CACHE_IPC_TX) ||
+ (var == OKL4_PAGE_CACHE_TRACEBUFFER) ||
+ (var == OKL4_PAGE_CACHE_WRITEBACK) ||
+ (var == OKL4_PAGE_CACHE_IWB_RWA_ONC) ||
+ (var == OKL4_PAGE_CACHE_WRITETHROUGH) ||
+ (var == OKL4_PAGE_CACHE_DEVICE_GRE) ||
+ (var == OKL4_PAGE_CACHE_DEVICE_NGRE) ||
+ (var == OKL4_PAGE_CACHE_DEVICE) ||
+ (var == OKL4_PAGE_CACHE_STRONG) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_DEVICE_GRE) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_NC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_DEVICE_NGRE) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_NC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_NC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_TWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_WT_RWA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) ||
+ (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) ||
+ (var == OKL4_PAGE_CACHE_HW_MASK));
+}
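+
+/*
+ * Illustrative sketch (not part of the generated API): a caller might use
+ * okl4_page_cache_is_element_of() to validate a cache-attribute value that
+ * was read from an untrusted source, falling back to the default attribute
+ * otherwise. The helper name below is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_page_cache_t
+okl4_example_sanitize_cache_attrs(okl4_page_cache_t attrs)
+{
+    /* Reject any value that is not a member of the enumeration. */
+    if (!okl4_page_cache_is_element_of(attrs)) {
+        attrs = OKL4_PAGE_CACHE_DEFAULT;
+    }
+    return attrs;
+}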
+
+
+
+typedef uint32_t okl4_cell_id_t;
+
+
+
+
+
+typedef char okl4_char_t;
+
+
+
+
+
+
+
+
+/**
+ The `okl4_string_t` type represents a constant C string of type
+ 'const char *'.
+*/
+
+typedef const okl4_char_t *okl4_string_t;
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ A contiguous range of addresses, described by a base address and a size.
+*/
+
+struct okl4_range_item {
+ okl4_laddr_t base;
+ okl4_lsize_t size;
+};
+
+
+
+
+/**
+ A region of virtual memory, represented as a single address range.
+*/
+
+struct okl4_virtmem_item {
+ struct okl4_range_item range;
+};
+
+
+
+
+/**
+ Management information for a single cell, including its entry point,
+ image mapping, capabilities and configuration flags.
+*/
+
+struct okl4_cell_management_item {
+ okl4_laddr_t entry;
+ struct okl4_virtmem_item mapping_range;
+ __ptr64(void *, data);
+ __ptr64(okl4_string_t, image);
+ okl4_kcap_t mmu;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(okl4_string_t, name);
+ okl4_kcap_t registers_cap;
+ okl4_kcap_t reset_virq;
+ okl4_count_t segment_index;
+ _okl4_padding_t __padding4_4;
+ _okl4_padding_t __padding5_5;
+ _okl4_padding_t __padding6_6;
+ _okl4_padding_t __padding7_7;
+ __ptr64(struct okl4_cell_management_segments *, segments);
+ __ptr64(struct okl4_cell_management_vcpus *, vcpus);
+ okl4_bool_t boot_once;
+ okl4_bool_t can_stop;
+ okl4_bool_t deferred;
+ okl4_bool_t detached;
+ okl4_bool_t erase;
+ _okl4_padding_t __padding8_5;
+ _okl4_padding_t __padding9_6;
+ _okl4_padding_t __padding10_7;
+ okl4_laddr_t dtb_address;
+};
+
+
+
+
+/**
+ A table of cell management items, one per cell.
+*/
+
+struct okl4_cell_management {
+ okl4_count_t num_items;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ struct okl4_cell_management_item items[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+ The `okl4_paddr_t` type represents an unsigned integer value which is large
+ enough to contain a machine-native physical address.
+*/
+
+typedef okl4_psize_t okl4_paddr_t;
+
+
+
+
+/**
+ A mapping of a physical segment, recording its physical address, size,
+ virtual address, capability and attributes.
+*/
+
+struct okl4_segment_mapping {
+ okl4_paddr_t phys_addr;
+ okl4_psize_t size;
+ okl4_laddr_t virt_addr;
+ okl4_kcap_t cap;
+ okl4_bool_t device;
+ okl4_bool_t owned;
+ _okl4_padding_t __padding0_6;
+ _okl4_padding_t __padding1_7;
+};
+
+
+
+
+/**
+ The set of physical segment mappings belonging to a cell.
+*/
+
+struct okl4_cell_management_segments {
+ okl4_count_t free_segments;
+ okl4_count_t num_segments;
+ struct okl4_segment_mapping segment_mappings[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+ The list of vCPU capabilities belonging to a cell.
+*/
+
+struct okl4_cell_management_vcpus {
+ okl4_count_t num_vcpus;
+ okl4_kcap_t vcpu_caps[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+ CPU instruction set
+*/
+
+typedef uint32_t okl4_cpu_exec_mode;
+
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE ((okl4_cpu_exec_mode)(0U))
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE ((okl4_cpu_exec_mode)(4U))
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE ((okl4_cpu_exec_mode)(2U))
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE ((okl4_cpu_exec_mode)(3U))
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE ((okl4_cpu_exec_mode)(1U))
+
+
+
+/**
+ CPU mode specifier
+
+ - BITS 2..0 - @ref OKL4_MASK_EXEC_MODE_CPU_MODE
+ - BIT 7 - @ref OKL4_MASK_ENDIAN_CPU_MODE
+*/
+
+/*lint -esym(621, okl4_cpu_mode_t) */
+typedef uint32_t okl4_cpu_mode_t;
+
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode);
+
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian);
+
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x);
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_CPU_MODE_EXEC_MODE_MASK) */
+#define OKL4_CPU_MODE_EXEC_MODE_MASK ((okl4_cpu_mode_t)7U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_MASK_EXEC_MODE_CPU_MODE ((okl4_cpu_mode_t)7U)
+/*lint -esym(621, OKL4_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_CPU_MODE_ENDIAN_MASK) */
+#define OKL4_CPU_MODE_ENDIAN_MASK ((okl4_cpu_mode_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_MASK_ENDIAN_CPU_MODE ((okl4_cpu_mode_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_WIDTH_ENDIAN_CPU_MODE (1)
+
+
+/*lint -sem(okl4_cpu_mode_getexecmode, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x)
+{
+ okl4_cpu_exec_mode field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 3;
+ } bits;
+ okl4_cpu_mode_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_cpu_exec_mode)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setexecmode, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_cpu_mode_setexecmode) */
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 3;
+ } bits;
+ okl4_cpu_mode_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_exec_mode;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_cpu_mode_getendian, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 7;
+ _Bool field : 1;
+ } bits;
+ okl4_cpu_mode_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setendian, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_cpu_mode_setendian) */
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 7;
+ _Bool field : 1;
+ } bits;
+ okl4_cpu_mode_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_endian;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x)
+{
+ *x = (okl4_cpu_mode_t)0U;
+}
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_cpu_mode_t x = (okl4_cpu_mode_t)p;
+ (void)force;
+ return x;
+}
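+
+/*
+ * Illustrative sketch (not part of the generated API): composing a CPU mode
+ * value with the accessors above. The bit layout (bits 2..0 execution mode,
+ * bit 7 endianness flag) is taken from the okl4_cpu_mode_t description; the
+ * helper name and the meaning of a non-zero endian flag are assumptions.
+ */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_example_make_cpu_mode(okl4_cpu_exec_mode exec_mode, okl4_bool_t endian)
+{
+    okl4_cpu_mode_t mode;
+
+    okl4_cpu_mode_init(&mode);                   /* start from all bits zero */
+    okl4_cpu_mode_setexecmode(&mode, exec_mode); /* bits 2..0 */
+    okl4_cpu_mode_setendian(&mode, endian);      /* bit 7 */
+    return mode;
+}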
+
+
+
+
+struct _okl4_env_hdr {
+ uint16_t magic;
+ uint16_t count;
+};
+
+
+
+
+
+
+
+struct _okl4_env_item {
+ __ptr64(okl4_string_t, name);
+ __ptr64(void *, item);
+};
+
+
+
+
+
+
+/**
+ The OKL4 environment. It is a dictionary that maps strings to
+ arbitrary objects. The content of the environment is defined
+ at system construction time, and is read-only at run time.
+*/
+
+struct _okl4_env {
+ struct _okl4_env_hdr env_hdr;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ struct _okl4_env_item env_item[]; /*lint --e{9038} flex array */
+};
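+
+/*
+ * Illustrative sketch (not part of the generated API): looking up an object
+ * by name in the environment dictionary. It is assumed that env_item[] holds
+ * env_hdr.count (name, item) pairs and that the __ptr64() fields can be read
+ * directly as pointers on the target ABI. The helper name is hypothetical.
+ */
+OKL4_FORCE_INLINE void *
+_okl4_example_env_lookup(const struct _okl4_env *env, okl4_string_t name)
+{
+    okl4_count_t i;
+
+    for (i = 0U; i < (okl4_count_t)env->env_hdr.count; i++) {
+        okl4_string_t a = env->env_item[i].name;
+        okl4_string_t b = name;
+
+        /* Compare the two NUL-terminated names in place. */
+        while ((*a != '\0') && (*a == *b)) {
+            a++;
+            b++;
+        }
+        if (*a == *b) {
+            return env->env_item[i].item;
+        }
+    }
+    return (void *)0; /* no entry with this name */
+}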
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_cell {
+ __ptr64(okl4_string_t, name);
+ okl4_count_t num_entries;
+ okl4_count_t start_entry;
+};
+
+
+
+
+/**
+ The okl4_page_perms_t object represents a set of access permissions for
+ page mappings.
+
+ - @ref OKL4_PAGE_PERMS_NONE
+ - @ref OKL4_PAGE_PERMS_X
+ - @ref OKL4_PAGE_PERMS_W
+ - @ref OKL4_PAGE_PERMS_WX
+ - @ref OKL4_PAGE_PERMS_R
+ - @ref OKL4_PAGE_PERMS_RX
+ - @ref OKL4_PAGE_PERMS_RW
+ - @ref OKL4_PAGE_PERMS_RWX
+ - @ref OKL4_PAGE_PERMS_MAX
+ - @ref OKL4_PAGE_PERMS_INVALID
+*/
+
+typedef uint32_t okl4_page_perms_t;
+
+/*lint -esym(621, OKL4_PAGE_PERMS_NONE) */
+#define OKL4_PAGE_PERMS_NONE ((okl4_page_perms_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_PERMS_X) */
+#define OKL4_PAGE_PERMS_X ((okl4_page_perms_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_PERMS_W) */
+#define OKL4_PAGE_PERMS_W ((okl4_page_perms_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_PERMS_WX) */
+#define OKL4_PAGE_PERMS_WX ((okl4_page_perms_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_PERMS_R) */
+#define OKL4_PAGE_PERMS_R ((okl4_page_perms_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RX) */
+#define OKL4_PAGE_PERMS_RX ((okl4_page_perms_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RW) */
+#define OKL4_PAGE_PERMS_RW ((okl4_page_perms_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RWX) */
+#define OKL4_PAGE_PERMS_RWX ((okl4_page_perms_t)0x7U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_MAX) */
+#define OKL4_PAGE_PERMS_MAX ((okl4_page_perms_t)0x7U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_INVALID) */
+#define OKL4_PAGE_PERMS_INVALID ((okl4_page_perms_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var);
+
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_PAGE_PERMS_NONE) ||
+ (var == OKL4_PAGE_PERMS_X) ||
+ (var == OKL4_PAGE_PERMS_W) ||
+ (var == OKL4_PAGE_PERMS_WX) ||
+ (var == OKL4_PAGE_PERMS_R) ||
+ (var == OKL4_PAGE_PERMS_RX) ||
+ (var == OKL4_PAGE_PERMS_RW) ||
+ (var == OKL4_PAGE_PERMS_RWX));
+}
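+
+/*
+ * Illustrative sketch (not part of the generated API): the permission values
+ * above compose as single-bit flags (X = 1, W = 2, R = 4), so a subset check
+ * can be written with bitwise operations. The helper name is hypothetical.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_perms_allow(okl4_page_perms_t granted, okl4_page_perms_t requested)
+{
+    /* Every requested permission bit must also be present in granted. */
+    return (okl4_bool_t)((requested & ~granted & OKL4_PAGE_PERMS_RWX) == 0U);
+}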
+
+
+/**
+
+*/
+
+struct okl4_env_access_entry {
+ okl4_laddr_t virtual_address;
+ okl4_psize_t offset;
+ okl4_psize_t size;
+ okl4_count_t num_segs;
+ okl4_count_t segment_index;
+ okl4_page_cache_t cache_attrs;
+ okl4_page_perms_t permissions;
+ __ptr64(okl4_string_t, object_name);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_table {
+ okl4_count_t num_cells;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(struct okl4_env_access_cell *, cells);
+ __ptr64(struct okl4_env_access_entry *, entries);
+};
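+
+/*
+ * Illustrative sketch (not part of the generated API): it is assumed here
+ * that each okl4_env_access_cell describes the slice
+ * entries[start_entry .. start_entry + num_entries - 1] of the table's
+ * entries array. The hypothetical helper below sums the sizes of one cell's
+ * access entries under that assumption.
+ */
+OKL4_FORCE_INLINE okl4_psize_t
+okl4_example_access_cell_total_size(const struct okl4_env_access_table *table,
+        okl4_count_t cell_index)
+{
+    const struct okl4_env_access_cell *cell = &table->cells[cell_index];
+    okl4_psize_t total = 0U;
+    okl4_count_t i;
+
+    for (i = 0U; i < cell->num_entries; i++) {
+        total += table->entries[cell->start_entry + i].size;
+    }
+    return total;
+}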
+
+
+
+
+/**
+ This object contains command-line arguments passed to
+ user-level programs.
+*/
+
+struct okl4_env_args {
+ okl4_count_t argc;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64_array(okl4_string_t, argv)[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+ The okl4_env_interrupt_device_map_t type represents a list of interrupt
+ numbers (IRQs) that are connected to a given peripheral
+ device. Objects of this type are typically obtained from
+ the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_device_map {
+ okl4_count_t num_entries;
+ okl4_interrupt_number_t entries[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+ The okl4_interrupt_t structure is used to represent a kernel interrupt
+ object.
+*/
+
+struct okl4_interrupt {
+ okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+ The okl4_env_interrupt_handle_t type stores the information required to
+ perform operations on an interrupt.
+*/
+
+struct okl4_env_interrupt_handle {
+ okl4_interrupt_number_t descriptor;
+ struct okl4_interrupt interrupt;
+};
+
+
+
+
+/**
+ The okl4_env_interrupt_list_t type stores a list of interrupt handle objects
+ which represent all the interrupts that are available to the cell.
+ Objects of this type are typically obtained from
+ the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_list {
+ okl4_count_t num_entries;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(okl4_interrupt_number_t *, descriptor);
+ __ptr64(struct okl4_interrupt *, interrupt);
+};
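+
+/*
+ * Illustrative sketch (not part of the generated API): descriptor and
+ * interrupt are assumed to be parallel arrays of num_entries elements, so
+ * the kernel interrupt object for a given IRQ number can be found with a
+ * linear scan. The helper name is hypothetical.
+ */
+OKL4_FORCE_INLINE struct okl4_interrupt *
+okl4_example_find_interrupt(const struct okl4_env_interrupt_list *list,
+        okl4_interrupt_number_t irq)
+{
+    okl4_count_t i;
+
+    for (i = 0U; i < list->num_entries; i++) {
+        if (list->descriptor[i] == irq) {
+            return &list->interrupt[i];
+        }
+    }
+    return (struct okl4_interrupt *)0; /* IRQ not available to this cell */
+}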
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cell {
+ okl4_char_t name[32];
+ okl4_count_t num_cores;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(struct okl4_env_profile_cpu *, core);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cpu {
+ okl4_kcap_t cap;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_table {
+ okl4_count_t num_cell_entries;
+ okl4_count_t pcpu_cell_entry;
+ __ptr64(struct okl4_env_profile_cell *, cells);
+};
+
+
+
+
+/**
+ A physical memory segment available to the cell, with its base address,
+ size, capability and access permissions.
+*/
+
+struct okl4_env_segment {
+ okl4_paddr_t base;
+ okl4_psize_t size;
+ okl4_kcap_t cap_id;
+ okl4_page_perms_t rwx;
+};
+
+
+
+
+/**
+ The table of physical memory segments available to the cell.
+*/
+
+struct okl4_env_segment_table {
+ okl4_count_t num_segments;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ struct okl4_env_segment segments[]; /*lint --e{9038} flex array */
+};
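+
+/*
+ * Illustrative sketch (not part of the generated API): finding the segment
+ * that covers a given physical address by scanning the table. The helper
+ * name is hypothetical.
+ */
+OKL4_FORCE_INLINE const struct okl4_env_segment *
+okl4_example_find_segment(const struct okl4_env_segment_table *table,
+        okl4_paddr_t paddr)
+{
+    okl4_count_t i;
+
+    for (i = 0U; i < table->num_segments; i++) {
+        const struct okl4_env_segment *seg = &table->segments[i];
+
+        if ((paddr >= seg->base) && ((paddr - seg->base) < seg->size)) {
+            return seg;
+        }
+    }
+    return (const struct okl4_env_segment *)0; /* address not in any segment */
+}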
+
+
+
+
+/**
+ The `okl4_error_t` type represents an error condition returned by the
+ OKL4 API.
+
+ See OKL4_ERROR_*
+
+ - @ref OKL4_ERROR_KSP_OK
+ - @ref OKL4_ERROR_OK
+ - @ref OKL4_ERROR_ALREADY_STARTED
+ - @ref OKL4_ERROR_ALREADY_STOPPED
+ - @ref OKL4_ERROR_AXON_AREA_TOO_BIG
+ - @ref OKL4_ERROR_AXON_BAD_MESSAGE_SIZE
+ - @ref OKL4_ERROR_AXON_INVALID_OFFSET
+ - @ref OKL4_ERROR_AXON_QUEUE_NOT_MAPPED
+ - @ref OKL4_ERROR_AXON_QUEUE_NOT_READY
+ - @ref OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED
+ - @ref OKL4_ERROR_CANCELLED
+ - @ref OKL4_ERROR_EXISTING_MAPPING
+ - @ref OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS
+ - @ref OKL4_ERROR_INTERRUPTED
+ - @ref OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED
+ - @ref OKL4_ERROR_INTERRUPT_INVALID_IRQ
+ - @ref OKL4_ERROR_INTERRUPT_NOT_ATTACHED
+ - @ref OKL4_ERROR_INVALID_ARGUMENT
+ - @ref OKL4_ERROR_INVALID_DESIGNATOR
+ - @ref OKL4_ERROR_INVALID_POWER_STATE
+ - @ref OKL4_ERROR_INVALID_SEGMENT_INDEX
+ - @ref OKL4_ERROR_MEMORY_FAULT
+ - @ref OKL4_ERROR_MISSING_MAPPING
+ - @ref OKL4_ERROR_NON_EMPTY_MMU_CONTEXT
+ - @ref OKL4_ERROR_NOT_IN_SEGMENT
+ - @ref OKL4_ERROR_NOT_LAST_CPU
+ - @ref OKL4_ERROR_NO_RESOURCES
+ - @ref OKL4_ERROR_PIPE_BAD_STATE
+ - @ref OKL4_ERROR_PIPE_EMPTY
+ - @ref OKL4_ERROR_PIPE_FULL
+ - @ref OKL4_ERROR_PIPE_NOT_READY
+ - @ref OKL4_ERROR_PIPE_RECV_OVERFLOW
+ - @ref OKL4_ERROR_POWER_VCPU_RESUMED
+ - @ref OKL4_ERROR_SEGMENT_USED
+ - @ref OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED
+ - @ref OKL4_ERROR_TIMER_ACTIVE
+ - @ref OKL4_ERROR_TIMER_CANCELLED
+ - @ref OKL4_ERROR_TRY_AGAIN
+ - @ref OKL4_ERROR_WOULD_BLOCK
+ - @ref OKL4_ERROR_ALLOC_EXHAUSTED
+ - @ref OKL4_ERROR_KSP_ERROR_0
+ - @ref OKL4_ERROR_KSP_ERROR_1
+ - @ref OKL4_ERROR_KSP_ERROR_2
+ - @ref OKL4_ERROR_KSP_ERROR_3
+ - @ref OKL4_ERROR_KSP_ERROR_4
+ - @ref OKL4_ERROR_KSP_ERROR_5
+ - @ref OKL4_ERROR_KSP_ERROR_6
+ - @ref OKL4_ERROR_KSP_ERROR_7
+ - @ref OKL4_ERROR_KSP_INVALID_ARG
+ - @ref OKL4_ERROR_KSP_NOT_IMPLEMENTED
+ - @ref OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS
+ - @ref OKL4_ERROR_KSP_INTERRUPT_REGISTERED
+ - @ref OKL4_ERROR_NOT_IMPLEMENTED
+ - @ref OKL4_ERROR_MAX
+*/
+
+typedef uint32_t okl4_error_t;
+
+/**
+ KSP returned OK
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_OK) */
+#define OKL4_ERROR_KSP_OK ((okl4_error_t)0x0U)
+/**
+ The operation succeeded
+*/
+/*lint -esym(621, OKL4_ERROR_OK) */
+#define OKL4_ERROR_OK ((okl4_error_t)0x0U)
+/**
+ The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STARTED) */
+#define OKL4_ERROR_ALREADY_STARTED ((okl4_error_t)0x1U)
+/**
+ The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STOPPED) */
+#define OKL4_ERROR_ALREADY_STOPPED ((okl4_error_t)0x2U)
+/*lint -esym(621, OKL4_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ERROR_AXON_AREA_TOO_BIG ((okl4_error_t)0x3U)
+/*lint -esym(621, OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ERROR_AXON_BAD_MESSAGE_SIZE ((okl4_error_t)0x4U)
+/*lint -esym(621, OKL4_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ERROR_AXON_INVALID_OFFSET ((okl4_error_t)0x5U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_MAPPED ((okl4_error_t)0x6U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_READY ((okl4_error_t)0x7U)
+/*lint -esym(621, OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED ((okl4_error_t)0x8U)
+/**
+ A blocking operation was cancelled due to an abort of the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_CANCELLED) */
+#define OKL4_ERROR_CANCELLED ((okl4_error_t)0x9U)
+/**
+ The operation failed due to an existing mapping. Mapping
+ operations must not overlap an existing mapping. Unmapping
+ must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ERROR_EXISTING_MAPPING) */
+#define OKL4_ERROR_EXISTING_MAPPING ((okl4_error_t)0xaU)
+/**
+ The operation requested with a segment failed due to
+ insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS ((okl4_error_t)0xbU)
+/**
+ The operation did not complete because it was interrupted by a
+ preemption. This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPTED) */
+#define OKL4_ERROR_INTERRUPTED ((okl4_error_t)0xcU)
+/**
+ Attempt to attach an interrupt to an IRQ number, when the
+ interrupt is already attached to an IRQ number.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED ((okl4_error_t)0xdU)
+/**
+ Attempt to use an IRQ number that is out of range, of
+ the wrong type, or not in the correct state.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ERROR_INTERRUPT_INVALID_IRQ ((okl4_error_t)0xeU)
+/**
+ Attempt to operate on an unknown IRQ number.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_NOT_ATTACHED ((okl4_error_t)0xfU)
+/**
+ An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ERROR_INVALID_ARGUMENT ((okl4_error_t)0x10U)
+/**
+ The operation failed because one of the arguments does not refer to a
+ valid object.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ERROR_INVALID_DESIGNATOR ((okl4_error_t)0x11U)
+/**
+ The operation failed because the power_state
+ argument is invalid.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ERROR_INVALID_POWER_STATE ((okl4_error_t)0x12U)
+/**
+ The operation failed because the given segment index does
+ not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ERROR_INVALID_SEGMENT_INDEX ((okl4_error_t)0x13U)
+/**
+ A user-provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_MEMORY_FAULT) */
+#define OKL4_ERROR_MEMORY_FAULT ((okl4_error_t)0x14U)
+/**
+ The operation failed because there is no mapping at the
+ specified location.
+*/
+/*lint -esym(621, OKL4_ERROR_MISSING_MAPPING) */
+#define OKL4_ERROR_MISSING_MAPPING ((okl4_error_t)0x15U)
+/**
+ The delete operation failed because the KMMU context is not
+ empty.
+*/
+/*lint -esym(621, OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ERROR_NON_EMPTY_MMU_CONTEXT ((okl4_error_t)0x16U)
+/**
+ The lookup operation failed because the given virtual address
+ in the given KMMU context is not mapped at the given physical
+ segment.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ERROR_NOT_IN_SEGMENT ((okl4_error_t)0x17U)
+/**
+ The operation failed because the caller is not on the last
+ online CPU.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_LAST_CPU) */
+#define OKL4_ERROR_NOT_LAST_CPU ((okl4_error_t)0x18U)
+/**
+ Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_NO_RESOURCES) */
+#define OKL4_ERROR_NO_RESOURCES ((okl4_error_t)0x19U)
+/**
+ The operation failed because the pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ERROR_PIPE_BAD_STATE ((okl4_error_t)0x1aU)
+/**
+ The operation failed because there are no messages in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_EMPTY) */
+#define OKL4_ERROR_PIPE_EMPTY ((okl4_error_t)0x1bU)
+/**
+ The operation failed because there is no space available in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_FULL) */
+#define OKL4_ERROR_PIPE_FULL ((okl4_error_t)0x1cU)
+/**
+ The operation failed because the pipe is in reset or is not ready.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_NOT_READY) */
+#define OKL4_ERROR_PIPE_NOT_READY ((okl4_error_t)0x1dU)
+/**
+ The message was truncated because the receive buffer is too small.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ERROR_PIPE_RECV_OVERFLOW ((okl4_error_t)0x1eU)
+/**
+ The operation failed because at least one VCPU has a monitored
+ power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ERROR_POWER_VCPU_RESUMED ((okl4_error_t)0x1fU)
+/**
+ The operation requires a segment to be unused, or not attached
+ to an MMU context.
+*/
+/*lint -esym(621, OKL4_ERROR_SEGMENT_USED) */
+#define OKL4_ERROR_SEGMENT_USED ((okl4_error_t)0x20U)
+/*lint -esym(621, OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED ((okl4_error_t)0x21U)
+/**
+ The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_ACTIVE) */
+#define OKL4_ERROR_TIMER_ACTIVE ((okl4_error_t)0x22U)
+/**
+ The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_CANCELLED) */
+#define OKL4_ERROR_TIMER_CANCELLED ((okl4_error_t)0x23U)
+/**
+ The operation failed due to a temporary condition and may be retried.
+*/
+/*lint -esym(621, OKL4_ERROR_TRY_AGAIN) */
+#define OKL4_ERROR_TRY_AGAIN ((okl4_error_t)0x24U)
+/**
+ The non-blocking operation failed because it would
+ block on a resource.
+*/
+/*lint -esym(621, OKL4_ERROR_WOULD_BLOCK) */
+#define OKL4_ERROR_WOULD_BLOCK ((okl4_error_t)0x25U)
+/**
+ Insufficient resources
+*/
+/*lint -esym(621, OKL4_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ERROR_ALLOC_EXHAUSTED ((okl4_error_t)0x26U)
+/**
+ KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_0) */
+#define OKL4_ERROR_KSP_ERROR_0 ((okl4_error_t)0x10000010U)
+/**
+ KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_1) */
+#define OKL4_ERROR_KSP_ERROR_1 ((okl4_error_t)0x10000011U)
+/**
+ KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_2) */
+#define OKL4_ERROR_KSP_ERROR_2 ((okl4_error_t)0x10000012U)
+/**
+ KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_3) */
+#define OKL4_ERROR_KSP_ERROR_3 ((okl4_error_t)0x10000013U)
+/**
+ KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_4) */
+#define OKL4_ERROR_KSP_ERROR_4 ((okl4_error_t)0x10000014U)
+/**
+ KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_5) */
+#define OKL4_ERROR_KSP_ERROR_5 ((okl4_error_t)0x10000015U)
+/**
+ KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_6) */
+#define OKL4_ERROR_KSP_ERROR_6 ((okl4_error_t)0x10000016U)
+/**
+ KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_7) */
+#define OKL4_ERROR_KSP_ERROR_7 ((okl4_error_t)0x10000017U)
+/**
+ Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ERROR_KSP_INVALID_ARG ((okl4_error_t)0x80000001U)
+/**
+ The KSP does not implement the requested feature.
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_KSP_NOT_IMPLEMENTED ((okl4_error_t)0x80000002U)
+/**
+ The user did not supply rights for the requested feature.
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS ((okl4_error_t)0x80000003U)
+/**
+ The interrupt is already registered.
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ERROR_KSP_INTERRUPT_REGISTERED ((okl4_error_t)0x80000004U)
+/**
+ Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_NOT_IMPLEMENTED ((okl4_error_t)0xffffffffU)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ERROR_MAX) */
+#define OKL4_ERROR_MAX ((okl4_error_t)0xffffffffU)
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var);
+
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_ERROR_ALREADY_STARTED) ||
+ (var == OKL4_ERROR_ALREADY_STOPPED) ||
+ (var == OKL4_ERROR_AXON_AREA_TOO_BIG) ||
+ (var == OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) ||
+ (var == OKL4_ERROR_AXON_INVALID_OFFSET) ||
+ (var == OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) ||
+ (var == OKL4_ERROR_AXON_QUEUE_NOT_READY) ||
+ (var == OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) ||
+ (var == OKL4_ERROR_CANCELLED) ||
+ (var == OKL4_ERROR_EXISTING_MAPPING) ||
+ (var == OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) ||
+ (var == OKL4_ERROR_INTERRUPTED) ||
+ (var == OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) ||
+ (var == OKL4_ERROR_INTERRUPT_INVALID_IRQ) ||
+ (var == OKL4_ERROR_INTERRUPT_NOT_ATTACHED) ||
+ (var == OKL4_ERROR_INVALID_ARGUMENT) ||
+ (var == OKL4_ERROR_INVALID_DESIGNATOR) ||
+ (var == OKL4_ERROR_INVALID_POWER_STATE) ||
+ (var == OKL4_ERROR_INVALID_SEGMENT_INDEX) ||
+ (var == OKL4_ERROR_KSP_ERROR_0) ||
+ (var == OKL4_ERROR_KSP_ERROR_1) ||
+ (var == OKL4_ERROR_KSP_ERROR_2) ||
+ (var == OKL4_ERROR_KSP_ERROR_3) ||
+ (var == OKL4_ERROR_KSP_ERROR_4) ||
+ (var == OKL4_ERROR_KSP_ERROR_5) ||
+ (var == OKL4_ERROR_KSP_ERROR_6) ||
+ (var == OKL4_ERROR_KSP_ERROR_7) ||
+ (var == OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) ||
+ (var == OKL4_ERROR_KSP_INTERRUPT_REGISTERED) ||
+ (var == OKL4_ERROR_KSP_INVALID_ARG) ||
+ (var == OKL4_ERROR_KSP_NOT_IMPLEMENTED) ||
+ (var == OKL4_ERROR_KSP_OK) ||
+ (var == OKL4_ERROR_MEMORY_FAULT) ||
+ (var == OKL4_ERROR_MISSING_MAPPING) ||
+ (var == OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) ||
+ (var == OKL4_ERROR_NOT_IMPLEMENTED) ||
+ (var == OKL4_ERROR_NOT_IN_SEGMENT) ||
+ (var == OKL4_ERROR_NOT_LAST_CPU) ||
+ (var == OKL4_ERROR_NO_RESOURCES) ||
+ (var == OKL4_ERROR_OK) ||
+ (var == OKL4_ERROR_PIPE_BAD_STATE) ||
+ (var == OKL4_ERROR_PIPE_EMPTY) ||
+ (var == OKL4_ERROR_PIPE_FULL) ||
+ (var == OKL4_ERROR_PIPE_NOT_READY) ||
+ (var == OKL4_ERROR_PIPE_RECV_OVERFLOW) ||
+ (var == OKL4_ERROR_POWER_VCPU_RESUMED) ||
+ (var == OKL4_ERROR_SEGMENT_USED) ||
+ (var == OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) ||
+ (var == OKL4_ERROR_TIMER_ACTIVE) ||
+ (var == OKL4_ERROR_TIMER_CANCELLED) ||
+ (var == OKL4_ERROR_TRY_AGAIN) ||
+ (var == OKL4_ERROR_WOULD_BLOCK) ||
+ (var == OKL4_ERROR_ALLOC_EXHAUSTED));
+}
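+
+/*
+ * Usage sketch, not part of the generated interface: a hypervisor call is
+ * assumed to have returned an okl4_error_t. The function name
+ * "example_call_succeeded" is illustrative only.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+example_call_succeeded(okl4_error_t sys_result)
+{
+    /* Treat values outside the enumeration as failures. */
+    if (!okl4_error_is_element_of(sys_result)) {
+        return (okl4_bool_t)0;
+    }
+    /* OKL4_ERROR_TRY_AGAIN and OKL4_ERROR_WOULD_BLOCK still count as
+       failures here; a caller that wants to retry can test for them
+       separately. */
+    return (okl4_bool_t)(sys_result == OKL4_ERROR_OK);
+}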
+
+
+/**
+
+*/
+
+struct okl4_firmware_segment {
+ okl4_laddr_t copy_addr;
+ okl4_laddr_t exec_addr;
+ okl4_lsize_t filesz;
+ okl4_lsize_t memsz_diff;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_firmware_segments_info {
+ okl4_count_t num_segments;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ struct okl4_firmware_segment segments[]; /*lint --e{9038} flex array */
+};
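+
+/*
+ * Usage sketch, assuming (as the flexible array member suggests) that
+ * segments[] holds num_segments entries. "example_total_filesz" is an
+ * illustrative name, not part of this header.
+ */
+OKL4_FORCE_INLINE okl4_lsize_t
+example_total_filesz(const struct okl4_firmware_segments_info *info)
+{
+    okl4_lsize_t total = (okl4_lsize_t)0U;
+    okl4_count_t i;
+
+    for (i = (okl4_count_t)0U; i < info->num_segments; i++) {
+        total += info->segments[i].filesz;
+    }
+    return total;
+}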
+
+
+
+
+/**
+ - BIT 1 - @ref OKL4_MASK_EDGE_GICD_ICFGR
+*/
+
+/*lint -esym(621, okl4_gicd_icfgr_t) */
+typedef uint32_t okl4_gicd_icfgr_t;
+
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x);
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge);
+
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x);
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_ICFGR_EDGE_MASK) */
+#define OKL4_GICD_ICFGR_EDGE_MASK ((okl4_gicd_icfgr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_MASK_EDGE_GICD_ICFGR ((okl4_gicd_icfgr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/*lint -sem(okl4_gicd_icfgr_getedge, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_gicd_icfgr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_gicd_icfgr_setedge, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_icfgr_setedge) */
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_gicd_icfgr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_edge;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x)
+{
+ *x = (okl4_gicd_icfgr_t)0U;
+}
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_gicd_icfgr_t x = (okl4_gicd_icfgr_t)p;
+ (void)force;
+ return x;
+}
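+
+/*
+ * Usage sketch: build an okl4_gicd_icfgr_t value with the EDGE bit set,
+ * using only the accessors defined above. "example_make_edge_icfgr" is an
+ * illustrative name; applying the value to the virtual GIC is outside the
+ * scope of this header.
+ */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+example_make_edge_icfgr(void)
+{
+    okl4_gicd_icfgr_t cfg;
+
+    okl4_gicd_icfgr_init(&cfg);                    /* cfg == 0 */
+    okl4_gicd_icfgr_setedge(&cfg, (okl4_bool_t)1);
+    return cfg;                                    /* OKL4_MASK_EDGE_GICD_ICFGR now set */
+}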
+
+
+
+
+typedef uint32_t okl4_sgi_target_t;
+
+/*lint -esym(621, OKL4_SGI_TARGET_LISTED) */
+#define OKL4_SGI_TARGET_LISTED ((okl4_sgi_target_t)0x0U)
+/*lint -esym(621, OKL4_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_SGI_TARGET_ALL_OTHERS ((okl4_sgi_target_t)0x1U)
+/*lint -esym(621, OKL4_SGI_TARGET_SELF) */
+#define OKL4_SGI_TARGET_SELF ((okl4_sgi_target_t)0x2U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_MAX) */
+#define OKL4_SGI_TARGET_MAX ((okl4_sgi_target_t)0x2U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_INVALID) */
+#define OKL4_SGI_TARGET_INVALID ((okl4_sgi_target_t)0xffffffffU)
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var);
+
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_SGI_TARGET_LISTED) ||
+ (var == OKL4_SGI_TARGET_ALL_OTHERS) ||
+ (var == OKL4_SGI_TARGET_SELF));
+}
+
+
+/**
+ - BITS 3..0 - @ref OKL4_MASK_SGIINTID_GICD_SGIR
+ - BIT 15 - @ref OKL4_MASK_NSATT_GICD_SGIR
+ - BITS 23..16 - @ref OKL4_MASK_CPUTARGETLIST_GICD_SGIR
+ - BITS 25..24 - @ref OKL4_MASK_TARGETLISTFILTER_GICD_SGIR
+*/
+
+/*lint -esym(621, okl4_gicd_sgir_t) */
+typedef uint32_t okl4_gicd_sgir_t;
+
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid);
+
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt);
+
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist);
+
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter);
+
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x);
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_SGIR_SGIINTID_MASK) */
+#define OKL4_GICD_SGIR_SGIINTID_MASK ((okl4_gicd_sgir_t)15U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_MASK_SGIINTID_GICD_SGIR ((okl4_gicd_sgir_t)15U)
+/*lint -esym(621, OKL4_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_GICD_SGIR_NSATT_MASK) */
+#define OKL4_GICD_SGIR_NSATT_MASK ((okl4_gicd_sgir_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NSATT_GICD_SGIR) */
+#define OKL4_MASK_NSATT_GICD_SGIR ((okl4_gicd_sgir_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_GICD_SGIR_CPUTARGETLIST_MASK) */
+#define OKL4_GICD_SGIR_CPUTARGETLIST_MASK ((okl4_gicd_sgir_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_MASK_CPUTARGETLIST_GICD_SGIR ((okl4_gicd_sgir_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_GICD_SGIR_TARGETLISTFILTER_MASK) */
+#define OKL4_GICD_SGIR_TARGETLISTFILTER_MASK ((okl4_gicd_sgir_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_MASK_TARGETLISTFILTER_GICD_SGIR ((okl4_gicd_sgir_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
+
+/*lint -sem(okl4_gicd_sgir_getsgiintid, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x)
+{
+ okl4_interrupt_number_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 4;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_interrupt_number_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setsgiintid, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_gicd_sgir_setsgiintid) */
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 4;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_sgiintid;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getnsatt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 15;
+ _Bool field : 1;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setnsatt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_sgir_setnsatt) */
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 15;
+ _Bool field : 1;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_nsatt;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getcputargetlist, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x)
+{
+ uint8_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 8;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint8_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setcputargetlist, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_gicd_sgir_setcputargetlist) */
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 8;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_cputargetlist;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_gettargetlistfilter, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x)
+{
+ okl4_sgi_target_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ uint32_t field : 2;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_sgi_target_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_settargetlistfilter, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_gicd_sgir_settargetlistfilter) */
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ uint32_t field : 2;
+ } bits;
+ okl4_gicd_sgir_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_targetlistfilter;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x)
+{
+ *x = (okl4_gicd_sgir_t)32768U;
+}
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_gicd_sgir_t x = (okl4_gicd_sgir_t)p;
+ (void)force;
+ return x;
+}
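+
+/*
+ * Usage sketch: compose a GICD_SGIR value that requests SGI 5 on all other
+ * CPUs. The name "example_make_sgir_broadcast" and the interrupt number are
+ * illustrative; writing the value to the distributor is not shown here.
+ */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+example_make_sgir_broadcast(void)
+{
+    okl4_gicd_sgir_t sgir;
+
+    okl4_gicd_sgir_init(&sgir);
+    okl4_gicd_sgir_setsgiintid(&sgir, (okl4_interrupt_number_t)5U);
+    okl4_gicd_sgir_settargetlistfilter(&sgir, OKL4_SGI_TARGET_ALL_OTHERS);
+    return sgir;
+}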
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ The okl4_kmmu_t structure is used to represent a kernel MMU
+ context.
+*/
+
+struct okl4_kmmu {
+ okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+ The `okl4_ksp_arg_t` type represents an unsigned, machine-native
+ register-sized integer value used for KSP call arguments. Important: it is
+ truncated to the guest register size when the guest register size is
+ smaller than the kernel register size.
+*/
+
+typedef okl4_register_t okl4_ksp_arg_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_ksp_user_agent {
+ okl4_kcap_t kcap;
+ okl4_interrupt_number_t virq;
+};
+
+
+
+
+
+typedef uint32_t okl4_ksp_vdevice_class_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_tr_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_pipe_data {
+ okl4_kcap_t kcap;
+ okl4_irq_t virq;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_pipe_ep_data {
+ struct okl4_pipe_data rx;
+ struct okl4_pipe_data tx;
+};
+
+
+
+
+
+typedef uint32_t okl4_link_role_t;
+
+/*lint -esym(621, OKL4_LINK_ROLE_SYMMETRIC) */
+#define OKL4_LINK_ROLE_SYMMETRIC ((okl4_link_role_t)0x0U)
+/*lint -esym(621, OKL4_LINK_ROLE_SERVER) */
+#define OKL4_LINK_ROLE_SERVER ((okl4_link_role_t)0x1U)
+/*lint -esym(621, OKL4_LINK_ROLE_CLIENT) */
+#define OKL4_LINK_ROLE_CLIENT ((okl4_link_role_t)0x2U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_MAX) */
+#define OKL4_LINK_ROLE_MAX ((okl4_link_role_t)0x2U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_INVALID) */
+#define OKL4_LINK_ROLE_INVALID ((okl4_link_role_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var);
+
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_LINK_ROLE_SYMMETRIC) ||
+ (var == OKL4_LINK_ROLE_SERVER) ||
+ (var == OKL4_LINK_ROLE_CLIENT));
+}
+
+
+
+typedef uint32_t okl4_link_transport_type_t;
+
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_link_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_LINK_TRANSPORT_TYPE_AXONS ((okl4_link_transport_type_t)0x1U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_LINK_TRANSPORT_TYPE_PIPES ((okl4_link_transport_type_t)0x2U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_LINK_TRANSPORT_TYPE_MAX ((okl4_link_transport_type_t)0x2U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_LINK_TRANSPORT_TYPE_INVALID ((okl4_link_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var);
+
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) ||
+ (var == OKL4_LINK_TRANSPORT_TYPE_AXONS) ||
+ (var == OKL4_LINK_TRANSPORT_TYPE_PIPES));
+}
+
+
+/**
+
+*/
+
+struct okl4_link {
+ __ptr64(okl4_string_t, name);
+ __ptr64(void *, opaque);
+ __ptr64(okl4_string_t, partner_name);
+ okl4_link_role_t role;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ union {
+ struct {
+ struct okl4_virtmem_item buffer;
+ okl4_irq_t virq_in;
+ okl4_kcap_t virq_out;
+ } shared_buffer;
+
+ struct {
+ struct okl4_axon_ep_data axon_ep;
+ okl4_ksize_t message_size;
+ okl4_count_t queue_length;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ } axons;
+
+ struct {
+ okl4_ksize_t message_size;
+ struct okl4_pipe_ep_data pipe_ep;
+ okl4_count_t queue_length;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ } pipes;
+
+ } transport;
+
+ okl4_link_transport_type_t transport_type;
+ _okl4_padding_t __padding4_4;
+ _okl4_padding_t __padding5_5;
+ _okl4_padding_t __padding6_6;
+ _okl4_padding_t __padding7_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_links {
+ okl4_count_t num_links;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64_array(struct okl4_link *, links)[]; /*lint --e{9038} flex array */
+};
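+
+/*
+ * Usage sketch: read the per-message size of a link that has already been
+ * obtained (for example from an okl4_links list in the environment). Only
+ * the plain union members are accessed; "example_link_message_size" is an
+ * illustrative name, and returning 0 for shared-buffer links is an
+ * assumption, since that transport carries no per-message size field.
+ */
+OKL4_FORCE_INLINE okl4_ksize_t
+example_link_message_size(const struct okl4_link *link)
+{
+    switch (link->transport_type) {
+    case OKL4_LINK_TRANSPORT_TYPE_AXONS:
+        return link->transport.axons.message_size;
+    case OKL4_LINK_TRANSPORT_TYPE_PIPES:
+        return link->transport.pipes.message_size;
+    case OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER:
+    default:
+        return (okl4_ksize_t)0U;
+    }
+}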
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_tr_t;
+
+
+
+
+/**
+ The okl4_machine_info_t structure holds machine-specific
+ constants that are only known at weave-time. Objects of this
+ type are typically obtained from the OKL4 environment.
+*/
+
+struct okl4_machine_info {
+ okl4_ksize_t l1_cache_line_size;
+ okl4_ksize_t l2_cache_line_size;
+ okl4_count_t num_cpus;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_merged_physpool {
+ okl4_paddr_t phys_addr;
+ okl4_count_t num_segments;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ struct okl4_virtmem_item segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef uint32_t okl4_microseconds_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_microvisor_timer {
+ okl4_kcap_t kcap;
+ okl4_irq_t virq;
+};
+
+
+
+
+/**
+ - BITS 15..0 - @ref OKL4_MASK_ERROR_MMU_LOOKUP_INDEX
+ - BITS 31..16 - @ref OKL4_MASK_INDEX_MMU_LOOKUP_INDEX
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_index_t) */
+typedef uint32_t okl4_mmu_lookup_index_t;
+
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error);
+
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index);
+
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_ERROR_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_ERROR_MASK ((okl4_mmu_lookup_index_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_ERROR_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_INDEX_MASK ((okl4_mmu_lookup_index_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_INDEX_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
+
+/*lint -sem(okl4_mmu_lookup_index_geterror, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x)
+{
+ okl4_error_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ okl4_mmu_lookup_index_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_error_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_seterror, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_seterror) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ okl4_mmu_lookup_index_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_error;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_index_getindex, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x)
+{
+ okl4_count_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 16;
+ } bits;
+ okl4_mmu_lookup_index_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_count_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_setindex, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_setindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 16;
+ } bits;
+ okl4_mmu_lookup_index_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_index;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x)
+{
+ *x = (okl4_mmu_lookup_index_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_mmu_lookup_index_t x = (okl4_mmu_lookup_index_t)p;
+ (void)force;
+ return x;
+}
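+
+/*
+ * Usage sketch: split an MMU lookup result into its error and index halves.
+ * "example_decode_lookup" is an illustrative name; the result value itself
+ * would come from a lookup operation that is not part of this header.
+ */
+OKL4_FORCE_INLINE okl4_error_t
+example_decode_lookup(okl4_mmu_lookup_index_t result, okl4_count_t *index_out)
+{
+    *index_out = okl4_mmu_lookup_index_getindex(&result);
+    return okl4_mmu_lookup_index_geterror(&result);
+}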
+
+
+
+/**
+ - BITS 9..0 - @ref OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE
+ - BITS 63..10 - @ref OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_size_t) */
+typedef okl4_register_t okl4_mmu_lookup_size_t;
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index);
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10);
+
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK ((okl4_mmu_lookup_size_t)1023U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)1023U)
+/*lint -esym(621, OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK ((okl4_mmu_lookup_size_t)18014398509481983U << 10) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)18014398509481983U << 10)
+/*lint -esym(621, OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/*lint -sem(okl4_mmu_lookup_size_getsegindex, 1p, @n >= 0 && @n <= 1023) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x)
+{
+ okl4_count_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t field : 10;
+ } bits;
+ okl4_mmu_lookup_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_count_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsegindex, 2n >= 0 && 2n <= 1023) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsegindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t field : 10;
+ } bits;
+ okl4_mmu_lookup_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)_seg_index;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_size_getsize10, 1p, @n >= 0 && @n <= 18014398509481983) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x)
+{
+ okl4_register_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 10;
+ uint64_t field : 54;
+ } bits;
+ okl4_mmu_lookup_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_register_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsize10, 2n >= 0 && 2n <= 18014398509481983) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsize10) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint64_t _skip : 10;
+ uint64_t field : 54;
+ } bits;
+ okl4_mmu_lookup_size_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint64_t)_size_10;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x)
+{
+ *x = (okl4_mmu_lookup_size_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force)
+{
+ okl4_mmu_lookup_size_t x = (okl4_mmu_lookup_size_t)p;
+ (void)force;
+ return x;
+}
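+
+/*
+ * Usage sketch, under the assumption that the size_10 field (bits 63..10)
+ * holds the lookup size with its low 10 bits dropped, so the size in bytes
+ * is recovered by shifting left by OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE.
+ * "example_lookup_size_bytes" is an illustrative name.
+ */
+OKL4_FORCE_INLINE okl4_register_t
+example_lookup_size_bytes(const okl4_mmu_lookup_size_t *x)
+{
+    return okl4_mmu_lookup_size_getsize10(x) <<
+            OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE;
+}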
+
+
+
+
+typedef uint64_t okl4_nanoseconds_t;
+
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS ((okl4_nanoseconds_t)(36028797018963968U))
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS ((okl4_nanoseconds_t)(1000000U))
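+
+/*
+ * Usage sketch: clamp a requested timer period to the documented bounds
+ * before programming a microvisor timer. "example_clamp_timer_period" is an
+ * illustrative name only.
+ */
+OKL4_FORCE_INLINE okl4_nanoseconds_t
+example_clamp_timer_period(okl4_nanoseconds_t period)
+{
+    if (period < OKL4_TIMER_MIN_PERIOD_NS) {
+        return OKL4_TIMER_MIN_PERIOD_NS;
+    }
+    if (period > OKL4_TIMER_MAX_PERIOD_NS) {
+        return OKL4_TIMER_MAX_PERIOD_NS;
+    }
+    return period;
+}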
+
+
+
+/**
+ - BITS 2..0 - @ref _OKL4_MASK_RWX_PAGE_ATTRIBUTE
+ - BITS 31..4 - @ref _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE
+*/
+
+/*lint -esym(621, _okl4_page_attribute_t) */
+typedef uint32_t _okl4_page_attribute_t;
+
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx);
+
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib);
+
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x);
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_RWX_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_RWX_MASK ((_okl4_page_attribute_t)7U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_RWX_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)7U)
+/*lint -esym(621, _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK ((_okl4_page_attribute_t)268435455U << 4) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)268435455U << 4)
+/*lint -esym(621, _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/*lint -sem(_okl4_page_attribute_getrwx, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x)
+{
+ okl4_page_perms_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 3;
+ } bits;
+ _okl4_page_attribute_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_page_perms_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setrwx, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, _okl4_page_attribute_setrwx) */
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 3;
+ } bits;
+ _okl4_page_attribute_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_rwx;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_page_attribute_getattrib, 1p, @n >= 0 && @n <= 268435455) */
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x)
+{
+ okl4_page_cache_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ uint32_t field : 28;
+ } bits;
+ _okl4_page_attribute_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_page_cache_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setattrib, 2n >= 0 && 2n <= 268435455) */
+/*lint -esym(714, _okl4_page_attribute_setattrib) */
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ uint32_t field : 28;
+ } bits;
+ _okl4_page_attribute_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_attrib;
+ *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x)
+{
+ *x = (_okl4_page_attribute_t)0U;
+}
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force)
+{
+ _okl4_page_attribute_t x = (_okl4_page_attribute_t)p;
+ (void)force;
+ return x;
+}
+
+
+
+/**
+ - BIT 0 - @ref OKL4_MASK_DO_OP_PIPE_CONTROL
+ - BITS 3..1 - @ref OKL4_MASK_OPERATION_PIPE_CONTROL
+*/
+
+/*lint -esym(621, okl4_pipe_control_t) */
+typedef uint8_t okl4_pipe_control_t;
+
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op);
+
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation);
+
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x);
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED ((okl4_pipe_control_t)(4U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET ((okl4_pipe_control_t)(0U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED ((okl4_pipe_control_t)(3U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY ((okl4_pipe_control_t)(2U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY ((okl4_pipe_control_t)(1U))
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_DO_OP_MASK) */
+#define OKL4_PIPE_CONTROL_DO_OP_MASK (okl4_pipe_control_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_MASK_DO_OP_PIPE_CONTROL (okl4_pipe_control_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OPERATION_MASK) */
+#define OKL4_PIPE_CONTROL_OPERATION_MASK (okl4_pipe_control_t)(7U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_MASK_OPERATION_PIPE_CONTROL (okl4_pipe_control_t)(7U << 1)
+/*lint -esym(621, OKL4_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_WIDTH_OPERATION_PIPE_CONTROL (3)
+
+
+/*lint -sem(okl4_pipe_control_getdoop, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_control_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_control_setdoop, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_control_setdoop) */
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_control_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_do_op;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_control_getoperation, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x)
+{
+ uint8_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ uint32_t field : 3;
+ } bits;
+ okl4_pipe_control_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint8_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_control_setoperation, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_pipe_control_setoperation) */
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ uint32_t field : 3;
+ } bits;
+ okl4_pipe_control_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_operation;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x)
+{
+ *x = (okl4_pipe_control_t)0U;
+}
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force)
+{
+ okl4_pipe_control_t x = (okl4_pipe_control_t)p;
+ (void)force;
+ return x;
+}
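+
+/*
+ * Usage sketch: build a control byte that requests the SET_TX_READY
+ * operation; the DO_OP bit is assumed to mark the operation as one to be
+ * applied. "example_make_set_tx_ready" is an illustrative name, and
+ * delivering the byte to the pipe is outside the scope of this header.
+ */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+example_make_set_tx_ready(void)
+{
+    okl4_pipe_control_t control;
+
+    okl4_pipe_control_init(&control);
+    okl4_pipe_control_setdoop(&control, (okl4_bool_t)1);
+    okl4_pipe_control_setoperation(&control, OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+    return control;
+}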
+
+
+
+/**
+ - BIT 0 - @ref OKL4_MASK_RESET_PIPE_STATE
+ - BIT 1 - @ref OKL4_MASK_HALTED_PIPE_STATE
+ - BIT 2 - @ref OKL4_MASK_RX_READY_PIPE_STATE
+ - BIT 3 - @ref OKL4_MASK_TX_READY_PIPE_STATE
+ - BIT 4 - @ref OKL4_MASK_RX_AVAILABLE_PIPE_STATE
+ - BIT 5 - @ref OKL4_MASK_TX_AVAILABLE_PIPE_STATE
+ - BIT 6 - @ref OKL4_MASK_WAITING_PIPE_STATE
+ - BIT 7 - @ref OKL4_MASK_OVERQUOTA_PIPE_STATE
+*/
+
+/*lint -esym(621, okl4_pipe_state_t) */
+typedef uint8_t okl4_pipe_state_t;
+
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset);
+
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted);
+
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready);
+
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready);
+
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available);
+
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available);
+
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting);
+
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota);
+
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x);
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_PIPE_STATE_RESET_MASK) */
+#define OKL4_PIPE_STATE_RESET_MASK (okl4_pipe_state_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESET_PIPE_STATE) */
+#define OKL4_MASK_RESET_PIPE_STATE (okl4_pipe_state_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_HALTED_MASK) */
+#define OKL4_PIPE_STATE_HALTED_MASK (okl4_pipe_state_t)(1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_HALTED_PIPE_STATE) */
+#define OKL4_MASK_HALTED_PIPE_STATE (okl4_pipe_state_t)(1U << 1)
+/*lint -esym(621, OKL4_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_READY_MASK) */
+#define OKL4_PIPE_STATE_RX_READY_MASK (okl4_pipe_state_t)(1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_MASK_RX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 2)
+/*lint -esym(621, OKL4_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_READY_MASK) */
+#define OKL4_PIPE_STATE_TX_READY_MASK (okl4_pipe_state_t)(1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_MASK_TX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 3)
+/*lint -esym(621, OKL4_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_RX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_RX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 4)
+/*lint -esym(621, OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_TX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_TX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 5)
+/*lint -esym(621, OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_WAITING_MASK) */
+#define OKL4_PIPE_STATE_WAITING_MASK (okl4_pipe_state_t)(1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WAITING_PIPE_STATE) */
+#define OKL4_MASK_WAITING_PIPE_STATE (okl4_pipe_state_t)(1U << 6)
+/*lint -esym(621, OKL4_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_OVERQUOTA_MASK) */
+#define OKL4_PIPE_STATE_OVERQUOTA_MASK (okl4_pipe_state_t)(1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_MASK_OVERQUOTA_PIPE_STATE (okl4_pipe_state_t)(1U << 7)
+/*lint -esym(621, OKL4_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
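+
+/*
+ * Usage sketch using the getters declared above: one plausible "can
+ * transmit" test combining the TX_READY, TX_AVAILABLE and HALTED bits. The
+ * exact combination is an assumption, and "example_pipe_can_tx" is an
+ * illustrative name.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+example_pipe_can_tx(const okl4_pipe_state_t *state)
+{
+    return (okl4_bool_t)(okl4_pipe_state_gettxready(state) &&
+            okl4_pipe_state_gettxavailable(state) &&
+            !okl4_pipe_state_gethalted(state));
+}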
+
+/*lint -sem(okl4_pipe_state_getreset, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_setreset, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setreset) */
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_reset;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gethalted, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_sethalted, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_sethalted) */
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_halted;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxready) */
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_rx_ready;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 3;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxready) */
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 3;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_tx_ready;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_rx_available;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 5;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 5;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_tx_available;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getwaiting, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 6;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_setwaiting, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setwaiting) */
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 6;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_waiting;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getoverquota, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 7;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_pipe_state_setoverquota, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setoverquota) */
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 7;
+ _Bool field : 1;
+ } bits;
+ okl4_pipe_state_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_overquota;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x)
+{
+ *x = (okl4_pipe_state_t)1U;
+}
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force)
+{
+ okl4_pipe_state_t x = (okl4_pipe_state_t)p;
+ (void)force;
+ return x;
+}
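+
+/*
+ * Illustrative usage sketch (not part of the generated interface): building
+ * up a pipe state word with the accessors above. okl4_pipe_state_init()
+ * leaves the reset bit set, which the sketch clears before marking the
+ * transmit side ready.
+ *
+ *     okl4_pipe_state_t state;
+ *
+ *     okl4_pipe_state_init(&state);
+ *     okl4_pipe_state_setreset(&state, (okl4_bool_t)0);
+ *     okl4_pipe_state_settxready(&state, (okl4_bool_t)1);
+ *
+ *     if (okl4_pipe_state_gettxready(&state)) {
+ *         // the tx_ready bit is now set in state
+ *     }
+ */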
+
+
+
+
+typedef uint32_t okl4_power_state_t;
+
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE ((okl4_power_state_t)(0U))
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE ((okl4_power_state_t)(256U))
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF ((okl4_power_state_t)(1U))
+
+
+
+/**
+ The okl4_priority_t type represents a thread scheduling priority.
+ Valid priorities range from [0, CONFIG\_SCHEDULER\_NUM\_PRIOS).
+*/
+
+typedef int8_t okl4_priority_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_tr_t;
+
+
+
+
+/**
+ The okl4_register_set_t type is an enumeration identifying one of
+ the register sets supported by the host machine. This includes the
+ general-purpose registers, along with other CPU-specific register
+ sets such as floating point or vector registers.
+
+ - @ref OKL4_REGISTER_SET_CPU_REGS
+ - @ref OKL4_REGISTER_SET_VFP_REGS
+ - @ref OKL4_REGISTER_SET_VFP_CTRL_REGS
+ - @ref OKL4_REGISTER_SET_VFP64_REGS
+ - @ref OKL4_REGISTER_SET_VFP128_REGS
+ - @ref OKL4_REGISTER_SET_MAX
+ - @ref OKL4_REGISTER_SET_INVALID
+*/
+
+typedef uint32_t okl4_register_set_t;
+
+/*lint -esym(621, OKL4_REGISTER_SET_CPU_REGS) */
+#define OKL4_REGISTER_SET_CPU_REGS ((okl4_register_set_t)0x0U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_REGS) */
+#define OKL4_REGISTER_SET_VFP_REGS ((okl4_register_set_t)0x1U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_REGISTER_SET_VFP_CTRL_REGS ((okl4_register_set_t)0x2U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP64_REGS) */
+#define OKL4_REGISTER_SET_VFP64_REGS ((okl4_register_set_t)0x3U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP128_REGS) */
+#define OKL4_REGISTER_SET_VFP128_REGS ((okl4_register_set_t)0x4U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_MAX) */
+#define OKL4_REGISTER_SET_MAX ((okl4_register_set_t)0x4U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_INVALID) */
+#define OKL4_REGISTER_SET_INVALID ((okl4_register_set_t)0xffffffffU)
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var);
+
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_REGISTER_SET_CPU_REGS) ||
+ (var == OKL4_REGISTER_SET_VFP_REGS) ||
+ (var == OKL4_REGISTER_SET_VFP_CTRL_REGS) ||
+ (var == OKL4_REGISTER_SET_VFP64_REGS) ||
+ (var == OKL4_REGISTER_SET_VFP128_REGS));
+}
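+
+/*
+ * Illustrative usage sketch (not part of the generated interface): rejecting
+ * a register set identifier that is not one of the defined values before
+ * using it.
+ *
+ *     okl4_register_set_t set = OKL4_REGISTER_SET_VFP_REGS;
+ *
+ *     if (!okl4_register_set_is_element_of(set)) {
+ *         set = OKL4_REGISTER_SET_INVALID;    // unknown value: refuse it
+ *     }
+ */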
+
+
+
+typedef okl4_psize_t okl4_vsize_t;
+
+
+
+
+/**
+ The okl4_register_and_set_t type is a bitfield containing a register
+ set identifier of type okl4_register_set_t, and an index into that
+ register set.
+
+ - BITS 15..0 - @ref OKL4_MASK_OFFSET_REGISTER_AND_SET
+ - BITS 31..16 - @ref OKL4_MASK_SET_REGISTER_AND_SET
+*/
+
+/*lint -esym(621, okl4_register_and_set_t) */
+typedef uint32_t okl4_register_and_set_t;
+
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset);
+
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set);
+
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x);
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_REGISTER_AND_SET_OFFSET_MASK) */
+#define OKL4_REGISTER_AND_SET_OFFSET_MASK ((okl4_register_and_set_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_MASK_OFFSET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_REGISTER_AND_SET_SET_MASK) */
+#define OKL4_REGISTER_AND_SET_SET_MASK ((okl4_register_and_set_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_MASK_SET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/*lint -sem(okl4_register_and_set_getoffset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x)
+{
+ okl4_vsize_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ okl4_register_and_set_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_vsize_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_register_and_set_setoffset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setoffset) */
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ okl4_register_and_set_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_offset;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_register_and_set_getset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x)
+{
+ okl4_register_set_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 16;
+ } bits;
+ okl4_register_and_set_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_register_set_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_register_and_set_setset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setset) */
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 16;
+ } bits;
+ okl4_register_and_set_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_set;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x)
+{
+ *x = (okl4_register_and_set_t)0U;
+}
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_register_and_set_t x = (okl4_register_and_set_t)p;
+ (void)force;
+ return x;
+}
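+
+/*
+ * Illustrative usage sketch (not part of the generated interface): packing a
+ * selector for offset 5 within the VFP register set and reading both fields
+ * back.
+ *
+ *     okl4_register_and_set_t sel;
+ *
+ *     okl4_register_and_set_init(&sel);
+ *     okl4_register_and_set_setoffset(&sel, (okl4_vsize_t)5U);
+ *     okl4_register_and_set_setset(&sel, OKL4_REGISTER_SET_VFP_REGS);
+ *
+ *     // okl4_register_and_set_getset(&sel)    == OKL4_REGISTER_SET_VFP_REGS
+ *     // okl4_register_and_set_getoffset(&sel) == 5
+ */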
+
+
+
+
+struct okl4_cpu_registers {
+ okl4_register_t x[31];
+ okl4_register_t sp_el0;
+ okl4_register_t ip;
+ uint32_t cpsr;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ okl4_register_t sp_EL1;
+ okl4_register_t elr_EL1;
+ uint32_t spsr_EL1;
+ uint32_t spsr_abt;
+ uint32_t spsr_und;
+ uint32_t spsr_irq;
+ uint32_t spsr_fiq;
+ uint32_t csselr_EL1;
+ okl4_arm_sctlr_t sctlr_EL1;
+ uint32_t cpacr_EL1;
+ uint64_t ttbr0_EL1;
+ uint64_t ttbr1_EL1;
+ uint64_t tcr_EL1;
+ uint32_t dacr32_EL2;
+ uint32_t ifsr32_EL2;
+ uint32_t esr_EL1;
+ _okl4_padding_t __padding4_4; /**< Padding 8 */
+ _okl4_padding_t __padding5_5; /**< Padding 8 */
+ _okl4_padding_t __padding6_6; /**< Padding 8 */
+ _okl4_padding_t __padding7_7; /**< Padding 8 */
+ uint64_t far_EL1;
+ uint64_t par_EL1;
+ uint64_t mair_EL1;
+ uint64_t vbar_EL1;
+ uint32_t contextidr_EL1;
+ _okl4_padding_t __padding8_4; /**< Padding 8 */
+ _okl4_padding_t __padding9_5; /**< Padding 8 */
+ _okl4_padding_t __padding10_6; /**< Padding 8 */
+ _okl4_padding_t __padding11_7; /**< Padding 8 */
+ uint64_t tpidr_EL1;
+ uint64_t tpidrro_EL0;
+ uint64_t tpidr_EL0;
+ uint32_t pmcr_EL0;
+ _okl4_padding_t __padding12_4; /**< Padding 8 */
+ _okl4_padding_t __padding13_5; /**< Padding 8 */
+ _okl4_padding_t __padding14_6; /**< Padding 8 */
+ _okl4_padding_t __padding15_7; /**< Padding 8 */
+ uint64_t pmccntr_EL0;
+ uint32_t fpexc32_EL2;
+ uint32_t cntkctl_EL1;
+};
+
+
+
+
+
+
+/**
+ The okl4_cpu_registers_t type represents a set of CPU registers on the
+ native machine, comprising the general-purpose registers together with
+ the system registers listed in struct okl4_cpu_registers above.
+*/
+
+typedef struct okl4_cpu_registers okl4_cpu_registers_t;
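+
+/*
+ * Illustrative sketch (not part of the generated interface): preparing a
+ * register frame for a new context. The sketch assumes, from the field
+ * names only, that x[] holds the general-purpose registers, sp_el0 the EL0
+ * stack pointer and ip the instruction pointer; arg, stack_top and entry
+ * are hypothetical caller-supplied values.
+ *
+ *     struct okl4_cpu_registers regs = { { 0 } };
+ *
+ *     regs.x[0]   = (okl4_register_t)arg;        // hypothetical first argument
+ *     regs.sp_el0 = (okl4_register_t)stack_top;  // hypothetical stack top
+ *     regs.ip     = (okl4_register_t)entry;      // hypothetical entry point
+ */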
+
+
+
+
+/**
+ The `okl4_rights_t` type represents a set of operations that are allowed to
+ be performed using a given cap.
+*/
+
+typedef uint32_t okl4_rights_t;
+
+
+
+
+
+typedef uint64_t okl4_soc_time_t;
+
+
+
+
+/**
+ The okl4_schedule_profile_data structure holds scheduler profiling
+ counters: a timestamp, the accumulated CPU time, and counts of context
+ switches, CPU migrations, hardware interrupts and virtual interrupts.
+*/
+
+struct okl4_schedule_profile_data {
+ okl4_soc_time_t timestamp;
+ okl4_soc_time_t cpu_time;
+ okl4_count_t context_switches;
+ okl4_count_t cpu_migrations;
+ okl4_count_t cpu_hwirqs;
+ okl4_count_t cpu_virqs;
+};
+
+
+
+
+/**
+ - BIT 0 - @ref OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_scheduler_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_scheduler_virq_flags_t;
+
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK) */
+#define OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK ((okl4_scheduler_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS ((okl4_scheduler_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_scheduler_virq_flags_getpowersuspended, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_scheduler_virq_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_scheduler_virq_flags_setpowersuspended, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_scheduler_virq_flags_setpowersuspended) */
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_scheduler_virq_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_power_suspended;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x)
+{
+ *x = (okl4_scheduler_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+ okl4_scheduler_virq_flags_t x = (okl4_scheduler_virq_flags_t)p;
+ (void)force;
+ return x;
+}
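+
+/*
+ * Illustrative usage sketch (not part of the generated interface): decoding
+ * the power-suspended bit from a scheduler vIRQ payload. `payload` is a
+ * hypothetical uint64_t received with the interrupt.
+ *
+ *     okl4_scheduler_virq_flags_t flags =
+ *             okl4_scheduler_virq_flags_cast(payload, (okl4_bool_t)0);
+ *
+ *     if (okl4_scheduler_virq_flags_getpowersuspended(&flags)) {
+ *         // the flag reports the scheduling context as power-suspended
+ *     }
+ */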
+
+
+
+/**
+ The `okl4_scount_t` type represents a natural number of items or
+ iterations. Negative values represent errors. Use `okl4_count_t` if error
+ values are not required.
+*/
+
+typedef int32_t okl4_scount_t;
+
+
+
+
+/**
+ The SDK_VERSION value carries the global, SDK-wide software version.
+
+ - BITS 5..0 - @ref OKL4_MASK_MAINTENANCE_SDK_VERSION
+ - BITS 15..8 - @ref OKL4_MASK_RELEASE_SDK_VERSION
+ - BITS 21..16 - @ref OKL4_MASK_MINOR_SDK_VERSION
+ - BITS 27..24 - @ref OKL4_MASK_MAJOR_SDK_VERSION
+ - BIT 28 - @ref OKL4_MASK_RES0_FLAG_SDK_VERSION
+ - BIT 30 - @ref OKL4_MASK_DEV_FLAG_SDK_VERSION
+ - BIT 31 - @ref OKL4_MASK_FORMAT_FLAG_SDK_VERSION
+*/
+
+/*lint -esym(621, okl4_sdk_version_t) */
+typedef uint32_t okl4_sdk_version_t;
+
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag);
+
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag);
+
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag);
+
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major);
+
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor);
+
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release);
+
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance);
+
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x);
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SDK_VERSION_MAINTENANCE_MASK) */
+#define OKL4_SDK_VERSION_MAINTENANCE_MASK ((okl4_sdk_version_t)63U) /* Deprecated */
+/** Maintenance number */
+/*lint -esym(621, OKL4_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_MASK_MAINTENANCE_SDK_VERSION ((okl4_sdk_version_t)63U)
+/*lint -esym(621, OKL4_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_RELEASE_MASK) */
+#define OKL4_SDK_VERSION_RELEASE_MASK ((okl4_sdk_version_t)255U << 8) /* Deprecated */
+/** SDK Release Number */
+/*lint -esym(621, OKL4_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_MASK_RELEASE_SDK_VERSION ((okl4_sdk_version_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_SDK_VERSION_MINOR_MASK) */
+#define OKL4_SDK_VERSION_MINOR_MASK ((okl4_sdk_version_t)63U << 16) /* Deprecated */
+/** SDK Minor Number */
+/*lint -esym(621, OKL4_MASK_MINOR_SDK_VERSION) */
+#define OKL4_MASK_MINOR_SDK_VERSION ((okl4_sdk_version_t)63U << 16)
+/*lint -esym(621, OKL4_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_MAJOR_MASK) */
+#define OKL4_SDK_VERSION_MAJOR_MASK ((okl4_sdk_version_t)15U << 24) /* Deprecated */
+/** SDK Major Number */
+/*lint -esym(621, OKL4_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_MASK_MAJOR_SDK_VERSION ((okl4_sdk_version_t)15U << 24)
+/*lint -esym(621, OKL4_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_SDK_VERSION_RES0_FLAG_MASK) */
+#define OKL4_SDK_VERSION_RES0_FLAG_MASK ((okl4_sdk_version_t)1U << 28) /* Deprecated */
+/** Reserved */
+/*lint -esym(621, OKL4_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_MASK_RES0_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_DEV_FLAG_MASK) */
+#define OKL4_SDK_VERSION_DEV_FLAG_MASK ((okl4_sdk_version_t)1U << 30) /* Deprecated */
+/** Unreleased internal development version */
+/*lint -esym(621, OKL4_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_MASK_DEV_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_FORMAT_FLAG_MASK) */
+#define OKL4_SDK_VERSION_FORMAT_FLAG_MASK ((okl4_sdk_version_t)1U << 31) /* Deprecated */
+/** Format: 0 = Version format 1, 1 = Reserved */
+/*lint -esym(621, OKL4_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_MASK_FORMAT_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
+
+/*lint -sem(okl4_sdk_version_getmaintenance, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 6;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmaintenance, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setmaintenance) */
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 6;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_maintenance;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getrelease, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ uint32_t field : 8;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setrelease, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_sdk_version_setrelease) */
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ uint32_t field : 8;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_release;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getminor, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 6;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setminor, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setminor) */
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 6;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_minor;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getmajor, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ uint32_t field : 4;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmajor, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_sdk_version_setmajor) */
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 24;
+ uint32_t field : 4;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_major;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getres0flag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 28;
+ uint32_t field : 1;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setres0flag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setres0flag) */
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 28;
+ uint32_t field : 1;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_res0_flag;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getdevflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 30;
+ uint32_t field : 1;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setdevflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setdevflag) */
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 30;
+ uint32_t field : 1;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_dev_flag;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getformatflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 31;
+ uint32_t field : 1;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_sdk_version_setformatflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setformatflag) */
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 31;
+ uint32_t field : 1;
+ } bits;
+ okl4_sdk_version_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_format_flag;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x)
+{
+ *x = (okl4_sdk_version_t)0U;
+}
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_sdk_version_t x = (okl4_sdk_version_t)p;
+ (void)force;
+ return x;
+}
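+
+/*
+ * Illustrative usage sketch (not part of the generated interface): splitting
+ * a raw SDK version word into its numeric components. `raw_version` is a
+ * hypothetical uint32_t input.
+ *
+ *     okl4_sdk_version_t ver = okl4_sdk_version_cast(raw_version, (okl4_bool_t)0);
+ *
+ *     uint32_t major       = okl4_sdk_version_getmajor(&ver);
+ *     uint32_t minor       = okl4_sdk_version_getminor(&ver);
+ *     uint32_t release     = okl4_sdk_version_getrelease(&ver);
+ *     uint32_t maintenance = okl4_sdk_version_getmaintenance(&ver);
+ */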
+
+
+
+/**
+ The okl4_shared_buffer structure describes a single shared memory buffer:
+ its physical base address, its virtual memory mapping and the capability
+ that refers to it.
+*/
+
+struct okl4_shared_buffer {
+ okl4_paddr_t physical_base;
+ struct okl4_virtmem_item virtmem_item;
+ okl4_kcap_t cap;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+ The okl4_shared_buffers_array structure describes an array of shared
+ buffers: a pointer to the first okl4_shared_buffer descriptor and the
+ number of buffers in the array.
+*/
+
+struct okl4_shared_buffers_array {
+ __ptr64(struct okl4_shared_buffer *, buffers);
+ okl4_count_t num_buffers;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+typedef okl4_kcap_t okl4_signal_t;
+
+
+
+
+
+
+
+
+/**
+ The `okl4_sregister_t` type represents a signed, machine-native
+ register-sized integer value.
+*/
+
+typedef int64_t okl4_sregister_t;
+
+
+
+
+
+typedef uint64_t okl4_ticks_t;
+
+
+
+
+/**
+ - BIT 0 - @ref OKL4_MASK_ACTIVE_TIMER_FLAGS
+ - BIT 1 - @ref OKL4_MASK_PERIODIC_TIMER_FLAGS
+ - BIT 2 - @ref OKL4_MASK_ABSOLUTE_TIMER_FLAGS
+ - BIT 3 - @ref OKL4_MASK_UNITS_TIMER_FLAGS
+ - BIT 4 - @ref OKL4_MASK_ALIGN_TIMER_FLAGS
+ - BIT 5 - @ref OKL4_MASK_WATCHDOG_TIMER_FLAGS
+ - BIT 30 - @ref OKL4_MASK_RELOAD_TIMER_FLAGS
+ - BIT 31 - @ref OKL4_MASK_TIMESLICE_TIMER_FLAGS
+*/
+
+/*lint -esym(621, okl4_timer_flags_t) */
+typedef uint32_t okl4_timer_flags_t;
+
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active);
+
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic);
+
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute);
+
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units);
+
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align);
+
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog);
+
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload);
+
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice);
+
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x);
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_TIMER_FLAGS_ACTIVE_MASK) */
+#define OKL4_TIMER_FLAGS_ACTIVE_MASK ((okl4_timer_flags_t)1U) /* Deprecated */
+/** Indicates that the timer has a timeout set */
+/*lint -esym(621, OKL4_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_MASK_ACTIVE_TIMER_FLAGS ((okl4_timer_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_PERIODIC_MASK) */
+#define OKL4_TIMER_FLAGS_PERIODIC_MASK ((okl4_timer_flags_t)1U << 1) /* Deprecated */
+/** Indicates that the timer is periodic, otherwise it is one-shot */
+/*lint -esym(621, OKL4_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_MASK_PERIODIC_TIMER_FLAGS ((okl4_timer_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ABSOLUTE_MASK) */
+#define OKL4_TIMER_FLAGS_ABSOLUTE_MASK ((okl4_timer_flags_t)1U << 2) /* Deprecated */
+/** Indicates that the timeout value is absolute, otherwise it is relative */
+/*lint -esym(621, OKL4_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_MASK_ABSOLUTE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_UNITS_MASK) */
+#define OKL4_TIMER_FLAGS_UNITS_MASK ((okl4_timer_flags_t)1U << 3) /* Deprecated */
+/** Select time in UNITS of raw ticks */
+/*lint -esym(621, OKL4_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_MASK_UNITS_TIMER_FLAGS ((okl4_timer_flags_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ALIGN_MASK) */
+#define OKL4_TIMER_FLAGS_ALIGN_MASK ((okl4_timer_flags_t)1U << 4) /* Deprecated */
+/** Align first timeout of a periodic timer to a multiple of the timeout length */
+/*lint -esym(621, OKL4_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_MASK_ALIGN_TIMER_FLAGS ((okl4_timer_flags_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_WATCHDOG_MASK) */
+#define OKL4_TIMER_FLAGS_WATCHDOG_MASK ((okl4_timer_flags_t)1U << 5) /* Deprecated */
+/** Enter the kernel interactive debugger on timer expiry (no effect for production builds of the kernel) */
+/*lint -esym(621, OKL4_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_MASK_WATCHDOG_TIMER_FLAGS ((okl4_timer_flags_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_RELOAD_MASK) */
+#define OKL4_TIMER_FLAGS_RELOAD_MASK ((okl4_timer_flags_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_MASK_RELOAD_TIMER_FLAGS ((okl4_timer_flags_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_TIMESLICE_MASK) */
+#define OKL4_TIMER_FLAGS_TIMESLICE_MASK ((okl4_timer_flags_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_MASK_TIMESLICE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/*lint -sem(okl4_timer_flags_getactive, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setactive, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setactive) */
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_active;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getperiodic, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setperiodic, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setperiodic) */
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 1;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_periodic;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getabsolute, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setabsolute, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setabsolute) */
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 2;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_absolute;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getunits, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 3;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setunits, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setunits) */
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 3;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_units;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setalign) */
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 4;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_align;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getwatchdog, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 5;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setwatchdog, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setwatchdog) */
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 5;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_watchdog;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getreload, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 30;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_setreload, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setreload) */
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 30;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_reload;
+ *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_gettimeslice, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 31;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(okl4_timer_flags_settimeslice, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_settimeslice) */
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 31;
+ _Bool field : 1;
+ } bits;
+ okl4_timer_flags_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_timeslice;
+ *x = _conv.raw;
+}
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x)
+{
+ *x = (okl4_timer_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force)
+{
+ okl4_timer_flags_t x = (okl4_timer_flags_t)p;
+ (void)force;
+ return x;
+}
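+
+/*
+ * Illustrative usage sketch (not part of the generated interface): building
+ * flags for a periodic timeout measured in raw ticks, with the first expiry
+ * aligned to a multiple of the period (see the flag descriptions above).
+ *
+ *     okl4_timer_flags_t flags;
+ *
+ *     okl4_timer_flags_init(&flags);
+ *     okl4_timer_flags_setperiodic(&flags, (okl4_bool_t)1);
+ *     okl4_timer_flags_setunits(&flags, (okl4_bool_t)1);   // time in raw ticks
+ *     okl4_timer_flags_setalign(&flags, (okl4_bool_t)1);
+ */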
+
+
+
+
+struct _okl4_tracebuffer_buffer_header {
+ okl4_soc_time_t timestamp;
+ okl4_count_t wrap;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ okl4_ksize_t size;
+ okl4_ksize_t head;
+ okl4_ksize_t offset;
+};
+
+
+
+
+
+
+/**
+ The okl4_tracebuffer_env structure describes the trace buffer environment:
+ the virtual memory region containing the trace buffer and the virtual
+ interrupt number associated with it.
+*/
+
+struct okl4_tracebuffer_env {
+ struct okl4_virtmem_item virt;
+ okl4_interrupt_number_t virq;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+struct _okl4_tracebuffer_header {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t id;
+ okl4_count_t num_buffers;
+ okl4_ksize_t buffer_size;
+ okl4_atomic_uint32_t log_mask;
+ okl4_atomic_uint32_t active_buffer;
+ okl4_atomic_uint32_t grabbed_buffer;
+ okl4_atomic_uint32_t empty_buffers;
+ struct _okl4_tracebuffer_buffer_header buffers[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_class_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_TRACEPOINT_CLASS_THREAD_STATE ((okl4_tracepoint_class_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_TRACEPOINT_CLASS_SYSCALLS ((okl4_tracepoint_class_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_TRACEPOINT_CLASS_PRIMARY ((okl4_tracepoint_class_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_TRACEPOINT_CLASS_SECONDARY ((okl4_tracepoint_class_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_TRACEPOINT_CLASS_TERTIARY ((okl4_tracepoint_class_t)0x4U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_MAX) */
+#define OKL4_TRACEPOINT_CLASS_MAX ((okl4_tracepoint_class_t)0x4U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_TRACEPOINT_CLASS_INVALID ((okl4_tracepoint_class_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_TRACEPOINT_CLASS_THREAD_STATE) ||
+ (var == OKL4_TRACEPOINT_CLASS_SYSCALLS) ||
+ (var == OKL4_TRACEPOINT_CLASS_PRIMARY) ||
+ (var == OKL4_TRACEPOINT_CLASS_SECONDARY) ||
+ (var == OKL4_TRACEPOINT_CLASS_TERTIARY));
+}
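+
+/*
+ * Illustrative usage sketch (not part of the generated interface): checking
+ * that a class value taken from a trace record is one of the defined classes
+ * before dispatching on it. `record_class` is a hypothetical input.
+ *
+ *     okl4_tracepoint_class_t cls = (okl4_tracepoint_class_t)record_class;
+ *
+ *     if (okl4_tracepoint_class_is_element_of(cls) &&
+ *             cls == OKL4_TRACEPOINT_CLASS_SYSCALLS) {
+ *         // handle a syscall tracepoint
+ *     }
+ */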
+
+
+/**
+ - BITS 7..0 - @ref _OKL4_MASK_ID_TRACEPOINT_DESC
+ - BIT 8 - @ref _OKL4_MASK_USER_TRACEPOINT_DESC
+ - BIT 9 - @ref _OKL4_MASK_BIN_TRACEPOINT_DESC
+ - BITS 15..10 - @ref _OKL4_MASK_RECLEN_TRACEPOINT_DESC
+ - BITS 21..16 - @ref _OKL4_MASK_CPUID_TRACEPOINT_DESC
+ - BITS 27..22 - @ref _OKL4_MASK_THREADID_TRACEPOINT_DESC
+ - BITS 31..28 - @ref _OKL4_MASK__R1_TRACEPOINT_DESC
+*/
+
+/*lint -esym(621, _okl4_tracepoint_desc_t) */
+typedef uint32_t _okl4_tracepoint_desc_t;
+
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1);
+
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_ID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_ID_MASK ((_okl4_tracepoint_desc_t)255U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_ID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)255U)
+/*lint -esym(621, _OKL4_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_USER_MASK) */
+#define _OKL4_TRACEPOINT_DESC_USER_MASK ((_okl4_tracepoint_desc_t)1U << 8) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_MASK_USER_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 8)
+/*lint -esym(621, _OKL4_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_BIN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_BIN_MASK ((_okl4_tracepoint_desc_t)1U << 9) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_BIN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 9)
+/*lint -esym(621, _OKL4_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_RECLEN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_RECLEN_MASK ((_okl4_tracepoint_desc_t)63U << 10) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_RECLEN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 10)
+/*lint -esym(621, _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_CPUID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_CPUID_MASK ((_okl4_tracepoint_desc_t)63U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_CPUID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 16)
+/*lint -esym(621, _OKL4_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_THREADID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_THREADID_MASK ((_okl4_tracepoint_desc_t)63U << 22) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_THREADID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 22)
+/*lint -esym(621, _OKL4_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC__R1_MASK) */
+#define _OKL4_TRACEPOINT_DESC__R1_MASK ((_okl4_tracepoint_desc_t)15U << 28) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_MASK__R1_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)15U << 28)
+/*lint -esym(621, _OKL4_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH__R1_TRACEPOINT_DESC (4)
+
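+/*
+ * Editor's note: illustrative sketch only, not part of the generated API.
+ * The MASK/SHIFT constants above describe the same packed layout that the
+ * bitfield accessors below implement; the record-length field, for example,
+ * can be extracted equivalently by masking and shifting the raw word.
+ * okl4_example_desc_reclen is a hypothetical name used for illustration.
+ */
+OKL4_FORCE_INLINE uint32_t
+okl4_example_desc_reclen(_okl4_tracepoint_desc_t desc)
+{
+    return (uint32_t)((desc & _OKL4_MASK_RECLEN_TRACEPOINT_DESC) >>
+            _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC);
+}
+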
+
+/*lint -sem(_okl4_tracepoint_desc_getid, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 8;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setid, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, _okl4_tracepoint_desc_setid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 8;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_id;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getuser, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ _Bool field : 1;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setuser, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setuser) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 8;
+ _Bool field : 1;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_user;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getbin, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x)
+{
+ okl4_bool_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 9;
+ _Bool field : 1;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_bool_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setbin, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setbin) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 9;
+ _Bool field : 1;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (_Bool)_bin;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getreclen, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 10;
+ uint32_t field : 6;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setreclen, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setreclen) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 10;
+ uint32_t field : 6;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_reclen;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getcpuid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x)
+{
+ okl4_count_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 6;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (okl4_count_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setcpuid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setcpuid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 6;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_cpuid;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getthreadid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 22;
+ uint32_t field : 6;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setthreadid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setthreadid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 22;
+ uint32_t field : 6;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_threadid;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getr1, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 28;
+ uint32_t field : 4;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setr1, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, _okl4_tracepoint_desc_setr1) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 28;
+ uint32_t field : 4;
+ } bits;
+ _okl4_tracepoint_desc_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)__r1;
+ *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x)
+{
+ *x = (_okl4_tracepoint_desc_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force)
+{
+ _okl4_tracepoint_desc_t x = (_okl4_tracepoint_desc_t)p;
+ (void)force;
+ return x;
+}
+
+
+
+/**
+ - BITS 15..0 - @ref _OKL4_MASK_CLASS_TRACEPOINT_MASKS
+ - BITS 31..16 - @ref _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS
+*/
+
+/*lint -esym(621, _okl4_tracepoint_masks_t) */
+typedef uint32_t _okl4_tracepoint_masks_t;
+
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class);
+
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem);
+
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_CLASS_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_CLASS_MASK ((_okl4_tracepoint_masks_t)65535U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_CLASS_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U)
+/*lint -esym(621, _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK ((_okl4_tracepoint_masks_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U << 16)
+/*lint -esym(621, _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/*lint -sem(_okl4_tracepoint_masks_getclass, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ _okl4_tracepoint_masks_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setclass, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setclass) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t field : 16;
+ } bits;
+ _okl4_tracepoint_masks_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_class;
+ *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_masks_getsubsystem, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x)
+{
+ uint32_t field;
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 16;
+ } bits;
+ _okl4_tracepoint_masks_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ field = (uint32_t)_conv.bits.field;
+ return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setsubsystem, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setsubsystem) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 16;
+ uint32_t field : 16;
+ } bits;
+ _okl4_tracepoint_masks_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_subsystem;
+ *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x)
+{
+ *x = (_okl4_tracepoint_masks_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force)
+{
+ _okl4_tracepoint_masks_t x = (_okl4_tracepoint_masks_t)p;
+ (void)force;
+ return x;
+}
+
+
+
+
+struct okl4_tracepoint_entry_base {
+ uint32_t time_offset;
+ _okl4_tracepoint_masks_t masks;
+ _okl4_tracepoint_desc_t description;
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_evt_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE ((okl4_tracepoint_evt_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE ((okl4_tracepoint_evt_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH ((okl4_tracepoint_evt_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV ((okl4_tracepoint_evt_t)0x4U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED ((okl4_tracepoint_evt_t)0x5U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA ((okl4_tracepoint_evt_t)0x6U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE ((okl4_tracepoint_evt_t)0x7U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT ((okl4_tracepoint_evt_t)0x8U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA ((okl4_tracepoint_evt_t)0x9U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE ((okl4_tracepoint_evt_t)0xaU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT ((okl4_tracepoint_evt_t)0xbU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND ((okl4_tracepoint_evt_t)0xcU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK ((okl4_tracepoint_evt_t)0xdU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE ((okl4_tracepoint_evt_t)0xeU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED ((okl4_tracepoint_evt_t)0xfU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH ((okl4_tracepoint_evt_t)0x10U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE ((okl4_tracepoint_evt_t)0x11U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI ((okl4_tracepoint_evt_t)0x12U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING ((okl4_tracepoint_evt_t)0x13U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD ((okl4_tracepoint_evt_t)0x14U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS ((okl4_tracepoint_evt_t)0x15U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK ((okl4_tracepoint_evt_t)0x16U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x17U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT ((okl4_tracepoint_evt_t)0x18U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG ((okl4_tracepoint_evt_t)0x19U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL ((okl4_tracepoint_evt_t)0x1aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY ((okl4_tracepoint_evt_t)0x1bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK ((okl4_tracepoint_evt_t)0x1cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS ((okl4_tracepoint_evt_t)0x1dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK ((okl4_tracepoint_evt_t)0x1eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT ((okl4_tracepoint_evt_t)0x1fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x20U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL ((okl4_tracepoint_evt_t)0x21U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT ((okl4_tracepoint_evt_t)0x22U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT ((okl4_tracepoint_evt_t)0x23U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE ((okl4_tracepoint_evt_t)0x24U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN ((okl4_tracepoint_evt_t)0x25U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE ((okl4_tracepoint_evt_t)0x26U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN ((okl4_tracepoint_evt_t)0x27U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE ((okl4_tracepoint_evt_t)0x28U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN ((okl4_tracepoint_evt_t)0x29U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE ((okl4_tracepoint_evt_t)0x2aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN ((okl4_tracepoint_evt_t)0x2bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS ((okl4_tracepoint_evt_t)0x2cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS ((okl4_tracepoint_evt_t)0x2dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS ((okl4_tracepoint_evt_t)0x2eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS ((okl4_tracepoint_evt_t)0x2fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL ((okl4_tracepoint_evt_t)0x30U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL ((okl4_tracepoint_evt_t)0x31U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV ((okl4_tracepoint_evt_t)0x32U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND ((okl4_tracepoint_evt_t)0x33U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE ((okl4_tracepoint_evt_t)0x34U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER ((okl4_tracepoint_evt_t)0x35U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS ((okl4_tracepoint_evt_t)0x36U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 ((okl4_tracepoint_evt_t)0x37U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER ((okl4_tracepoint_evt_t)0x38U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS ((okl4_tracepoint_evt_t)0x39U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 ((okl4_tracepoint_evt_t)0x3aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED ((okl4_tracepoint_evt_t)0x3bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED ((okl4_tracepoint_evt_t)0x3cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE ((okl4_tracepoint_evt_t)0x3dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE ((okl4_tracepoint_evt_t)0x3eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA ((okl4_tracepoint_evt_t)0x3fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE ((okl4_tracepoint_evt_t)0x40U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE ((okl4_tracepoint_evt_t)0x41U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA ((okl4_tracepoint_evt_t)0x42U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND ((okl4_tracepoint_evt_t)0x43U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL ((okl4_tracepoint_evt_t)0x44U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION ((okl4_tracepoint_evt_t)0x45U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME ((okl4_tracepoint_evt_t)0x46U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY ((okl4_tracepoint_evt_t)0x47U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_START ((okl4_tracepoint_evt_t)0x48U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC ((okl4_tracepoint_evt_t)0x49U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET ((okl4_tracepoint_evt_t)0x4aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_START ((okl4_tracepoint_evt_t)0x4bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP ((okl4_tracepoint_evt_t)0x4cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE ((okl4_tracepoint_evt_t)0x4dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV ((okl4_tracepoint_evt_t)0x4eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE ((okl4_tracepoint_evt_t)0x4fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE ((okl4_tracepoint_evt_t)0x50U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY ((okl4_tracepoint_evt_t)0x51U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x52U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_MAX) */
+#define OKL4_TRACEPOINT_EVT_MAX ((okl4_tracepoint_evt_t)0x52U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_INVALID) */
+#define OKL4_TRACEPOINT_EVT_INVALID ((okl4_tracepoint_evt_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) ||
+ (var == OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_START) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_START) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE));
+}
+
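+/*
+ * Editor's note: illustrative sketch only, not part of the generated API.
+ * A consumer pulling raw event identifiers out of a trace buffer could use
+ * okl4_tracepoint_evt_is_element_of() to reject unknown values before acting
+ * on them; okl4_example_decode_evt is a hypothetical name.
+ */
+OKL4_FORCE_INLINE okl4_tracepoint_evt_t
+okl4_example_decode_evt(uint32_t raw)
+{
+    okl4_tracepoint_evt_t evt = (okl4_tracepoint_evt_t)raw;
+
+    if (!okl4_tracepoint_evt_is_element_of(evt)) {
+        /* Not a known event identifier; substitute the invalid sentinel. */
+        evt = OKL4_TRACEPOINT_EVT_INVALID;
+    }
+    return evt;
+}
+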
+
+
+typedef uint32_t okl4_tracepoint_level_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_TRACEPOINT_LEVEL_DEBUG ((okl4_tracepoint_level_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_TRACEPOINT_LEVEL_INFO ((okl4_tracepoint_level_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_TRACEPOINT_LEVEL_WARN ((okl4_tracepoint_level_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_TRACEPOINT_LEVEL_CRITICAL ((okl4_tracepoint_level_t)0x3U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_TRACEPOINT_LEVEL_MAX ((okl4_tracepoint_level_t)0x3U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_TRACEPOINT_LEVEL_INVALID ((okl4_tracepoint_level_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_TRACEPOINT_LEVEL_DEBUG) ||
+ (var == OKL4_TRACEPOINT_LEVEL_INFO) ||
+ (var == OKL4_TRACEPOINT_LEVEL_WARN) ||
+ (var == OKL4_TRACEPOINT_LEVEL_CRITICAL));
+}
+
+
+
+typedef uint32_t okl4_tracepoint_mask_t;
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_subsystem_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER ((okl4_tracepoint_subsystem_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_TRACE ((okl4_tracepoint_subsystem_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_CORE ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_MAX ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_INVALID ((okl4_tracepoint_subsystem_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) ||
+ (var == OKL4_TRACEPOINT_SUBSYSTEM_TRACE) ||
+ (var == OKL4_TRACEPOINT_SUBSYSTEM_CORE));
+}
+
+
+
+struct okl4_tracepoint_unpacked_entry {
+ struct okl4_tracepoint_entry_base entry;
+ uint32_t data[]; /*lint --e{9038} flex array */
+};
+
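+/*
+ * Editor's note: illustrative sketch only, not part of the generated API.
+ * It shows how the _okl4_tracepoint_desc_* and _okl4_tracepoint_masks_*
+ * accessors above might be used to fill in the packed header of a trace
+ * entry. okl4_example_fill_entry is a hypothetical name; the choice of
+ * subsystem, the user flag, and the meaning of the class mask bits are
+ * arbitrary assumptions made for this example.
+ */
+OKL4_FORCE_INLINE void
+okl4_example_fill_entry(struct okl4_tracepoint_entry_base *entry,
+        uint32_t time_offset, uint32_t class_mask,
+        okl4_tracepoint_evt_t evt, okl4_count_t cpu, uint32_t reclen)
+{
+    entry->time_offset = time_offset;
+
+    /* The masks word holds a 16-bit class mask and a 16-bit subsystem. */
+    _okl4_tracepoint_masks_init(&entry->masks);
+    _okl4_tracepoint_masks_setclass(&entry->masks, class_mask);
+    _okl4_tracepoint_masks_setsubsystem(&entry->masks,
+            (uint32_t)OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER);
+
+    /* The description word packs the 8-bit event id and record metadata;
+       reclen must fit in its 6-bit field. */
+    _okl4_tracepoint_desc_init(&entry->description);
+    _okl4_tracepoint_desc_setid(&entry->description, (uint32_t)evt);
+    _okl4_tracepoint_desc_setuser(&entry->description, (okl4_bool_t)1U);
+    _okl4_tracepoint_desc_setreclen(&entry->description, reclen);
+    _okl4_tracepoint_desc_setcpuid(&entry->description, cpu);
+}
+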
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    Client environment entry: the Axon endpoint data for the client's
+    channel, plus an opaque pointer.
+*/
+
+struct okl4_vclient_info {
+ struct okl4_axon_ep_data axon_ep;
+ __ptr64(void *, opaque);
+};
+
+
+
+
+/**
+    Per-vCPU environment entry: the vCPU capability, its IPI capability and
+    interrupt number, and the initial stack pointer.
+*/
+
+struct okl4_vcpu_entry {
+ okl4_kcap_t vcpu;
+ okl4_kcap_t ipi;
+ okl4_interrupt_number_t irq;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ okl4_register_t stack_pointer;
+};
+
+
+
+
+
+typedef okl4_arm_mpidr_t okl4_vcpu_id_t;
+
+
+
+
+/**
+    Table of vCPUs in the environment: the number of vCPUs and a pointer to
+    the array of okl4_vcpu_entry structures.
+*/
+
+struct okl4_vcpu_table {
+ okl4_count_t num_vcpus;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(struct okl4_vcpu_entry *, vcpu);
+};
+
+
+
+
+/**
+ The okl4_vfp_ctrl_registers object represents the set of control
+ registers in the ARM VFP unit.
+*/
+
+struct okl4_vfp_ctrl_registers {
+ uint32_t fpsr;
+ uint32_t fpcr;
+};
+
+
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers_t type represents the set of VFP control
+    registers on the native machine.
+*/
+
+typedef struct okl4_vfp_ctrl_registers okl4_vfp_ctrl_registers_t;
+
+
+
+
+/**
+ The okl4_vfp_ops_t object represents the set of operations that may be
+ performed on the ARM VFP unit.
+
+ - @ref OKL4_VFP_OPS_MAX
+ - @ref OKL4_VFP_OPS_INVALID
+*/
+
+typedef uint32_t okl4_vfp_ops_t;
+
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_MAX) */
+#define OKL4_VFP_OPS_MAX ((okl4_vfp_ops_t)0x0U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_INVALID) */
+#define OKL4_VFP_OPS_INVALID ((okl4_vfp_ops_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var);
+
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((okl4_bool_t)0);
+}
+
+
+
+struct okl4_vfp_register {
+ __attribute__((aligned(16))) uint8_t __bytes[16];
+};
+
+
+
+
+
+
+
+typedef struct okl4_vfp_register okl4_vfp_register_t;
+
+
+
+
+/**
+ The okl4_vfp_registers object represents the set of registers in the
+ ARM VFP unit, including the control registers.
+*/
+
+struct okl4_vfp_registers {
+ okl4_vfp_register_t v0;
+ okl4_vfp_register_t v1;
+ okl4_vfp_register_t v2;
+ okl4_vfp_register_t v3;
+ okl4_vfp_register_t v4;
+ okl4_vfp_register_t v5;
+ okl4_vfp_register_t v6;
+ okl4_vfp_register_t v7;
+ okl4_vfp_register_t v8;
+ okl4_vfp_register_t v9;
+ okl4_vfp_register_t v10;
+ okl4_vfp_register_t v11;
+ okl4_vfp_register_t v12;
+ okl4_vfp_register_t v13;
+ okl4_vfp_register_t v14;
+ okl4_vfp_register_t v15;
+ okl4_vfp_register_t v16;
+ okl4_vfp_register_t v17;
+ okl4_vfp_register_t v18;
+ okl4_vfp_register_t v19;
+ okl4_vfp_register_t v20;
+ okl4_vfp_register_t v21;
+ okl4_vfp_register_t v22;
+ okl4_vfp_register_t v23;
+ okl4_vfp_register_t v24;
+ okl4_vfp_register_t v25;
+ okl4_vfp_register_t v26;
+ okl4_vfp_register_t v27;
+ okl4_vfp_register_t v28;
+ okl4_vfp_register_t v29;
+ okl4_vfp_register_t v30;
+ okl4_vfp_register_t v31;
+ struct okl4_vfp_ctrl_registers control;
+ _okl4_padding_t __padding0_8; /**< Padding 16 */
+ _okl4_padding_t __padding1_9; /**< Padding 16 */
+ _okl4_padding_t __padding2_10; /**< Padding 16 */
+ _okl4_padding_t __padding3_11; /**< Padding 16 */
+ _okl4_padding_t __padding4_12; /**< Padding 16 */
+ _okl4_padding_t __padding5_13; /**< Padding 16 */
+ _okl4_padding_t __padding6_14; /**< Padding 16 */
+ _okl4_padding_t __padding7_15; /**< Padding 16 */
+};
+
+
+
+
+
+
+/**
+ The okl4_vfp_registers_t type represents a set of VFP registers on
+ the native machine.
+*/
+
+typedef struct okl4_vfp_registers okl4_vfp_registers_t;
+
+
+
+
+/**
+    A virtual memory pool, described by a single okl4_virtmem_item covering
+    the pool's address range.
+*/
+
+struct okl4_virtmem_pool {
+ struct okl4_virtmem_item pool;
+};
+
+
+
+
+/**
+    A set of virtual interrupt lines: the number of lines and a pointer to
+    the array of their capabilities.
+*/
+
+struct okl4_virtual_interrupt_lines {
+ okl4_count_t num_lines;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(okl4_kcap_t *, lines);
+};
+
+
+
+
+/**
+    Virtual server information: the per-channel configuration (Axon endpoint
+    data, maximum number of messages and message size) and the number of
+    clients.
+*/
+
+struct okl4_vserver_info {
+ struct {
+ __ptr64(struct okl4_axon_ep_data *, data);
+ okl4_count_t max_messages;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ okl4_ksize_t message_size;
+ } channels;
+
+ okl4_count_t num_clients;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    Descriptor of a single Virtual Services service, giving its name and
+    protocol strings; the third field is reserved.
+*/
+
+struct okl4_vservices_service_descriptor {
+ __ptr64(okl4_string_t, name);
+ __ptr64(okl4_string_t, protocol);
+ __ptr64(void *, RESERVED);
+};
+
+
+
+
+
+typedef uint32_t okl4_vservices_transport_type_t;
+
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_AXON ((okl4_vservices_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_vservices_transport_type_t)0x1U)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_MAX ((okl4_vservices_transport_type_t)0x1U)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_INVALID ((okl4_vservices_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var);
+
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var)
+{
+ /*lint --e{944} Disable dead expression detection */
+ /*lint --e{948} --e{845} Disable constant always zero */
+ return ((var == OKL4_VSERVICES_TRANSPORT_TYPE_AXON) ||
+ (var == OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER));
+}
+
+
+/**
+    Description of a Microvisor Virtual Services transport: whether this end
+    is the server, the transport type, the Axon or shared-buffer parameters,
+    the incoming and outgoing virtual interrupt lines, and the list of
+    services carried.
+*/
+
+struct okl4_vservices_transport_microvisor {
+ okl4_bool_t is_server;
+ _okl4_padding_t __padding0_1;
+ _okl4_padding_t __padding1_2;
+ _okl4_padding_t __padding2_3;
+ okl4_vservices_transport_type_t type;
+ union {
+ struct {
+ struct okl4_axon_ep_data ep;
+ okl4_ksize_t message_size;
+ okl4_count_t queue_length;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ } axon;
+
+ struct {
+ okl4_ksize_t message_size;
+ okl4_count_t queue_length;
+ _okl4_padding_t __padding0_4; /**< Padding 8 */
+ _okl4_padding_t __padding1_5; /**< Padding 8 */
+ _okl4_padding_t __padding2_6; /**< Padding 8 */
+ _okl4_padding_t __padding3_7; /**< Padding 8 */
+ struct okl4_virtmem_item rx;
+ okl4_count_t rx_batch_size;
+ okl4_count_t rx_notify_bits;
+ struct okl4_virtmem_item tx;
+ okl4_count_t tx_batch_size;
+ okl4_count_t tx_notify_bits;
+ } shared_buffer;
+
+ } u;
+
+ struct okl4_virtual_interrupt_lines virqs_in;
+ struct okl4_virtual_interrupt_lines virqs_out;
+ okl4_count_t num_services;
+ _okl4_padding_t __padding3_4;
+ _okl4_padding_t __padding4_5;
+ _okl4_padding_t __padding5_6;
+ _okl4_padding_t __padding6_7;
+ __ptr64(struct okl4_vservices_service_descriptor *, services);
+};
+
+
+
+
+/**
+    Table of Virtual Services transports available in the environment: the
+    number of transports and a pointer to their descriptors.
+*/
+
+struct okl4_vservices_transports {
+ okl4_count_t num_transports;
+ _okl4_padding_t __padding0_4;
+ _okl4_padding_t __padding1_5;
+ _okl4_padding_t __padding2_6;
+ _okl4_padding_t __padding3_7;
+ __ptr64(struct okl4_vservices_transport_microvisor *, transports);
+};
+
+
+
+
+
+typedef struct okl4_axon_data okl4_axon_data_t;
+typedef struct okl4_axon_ep_data okl4_axon_ep_data_t;
+typedef struct okl4_range_item okl4_range_item_t;
+typedef struct okl4_virtmem_item okl4_virtmem_item_t;
+typedef struct okl4_cell_management_item okl4_cell_management_item_t;
+typedef struct okl4_cell_management okl4_cell_management_t;
+typedef struct okl4_segment_mapping okl4_segment_mapping_t;
+typedef struct okl4_cell_management_segments okl4_cell_management_segments_t;
+typedef struct okl4_cell_management_vcpus okl4_cell_management_vcpus_t;
+typedef struct _okl4_env okl4_env_t;
+typedef struct okl4_env_access_cell okl4_env_access_cell_t;
+typedef struct okl4_env_access_entry okl4_env_access_entry_t;
+typedef struct okl4_env_access_table okl4_env_access_table_t;
+typedef struct okl4_env_args okl4_env_args_t;
+typedef struct okl4_env_interrupt_device_map okl4_env_interrupt_device_map_t;
+typedef struct okl4_interrupt okl4_interrupt_t;
+typedef struct okl4_env_interrupt_handle okl4_env_interrupt_handle_t;
+typedef struct okl4_env_interrupt_list okl4_env_interrupt_list_t;
+typedef struct okl4_env_profile_cell okl4_env_profile_cell_t;
+typedef struct okl4_env_profile_cpu okl4_env_profile_cpu_t;
+typedef struct okl4_env_profile_table okl4_env_profile_table_t;
+typedef struct okl4_env_segment okl4_env_segment_t;
+typedef struct okl4_env_segment_table okl4_env_segment_table_t;
+typedef struct okl4_firmware_segment okl4_firmware_segment_t;
+typedef struct okl4_firmware_segments_info okl4_firmware_segments_info_t;
+typedef void (*okl4_irq_callback_t)(okl4_interrupt_number_t irq, void *opaque);
+typedef struct okl4_kmmu okl4_kmmu_t;
+typedef struct okl4_ksp_user_agent okl4_ksp_user_agent_t;
+typedef struct okl4_pipe_data okl4_pipe_data_t;
+typedef struct okl4_pipe_ep_data okl4_pipe_ep_data_t;
+typedef struct okl4_link okl4_link_t;
+typedef struct okl4_links okl4_links_t;
+typedef struct okl4_machine_info okl4_machine_info_t;
+typedef struct okl4_merged_physpool okl4_merged_physpool_t;
+typedef struct okl4_microvisor_timer okl4_microvisor_timer_t;
+typedef struct okl4_schedule_profile_data okl4_schedule_profile_data_t;
+typedef struct okl4_shared_buffer okl4_shared_buffer_t;
+typedef struct okl4_shared_buffers_array okl4_shared_buffers_array_t;
+typedef struct okl4_tracebuffer_env okl4_tracebuffer_env_t;
+typedef struct okl4_vclient_info okl4_vclient_info_t;
+typedef struct okl4_vcpu_entry okl4_vcpu_entry_t;
+typedef struct okl4_vcpu_table okl4_vcpu_table_t;
+typedef struct okl4_virtmem_pool okl4_virtmem_pool_t;
+typedef struct okl4_virtual_interrupt_lines okl4_virtual_interrupt_lines_t;
+typedef struct okl4_vserver_info okl4_vserver_info_t;
+typedef struct okl4_vservices_service_descriptor okl4_vservices_service_descriptor_t;
+typedef struct okl4_vservices_transport_microvisor okl4_vservices_transport_microvisor_t;
+typedef struct okl4_vservices_transports okl4_vservices_transports_t;
+
+/*
+ * Return structures from system calls.
+ */
+/*lint -save -e958 -e959 implicit padding */
+struct _okl4_sys_axon_process_recv_return {
+ okl4_error_t error;
+ okl4_bool_t send_empty;
+};
+
+struct _okl4_sys_axon_set_halted_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_area_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_queue_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_segment_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_area_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_queue_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_segment_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_axon_trigger_send_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_ack_return {
+ okl4_interrupt_number_t irq;
+ uint8_t source;
+};
+
+struct _okl4_sys_interrupt_attach_private_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_attach_shared_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_detach_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_dist_enable_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_eoi_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_get_highest_priority_pending_return {
+ okl4_interrupt_number_t irq;
+ uint8_t source;
+};
+
+struct _okl4_sys_interrupt_get_payload_return {
+ okl4_error_t error;
+ okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_interrupt_limits_return {
+ okl4_count_t cpunumber;
+ okl4_count_t itnumber;
+};
+
+struct _okl4_sys_interrupt_mask_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_raise_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_binary_point_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_config_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_control_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_mask_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_targets_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_unmask_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_kdb_set_object_name_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_ksp_procedure_call_return {
+ okl4_error_t error;
+ okl4_ksp_arg_t ret0;
+ okl4_ksp_arg_t ret1;
+ okl4_ksp_arg_t ret2;
+};
+
+struct _okl4_sys_mmu_attach_segment_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_detach_segment_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_pn_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_lookup_page_return {
+ okl4_error_t error;
+ okl4_psize_tr_t offset;
+ okl4_mmu_lookup_size_t size;
+ _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_lookup_pn_return {
+ okl4_mmu_lookup_index_t segment_index;
+ okl4_psize_pn_t offset_pn;
+ okl4_lsize_pn_t count_pn;
+ _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_map_page_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_map_pn_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_page_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_pn_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_attrs_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_perms_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_attrs_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_perms_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_performance_null_syscall_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_control_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_recv_return {
+ okl4_error_t error;
+ okl4_ksize_t size;
+};
+
+struct _okl4_sys_pipe_send_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_priority_waive_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_register_return {
+ uint32_t reg_w0;
+ uint32_t reg_w1;
+ uint32_t reg_w2;
+ uint32_t reg_w3;
+ okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_registers_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_remote_read_memory32_return {
+ uint32_t data;
+ okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_register_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_registers_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_remote_write_memory32_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_metrics_status_suspended_return {
+ okl4_error_t error;
+ uint32_t power_suspend_version;
+ uint32_t power_suspend_running_count;
+};
+
+struct _okl4_sys_schedule_metrics_watch_suspended_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_disable_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_enable_return {
+ okl4_error_t error;
+ uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_cpu_get_data_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_disable_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_enable_return {
+ okl4_error_t error;
+ uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_get_data_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_scheduler_suspend_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_timer_cancel_return {
+ uint64_t remaining;
+ okl4_timer_flags_t old_flags;
+ okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_resolution_return {
+ uint64_t tick_freq;
+ uint32_t a;
+ uint32_t b;
+ okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_time_return {
+ uint64_t time;
+ okl4_error_t error;
+};
+
+struct _okl4_sys_timer_query_return {
+ uint64_t remaining;
+ okl4_timer_flags_t active_flags;
+ okl4_error_t error;
+};
+
+struct _okl4_sys_timer_start_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_reset_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_start_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_stop_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_switch_mode_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_clear_and_raise_return {
+ okl4_error_t error;
+ okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_vinterrupt_modify_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_raise_return {
+ okl4_error_t error;
+};
+
+/*lint -restore */
+
+/*
+ * Ensure type sizes have been correctly calculated by the
+ * code generator. We test to see if the C compiler agrees
+ * with us about the size of the type.
+ */
+
+#if !defined(GLOBAL_STATIC_ASSERT)
+#if defined(__cplusplus)
+/* FIX: we should be able to use static_assert, but it doesn't compile */
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#else
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#define GLOBAL_STATIC_ASSERT(expr, msg) \
+ _Static_assert(expr, #msg);
+#else
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#endif
+#endif
+#endif
+
+
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_mpidr_t) == 8U,
+ __autogen_confused_about_sizeof_arm_mpidr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_mpidr_t) == 8U,
+ __autogen_confused_about_alignof_arm_mpidr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_function_t) == 4U,
+ __autogen_confused_about_sizeof_arm_psci_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_function_t) == 4U,
+ __autogen_confused_about_alignof_arm_psci_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_result_t) == 4U,
+ __autogen_confused_about_sizeof_arm_psci_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_result_t) == 4U,
+ __autogen_confused_about_alignof_arm_psci_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_suspend_state_t) == 4U,
+ __autogen_confused_about_sizeof_arm_psci_suspend_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_suspend_state_t) == 4U,
+ __autogen_confused_about_alignof_arm_psci_suspend_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_sctlr_t) == 4U,
+ __autogen_confused_about_sizeof_arm_sctlr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_sctlr_t) == 4U,
+ __autogen_confused_about_alignof_arm_sctlr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_arch_function_t) == 4U,
+ __autogen_confused_about_sizeof_arm_smccc_arch_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_arch_function_t) == 4U,
+ __autogen_confused_about_alignof_arm_smccc_arch_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_result_t) == 4U,
+ __autogen_confused_about_sizeof_arm_smccc_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_result_t) == 4U,
+ __autogen_confused_about_alignof_arm_smccc_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_register) == 8U,
+ __autogen_confused_about_sizeof_atomic_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_register) == 8U,
+ __autogen_confused_about_alignof_atomic_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_register_t) == 8U,
+ __autogen_confused_about_sizeof_atomic_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_register_t) == 8U,
+ __autogen_confused_about_alignof_atomic_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint16) == 2U,
+ __autogen_confused_about_sizeof_atomic_uint16)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint16) == 2U,
+ __autogen_confused_about_alignof_atomic_uint16)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint16_t) == 2U,
+ __autogen_confused_about_sizeof_atomic_uint16_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint16_t) == 2U,
+ __autogen_confused_about_alignof_atomic_uint16_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint32) == 4U,
+ __autogen_confused_about_sizeof_atomic_uint32)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint32) == 4U,
+ __autogen_confused_about_alignof_atomic_uint32)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint32_t) == 4U,
+ __autogen_confused_about_sizeof_atomic_uint32_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint32_t) == 4U,
+ __autogen_confused_about_alignof_atomic_uint32_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint64) == 8U,
+ __autogen_confused_about_sizeof_atomic_uint64)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint64) == 8U,
+ __autogen_confused_about_alignof_atomic_uint64)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint64_t) == 8U,
+ __autogen_confused_about_sizeof_atomic_uint64_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint64_t) == 8U,
+ __autogen_confused_about_alignof_atomic_uint64_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint8) == 1U,
+ __autogen_confused_about_sizeof_atomic_uint8)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint8) == 1U,
+ __autogen_confused_about_alignof_atomic_uint8)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint8_t) == 1U,
+ __autogen_confused_about_sizeof_atomic_uint8_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint8_t) == 1U,
+ __autogen_confused_about_alignof_atomic_uint8_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_data) == 12U,
+ __autogen_confused_about_sizeof_axon_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_data) == 4U,
+ __autogen_confused_about_alignof_axon_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_data_info_t) == 8U,
+ __autogen_confused_about_sizeof_axon_data_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_data_info_t) == 8U,
+ __autogen_confused_about_alignof_axon_data_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_ep_data) == 24U,
+ __autogen_confused_about_sizeof_axon_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_ep_data) == 4U,
+ __autogen_confused_about_alignof_axon_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue) == 12U,
+ __autogen_confused_about_sizeof_axon_queue)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue) == 4U,
+ __autogen_confused_about_alignof_axon_queue)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue_entry) == 24U,
+ __autogen_confused_about_sizeof_axon_queue_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue_entry) == 8U,
+ __autogen_confused_about_alignof_axon_queue_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_queue_size_t) == 2U,
+ __autogen_confused_about_sizeof_axon_queue_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_queue_size_t) == 2U,
+ __autogen_confused_about_alignof_axon_queue_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_rx) == 56U,
+ __autogen_confused_about_sizeof_axon_rx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_rx) == 4U,
+ __autogen_confused_about_alignof_axon_rx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_tx) == 48U,
+ __autogen_confused_about_sizeof_axon_tx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_tx) == 4U,
+ __autogen_confused_about_alignof_axon_tx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_virq_flags_t) == 8U,
+ __autogen_confused_about_sizeof_axon_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_virq_flags_t) == 8U,
+ __autogen_confused_about_alignof_axon_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_cache_t) == 4U,
+ __autogen_confused_about_sizeof_cache_attr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_cache_t) == 4U,
+ __autogen_confused_about_alignof_cache_attr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_range_item) == 16U,
+ __autogen_confused_about_sizeof_range_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_range_item) == 8U,
+ __autogen_confused_about_alignof_range_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_item) == 16U,
+ __autogen_confused_about_sizeof_virtmem_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_item) == 8U,
+ __autogen_confused_about_alignof_virtmem_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_item) == 104U,
+ __autogen_confused_about_sizeof_cell_management_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_item) == 8U,
+ __autogen_confused_about_alignof_cell_management_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management) == 8U,
+ __autogen_confused_about_sizeof_cell_management)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management) == 8U,
+ __autogen_confused_about_alignof_cell_management)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_segment_mapping) == 32U,
+ __autogen_confused_about_sizeof_segment_mapping)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_segment_mapping) == 8U,
+ __autogen_confused_about_alignof_segment_mapping)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_segments) == 8U,
+ __autogen_confused_about_sizeof_cell_management_segments)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_segments) == 8U,
+ __autogen_confused_about_alignof_cell_management_segments)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_vcpus) == 4U,
+ __autogen_confused_about_sizeof_cell_management_vcpus)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_vcpus) == 4U,
+ __autogen_confused_about_alignof_cell_management_vcpus)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_mode_t) == 4U,
+ __autogen_confused_about_sizeof_cpu_mode)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_mode_t) == 4U,
+ __autogen_confused_about_alignof_cpu_mode)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_hdr) == 4U,
+ __autogen_confused_about_sizeof_env_hdr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_hdr) == 2U,
+ __autogen_confused_about_alignof_env_hdr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_item) == 16U,
+ __autogen_confused_about_sizeof_env_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_item) == 8U,
+ __autogen_confused_about_alignof_env_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env) == 8U,
+ __autogen_confused_about_sizeof_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env) == 8U,
+ __autogen_confused_about_alignof_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_cell) == 16U,
+ __autogen_confused_about_sizeof_env_access_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_cell) == 8U,
+ __autogen_confused_about_alignof_env_access_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_perms_t) == 4U,
+ __autogen_confused_about_sizeof_page_perms)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_perms_t) == 4U,
+ __autogen_confused_about_alignof_page_perms)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_entry) == 48U,
+ __autogen_confused_about_sizeof_env_access_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_entry) == 8U,
+ __autogen_confused_about_alignof_env_access_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_table) == 24U,
+ __autogen_confused_about_sizeof_env_access_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_table) == 8U,
+ __autogen_confused_about_alignof_env_access_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_args) == 8U,
+ __autogen_confused_about_sizeof_env_args)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_args) == 8U,
+ __autogen_confused_about_alignof_env_args)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_device_map) == 4U,
+ __autogen_confused_about_sizeof_env_interrupt_device_map)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_device_map) == 4U,
+ __autogen_confused_about_alignof_env_interrupt_device_map)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_interrupt) == 4U,
+ __autogen_confused_about_sizeof_okl4_interrupt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_interrupt) == 4U,
+ __autogen_confused_about_alignof_okl4_interrupt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_handle) == 8U,
+ __autogen_confused_about_sizeof_env_interrupt_handle)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_handle) == 4U,
+ __autogen_confused_about_alignof_env_interrupt_handle)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_list) == 24U,
+ __autogen_confused_about_sizeof_env_interrupt_list)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_list) == 8U,
+ __autogen_confused_about_alignof_env_interrupt_list)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cell) == 48U,
+ __autogen_confused_about_sizeof_env_profile_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cell) == 8U,
+ __autogen_confused_about_alignof_env_profile_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cpu) == 4U,
+ __autogen_confused_about_sizeof_env_profile_cpu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cpu) == 4U,
+ __autogen_confused_about_alignof_env_profile_cpu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_table) == 16U,
+ __autogen_confused_about_sizeof_env_profile_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_table) == 8U,
+ __autogen_confused_about_alignof_env_profile_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment) == 24U,
+ __autogen_confused_about_sizeof_env_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment) == 8U,
+ __autogen_confused_about_alignof_env_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment_table) == 8U,
+ __autogen_confused_about_sizeof_env_segment_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment_table) == 8U,
+ __autogen_confused_about_alignof_env_segment_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_error_t) == 4U,
+ __autogen_confused_about_sizeof_error_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_error_t) == 4U,
+ __autogen_confused_about_alignof_error_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segment) == 32U,
+ __autogen_confused_about_sizeof_firmware_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segment) == 8U,
+ __autogen_confused_about_alignof_firmware_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segments_info) == 8U,
+ __autogen_confused_about_sizeof_firmware_segments_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segments_info) == 8U,
+ __autogen_confused_about_alignof_firmware_segments_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_icfgr_t) == 4U,
+ __autogen_confused_about_sizeof_gicd_icfgr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_icfgr_t) == 4U,
+ __autogen_confused_about_alignof_gicd_icfgr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sgi_target_t) == 4U,
+ __autogen_confused_about_sizeof_sgi_target)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sgi_target_t) == 4U,
+ __autogen_confused_about_alignof_sgi_target)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_sgir_t) == 4U,
+ __autogen_confused_about_sizeof_gicd_sgir)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_sgir_t) == 4U,
+ __autogen_confused_about_alignof_gicd_sgir)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_kmmu) == 4U,
+ __autogen_confused_about_sizeof_kmmu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_kmmu) == 4U,
+ __autogen_confused_about_alignof_kmmu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_ksp_user_agent) == 8U,
+ __autogen_confused_about_sizeof_ksp_user_agent)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_ksp_user_agent) == 4U,
+ __autogen_confused_about_alignof_ksp_user_agent)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_data) == 8U,
+ __autogen_confused_about_sizeof_pipe_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_data) == 4U,
+ __autogen_confused_about_alignof_pipe_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_ep_data) == 16U,
+ __autogen_confused_about_sizeof_pipe_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_ep_data) == 4U,
+ __autogen_confused_about_alignof_pipe_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_role_t) == 4U,
+ __autogen_confused_about_sizeof_link_role)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_role_t) == 4U,
+ __autogen_confused_about_alignof_link_role)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_transport_type_t) == 4U,
+ __autogen_confused_about_sizeof_link_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_transport_type_t) == 4U,
+ __autogen_confused_about_alignof_link_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_link) == 80U,
+ __autogen_confused_about_sizeof_link)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_link) == 8U,
+ __autogen_confused_about_alignof_link)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_links) == 8U,
+ __autogen_confused_about_sizeof_links)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_links) == 8U,
+ __autogen_confused_about_alignof_links)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_machine_info) == 24U,
+ __autogen_confused_about_sizeof_machine_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_machine_info) == 8U,
+ __autogen_confused_about_alignof_machine_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_merged_physpool) == 16U,
+ __autogen_confused_about_sizeof_merged_physpool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_merged_physpool) == 8U,
+ __autogen_confused_about_alignof_merged_physpool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_microvisor_timer) == 8U,
+ __autogen_confused_about_sizeof_microvisor_timer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_microvisor_timer) == 4U,
+ __autogen_confused_about_alignof_microvisor_timer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_index_t) == 4U,
+ __autogen_confused_about_sizeof_mmu_lookup_index)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_index_t) == 4U,
+ __autogen_confused_about_alignof_mmu_lookup_index)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_size_t) == 8U,
+ __autogen_confused_about_sizeof_mmu_lookup_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_size_t) == 8U,
+ __autogen_confused_about_alignof_mmu_lookup_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_page_attribute_t) == 4U,
+ __autogen_confused_about_sizeof_page_attribute)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_page_attribute_t) == 4U,
+ __autogen_confused_about_alignof_page_attribute)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_control_t) == 1U,
+ __autogen_confused_about_sizeof_pipe_control)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_control_t) == 1U,
+ __autogen_confused_about_alignof_pipe_control)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_state_t) == 1U,
+ __autogen_confused_about_sizeof_pipe_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_state_t) == 1U,
+ __autogen_confused_about_alignof_pipe_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_set_t) == 4U,
+ __autogen_confused_about_sizeof_register_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_set_t) == 4U,
+ __autogen_confused_about_alignof_register_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_and_set_t) == 4U,
+ __autogen_confused_about_sizeof_register_and_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_and_set_t) == 4U,
+ __autogen_confused_about_alignof_register_and_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cpu_registers) == 448U,
+ __autogen_confused_about_sizeof_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cpu_registers) == 8U,
+ __autogen_confused_about_alignof_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_registers_t) == 448U,
+ __autogen_confused_about_sizeof_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_registers_t) == 8U,
+ __autogen_confused_about_alignof_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_schedule_profile_data) == 32U,
+ __autogen_confused_about_sizeof_schedule_profile_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_schedule_profile_data) == 8U,
+ __autogen_confused_about_alignof_schedule_profile_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_scheduler_virq_flags_t) == 8U,
+ __autogen_confused_about_sizeof_scheduler_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_scheduler_virq_flags_t) == 8U,
+ __autogen_confused_about_alignof_scheduler_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sdk_version_t) == 4U,
+ __autogen_confused_about_sizeof_sdk_version)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sdk_version_t) == 4U,
+ __autogen_confused_about_alignof_sdk_version)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffer) == 32U,
+ __autogen_confused_about_sizeof_shared_buffer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffer) == 8U,
+ __autogen_confused_about_alignof_shared_buffer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffers_array) == 16U,
+ __autogen_confused_about_sizeof_shared_buffers_array)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffers_array) == 8U,
+ __autogen_confused_about_alignof_shared_buffers_array)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_timer_flags_t) == 4U,
+ __autogen_confused_about_sizeof_timer_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_timer_flags_t) == 4U,
+ __autogen_confused_about_alignof_timer_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_buffer_header) == 40U,
+ __autogen_confused_about_sizeof_tracebuffer_buffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_buffer_header) == 8U,
+ __autogen_confused_about_alignof_tracebuffer_buffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracebuffer_env) == 24U,
+ __autogen_confused_about_sizeof_tracebuffer_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracebuffer_env) == 8U,
+ __autogen_confused_about_alignof_tracebuffer_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_header) == 40U,
+ __autogen_confused_about_sizeof_tracebuffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_header) == 8U,
+ __autogen_confused_about_alignof_tracebuffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_class_t) == 4U,
+ __autogen_confused_about_sizeof_tracepoint_class)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_class_t) == 4U,
+ __autogen_confused_about_alignof_tracepoint_class)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_desc_t) == 4U,
+ __autogen_confused_about_sizeof_tracepoint_desc)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_desc_t) == 4U,
+ __autogen_confused_about_alignof_tracepoint_desc)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_masks_t) == 4U,
+ __autogen_confused_about_sizeof_tracepoint_masks)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_masks_t) == 4U,
+ __autogen_confused_about_alignof_tracepoint_masks)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_entry_base) == 12U,
+ __autogen_confused_about_sizeof_tracepoint_entry_base)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_entry_base) == 4U,
+ __autogen_confused_about_alignof_tracepoint_entry_base)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_evt_t) == 4U,
+ __autogen_confused_about_sizeof_tracepoint_evt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_evt_t) == 4U,
+ __autogen_confused_about_alignof_tracepoint_evt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_level_t) == 4U,
+ __autogen_confused_about_sizeof_tracepoint_level)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_level_t) == 4U,
+ __autogen_confused_about_alignof_tracepoint_level)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_subsystem_t) == 4U,
+ __autogen_confused_about_sizeof_tracepoint_subsystem)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_subsystem_t) == 4U,
+ __autogen_confused_about_alignof_tracepoint_subsystem)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_unpacked_entry) == 12U,
+ __autogen_confused_about_sizeof_tracepoint_unpacked_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_unpacked_entry) == 4U,
+ __autogen_confused_about_alignof_tracepoint_unpacked_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vclient_info) == 32U,
+ __autogen_confused_about_sizeof_vclient_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vclient_info) == 8U,
+ __autogen_confused_about_alignof_vclient_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_entry) == 24U,
+ __autogen_confused_about_sizeof_vcpu_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_entry) == 8U,
+ __autogen_confused_about_alignof_vcpu_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_table) == 16U,
+ __autogen_confused_about_sizeof_vcpu_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_table) == 8U,
+ __autogen_confused_about_alignof_vcpu_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_ctrl_registers) == 8U,
+ __autogen_confused_about_sizeof_vfp_ctrl_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_ctrl_registers) == 4U,
+ __autogen_confused_about_alignof_vfp_ctrl_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ctrl_registers_t) == 8U,
+ __autogen_confused_about_sizeof_vfp_ctrl_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ctrl_registers_t) == 4U,
+ __autogen_confused_about_alignof_vfp_ctrl_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ops_t) == 4U,
+ __autogen_confused_about_sizeof_vfp_ops)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ops_t) == 4U,
+ __autogen_confused_about_alignof_vfp_ops)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_register) == 16U,
+ __autogen_confused_about_sizeof_vfp_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_register) == 16U,
+ __autogen_confused_about_alignof_vfp_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_register_t) == 16U,
+ __autogen_confused_about_sizeof_vfp_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_register_t) == 16U,
+ __autogen_confused_about_alignof_vfp_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_registers) == 528U,
+ __autogen_confused_about_sizeof_vfp_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_registers) == 16U,
+ __autogen_confused_about_alignof_vfp_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_registers_t) == 528U,
+ __autogen_confused_about_sizeof_vfp_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_registers_t) == 16U,
+ __autogen_confused_about_alignof_vfp_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_pool) == 16U,
+ __autogen_confused_about_sizeof_virtmem_pool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_pool) == 8U,
+ __autogen_confused_about_alignof_virtmem_pool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtual_interrupt_lines) == 16U,
+ __autogen_confused_about_sizeof_virtual_interrupt_lines)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtual_interrupt_lines) == 8U,
+ __autogen_confused_about_alignof_virtual_interrupt_lines)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vserver_info) == 32U,
+ __autogen_confused_about_sizeof_vserver_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vserver_info) == 8U,
+ __autogen_confused_about_alignof_vserver_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_service_descriptor) == 24U,
+ __autogen_confused_about_sizeof_vservices_service_descriptor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_service_descriptor) == 8U,
+ __autogen_confused_about_alignof_vservices_service_descriptor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vservices_transport_type_t) == 4U,
+ __autogen_confused_about_sizeof_vservices_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vservices_transport_type_t) == 4U,
+ __autogen_confused_about_alignof_vservices_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transport_microvisor) == 120U,
+ __autogen_confused_about_sizeof_vservices_transport_microvisor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transport_microvisor) == 8U,
+ __autogen_confused_about_alignof_vservices_transport_microvisor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transports) == 16U,
+ __autogen_confused_about_sizeof_vservices_transports)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
+ __autogen_confused_about_alignof_vservices_transports)
+#endif
+
+#else
+
+/**
+ * okl4_arm_mpidr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF0_ARM_MPIDR (255)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF1_ARM_MPIDR (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF2_ARM_MPIDR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_MT_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MT_ARM_MPIDR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_U_ARM_MPIDR) */
+#define OKL4_ASM_MASK_U_ARM_MPIDR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_U_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_U_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_MP_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MP_ARM_MPIDR (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF3_ARM_MPIDR (255 << 32)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF3_ARM_MPIDR (8)
+
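Each bit-field in the block above is described by a MASK/SHIFT/WIDTH triplet: the field occupies WIDTH bits starting at bit SHIFT, and MASK selects those bits in place. As a minimal C sketch (not part of the patch; the helper name mpidr_get_aff1 is illustrative), extracting the Aff1 field from an MPIDR value uses only the macros defined above:

    #include <stdint.h>

    /* Extract Aff1 (bits [15:8]) from an MPIDR value: mask the field in
     * place, then shift it down to bit 0. Illustrative sketch only. */
    static inline uint64_t mpidr_get_aff1(uint64_t mpidr)
    {
        return (mpidr & OKL4_ASM_MASK_AFF1_ARM_MPIDR) >>
                OKL4_ASM_SHIFT_AFF1_ARM_MPIDR;
    }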
+
+/**
+ * uint32_t
+ **/
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES (4)
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES (4)
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON (3735928559)
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF (-1)
+
+/**
+ * okl4_arm_psci_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF (0x2)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON (0x3)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO (0x4)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE (0x5)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE (0x6)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU (0x7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF (0x8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET (0x9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES (0xa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE (0xb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND (0xc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE (0xd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND (0xe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE (0xf)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY (0x10)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT (0x11)
+
+/**
+ * okl4_arm_psci_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS (0xfffffff7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DISABLED (0xfffffff8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT (0xfffffff9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE (0xfffffffa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING (0xfffffffb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON (0xfffffffc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DENIED (0xfffffffd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS (0xfffffffe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ * okl4_arm_psci_suspend_state_t
+ **/
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU (0)
+
+/*lint -esym(621, OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
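In the other direction, the same triplets compose a value: each field is shifted into position and then masked. A hedged sketch (the helper name make_psci_suspend_state is hypothetical, not from this header) building a suspend-state word from its state-ID, power-down and power-level fields with the macros above:

    #include <stdint.h>

    /* Compose a PSCI suspend-state word from its fields using the
     * MASK/SHIFT definitions above. Illustrative sketch only. */
    static inline uint32_t make_psci_suspend_state(uint32_t state_id,
                                                   uint32_t power_down,
                                                   uint32_t power_level)
    {
        uint32_t v = 0;
        v |= (state_id    << OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) &
             OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE;
        v |= (power_down  << OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) &
             OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE;
        v |= (power_level << OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) &
             OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE;
        return v;
    }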
+
+/**
+ * okl4_arm_sctlr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR (1 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR (1 << 9)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED11_ARM_SCTLR (1 << 11)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1 << 12)
+/*lint -esym(621, OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR (1 << 13)
+/*lint -esym(621, OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR (1 << 14)
+/*lint -esym(621, OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR (1 << 18)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 19)
+/*lint -esym(621, OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 20)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED22_ARM_SCTLR (1 << 22)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED23_ARM_SCTLR (1 << 23)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR (1 << 25)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR (1 << 29)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
+
+
+/**
+ * okl4_arm_smccc_arch_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 (0x8000)
+
+/**
+ * okl4_arm_smccc_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ * okl4_count_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS (12)
+
+/** The maximum limit for segment index returned in mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK (1023)
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS (256)
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS (-1)
+
+/**
+ * okl4_kcap_t
+ **/
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID (-1)
+
+/**
+ * okl4_interrupt_number_t
+ **/
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ (1023)
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ (1023)
+
+/**
+ * okl4_lsize_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE (4096)
+
+/**
+ * okl4_laddr_t
+ **/
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END (17592186044416)
+
+/**
+ * okl4_axon_data_info_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_USR_AXON_DATA_INFO (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_LADDR_AXON_DATA_INFO (2305843009213693951 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/**
+ * okl4_axon_queue_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (31)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE (31 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/**
+ * okl4_axon_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/**
+ * okl4_page_cache_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_ASM_PAGE_CACHE_WRITECOMBINE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEFAULT) */
+#define OKL4_ASM_PAGE_CACHE_DEFAULT (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_RX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_RX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_TX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_TX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_ASM_PAGE_CACHE_TRACEBUFFER (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITEBACK) */
+#define OKL4_ASM_PAGE_CACHE_WRITEBACK (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_ASM_PAGE_CACHE_WRITETHROUGH (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_GRE (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_NGRE (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_STRONG) */
+#define OKL4_ASM_PAGE_CACHE_STRONG (0x7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_MASK) */
+#define OKL4_ASM_PAGE_CACHE_HW_MASK (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE (0x8000004)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE (0x8000008)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE (0x800000c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH (0x8000011)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH (0x8000012)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH (0x8000013)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH (0x8000014)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH (0x8000015)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH (0x8000016)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH (0x8000017)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH (0x8000018)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH (0x8000019)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH (0x800001a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH (0x800001b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH (0x800001c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH (0x800001d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH (0x800001e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH (0x800001f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH (0x8000021)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH (0x8000022)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH (0x8000023)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH (0x8000024)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH (0x8000025)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH (0x8000026)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH (0x8000027)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH (0x8000028)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH (0x8000029)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH (0x800002a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH (0x800002b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH (0x800002c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH (0x800002d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH (0x800002e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH (0x800002f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH (0x8000031)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH (0x8000032)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH (0x8000033)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH (0x8000034)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH (0x8000035)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH (0x8000036)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH (0x8000037)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH (0x8000038)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH (0x8000039)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH (0x800003a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH (0x800003b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH (0x800003c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH (0x800003d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH (0x800003e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH (0x800003f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH (0x8000041)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH (0x8000042)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH (0x8000043)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_NSH (0x8000044)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH (0x8000045)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH (0x8000046)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH (0x8000047)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH (0x8000048)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH (0x8000049)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH (0x800004a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH (0x800004b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH (0x800004c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH (0x800004d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH (0x800004e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH (0x800004f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH (0x8000051)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH (0x8000052)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH (0x8000053)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH (0x8000054)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH (0x8000055)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH (0x8000056)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH (0x8000057)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH (0x8000058)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH (0x8000059)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH (0x800005a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH (0x800005b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH (0x800005c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH (0x800005d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH (0x800005e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH (0x800005f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH (0x8000061)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH (0x8000062)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH (0x8000063)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH (0x8000064)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH (0x8000065)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH (0x8000066)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH (0x8000067)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH (0x8000068)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH (0x8000069)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH (0x800006a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH (0x800006b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH (0x800006c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH (0x800006d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH (0x800006e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH (0x800006f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH (0x8000071)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH (0x8000072)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH (0x8000073)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH (0x8000074)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH (0x8000075)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH (0x8000076)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH (0x8000077)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH (0x8000078)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH (0x8000079)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH (0x800007a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH (0x800007b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH (0x800007c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH (0x800007d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH (0x800007e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH (0x800007f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH (0x8000081)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH (0x8000082)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH (0x8000083)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH (0x8000084)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH (0x8000085)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH (0x8000086)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH (0x8000087)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH (0x8000088)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH (0x8000089)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH (0x800008a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH (0x800008b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH (0x800008c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH (0x800008d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH (0x800008e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH (0x800008f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH (0x8000091)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH (0x8000092)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH (0x8000093)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH (0x8000094)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH (0x8000095)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH (0x8000096)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH (0x8000097)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH (0x8000098)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH (0x8000099)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH (0x800009a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH (0x800009b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH (0x800009c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH (0x800009d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH (0x800009e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH (0x800009f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH (0x80000a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH (0x80000a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH (0x80000a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH (0x80000a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH (0x80000a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH (0x80000a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH (0x80000a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH (0x80000a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH (0x80000a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH (0x80000aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH (0x80000ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH (0x80000ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH (0x80000ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH (0x80000ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH (0x80000af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH (0x80000b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH (0x80000b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH (0x80000b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH (0x80000b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH (0x80000b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH (0x80000b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH (0x80000b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH (0x80000b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH (0x80000b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH (0x80000ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH (0x80000bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH (0x80000bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH (0x80000bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH (0x80000be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH (0x80000bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH (0x80000c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH (0x80000c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH (0x80000c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH (0x80000c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH (0x80000c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH (0x80000c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH (0x80000c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH (0x80000c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH (0x80000c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH (0x80000ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH (0x80000cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH (0x80000cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH (0x80000cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH (0x80000ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH (0x80000cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH (0x80000d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH (0x80000d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH (0x80000d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH (0x80000d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH (0x80000d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH (0x80000d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH (0x80000d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH (0x80000d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH (0x80000d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH (0x80000da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH (0x80000db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH (0x80000dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH (0x80000dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH (0x80000de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH (0x80000df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH (0x80000e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH (0x80000e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH (0x80000e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH (0x80000e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH (0x80000e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH (0x80000e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH (0x80000e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH (0x80000e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH (0x80000e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH (0x80000ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH (0x80000eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH (0x80000ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH (0x80000ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH (0x80000ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH (0x80000ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH (0x80000f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH (0x80000f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH (0x80000f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH (0x80000f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH (0x80000f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH (0x80000f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH (0x80000f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH (0x80000f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH (0x80000f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH (0x80000fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH (0x80000fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH (0x80000fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH (0x80000fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH (0x80000fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH (0x80000ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH (0x8000211)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH (0x8000212)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH (0x8000213)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH (0x8000214)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH (0x8000215)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH (0x8000216)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH (0x8000217)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH (0x8000218)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH (0x8000219)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH (0x800021a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH (0x800021b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH (0x800021c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH (0x800021d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH (0x800021e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH (0x800021f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH (0x8000221)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH (0x8000222)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH (0x8000223)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH (0x8000224)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH (0x8000225)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH (0x8000226)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH (0x8000227)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH (0x8000228)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH (0x8000229)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH (0x800022a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH (0x800022b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH (0x800022c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH (0x800022d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH (0x800022e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH (0x800022f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH (0x8000231)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH (0x8000232)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH (0x8000233)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH (0x8000234)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH (0x8000235)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH (0x8000236)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH (0x8000237)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH (0x8000238)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH (0x8000239)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH (0x800023a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH (0x800023b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH (0x800023c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH (0x800023d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH (0x800023e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH (0x800023f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH (0x8000241)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH (0x8000242)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH (0x8000243)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_OSH (0x8000244)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH (0x8000245)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH (0x8000246)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH (0x8000247)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH (0x8000248)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH (0x8000249)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH (0x800024a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH (0x800024b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH (0x800024c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH (0x800024d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH (0x800024e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH (0x800024f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH (0x8000251)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH (0x8000252)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH (0x8000253)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH (0x8000254)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH (0x8000255)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH (0x8000256)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH (0x8000257)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH (0x8000258)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH (0x8000259)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH (0x800025a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH (0x800025b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH (0x800025c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH (0x800025d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH (0x800025e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH (0x800025f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH (0x8000261)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH (0x8000262)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH (0x8000263)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH (0x8000264)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH (0x8000265)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH (0x8000266)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH (0x8000267)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH (0x8000268)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH (0x8000269)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH (0x800026a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH (0x800026b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH (0x800026c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH (0x800026d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH (0x800026e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH (0x800026f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH (0x8000271)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH (0x8000272)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH (0x8000273)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH (0x8000274)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH (0x8000275)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH (0x8000276)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH (0x8000277)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH (0x8000278)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH (0x8000279)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH (0x800027a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH (0x800027b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH (0x800027c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH (0x800027d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH (0x800027e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH (0x800027f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH (0x8000281)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH (0x8000282)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH (0x8000283)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH (0x8000284)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH (0x8000285)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH (0x8000286)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH (0x8000287)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH (0x8000288)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH (0x8000289)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH (0x800028a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH (0x800028b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH (0x800028c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH (0x800028d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH (0x800028e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH (0x800028f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH (0x8000291)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH (0x8000292)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH (0x8000293)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH (0x8000294)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH (0x8000295)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH (0x8000296)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH (0x8000297)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH (0x8000298)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH (0x8000299)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH (0x800029a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH (0x800029b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH (0x800029c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH (0x800029d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH (0x800029e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH (0x800029f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH (0x80002a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH (0x80002a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH (0x80002a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH (0x80002a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH (0x80002a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH (0x80002a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH (0x80002a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH (0x80002a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH (0x80002a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH (0x80002aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH (0x80002ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH (0x80002ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH (0x80002ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH (0x80002ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH (0x80002af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH (0x80002b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH (0x80002b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH (0x80002b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH (0x80002b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH (0x80002b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH (0x80002b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH (0x80002b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH (0x80002b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH (0x80002b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH (0x80002ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH (0x80002bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH (0x80002bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH (0x80002bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH (0x80002be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH (0x80002bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH (0x80002c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH (0x80002c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH (0x80002c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH (0x80002c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH (0x80002c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH (0x80002c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH (0x80002c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH (0x80002c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH (0x80002c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH (0x80002ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH (0x80002cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH (0x80002cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH (0x80002cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH (0x80002ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH (0x80002cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH (0x80002d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH (0x80002d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH (0x80002d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH (0x80002d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH (0x80002d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH (0x80002d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH (0x80002d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH (0x80002d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH (0x80002d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH (0x80002da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH (0x80002db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH (0x80002dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH (0x80002dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH (0x80002de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH (0x80002df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH (0x80002e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH (0x80002e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH (0x80002e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH (0x80002e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH (0x80002e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH (0x80002e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH (0x80002e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH (0x80002e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH (0x80002e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH (0x80002ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH (0x80002eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH (0x80002ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH (0x80002ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH (0x80002ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH (0x80002ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH (0x80002f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH (0x80002f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH (0x80002f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH (0x80002f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH (0x80002f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH (0x80002f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH (0x80002f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH (0x80002f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH (0x80002f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH (0x80002fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH (0x80002fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH (0x80002fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH (0x80002fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH (0x80002fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH (0x80002ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH (0x8000311)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH (0x8000312)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH (0x8000313)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH (0x8000314)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH (0x8000315)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH (0x8000316)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH (0x8000317)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH (0x8000318)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH (0x8000319)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH (0x800031a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH (0x800031b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH (0x800031c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH (0x800031d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH (0x800031e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH (0x800031f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH (0x8000321)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH (0x8000322)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH (0x8000323)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH (0x8000324)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH (0x8000325)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH (0x8000326)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH (0x8000327)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH (0x8000328)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH (0x8000329)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH (0x800032a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH (0x800032b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH (0x800032c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH (0x800032d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH (0x800032e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH (0x800032f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH (0x8000331)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH (0x8000332)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH (0x8000333)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH (0x8000334)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH (0x8000335)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH (0x8000336)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH (0x8000337)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH (0x8000338)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH (0x8000339)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH (0x800033a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH (0x800033b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH (0x800033c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH (0x800033d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH (0x800033e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH (0x800033f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH (0x8000341)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH (0x8000342)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH (0x8000343)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_ISH (0x8000344)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH (0x8000345)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH (0x8000346)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH (0x8000347)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH (0x8000348)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH (0x8000349)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH (0x800034a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH (0x800034b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH (0x800034c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH (0x800034d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH (0x800034e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH (0x800034f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH (0x8000351)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH (0x8000352)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH (0x8000353)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH (0x8000354)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH (0x8000355)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH (0x8000356)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH (0x8000357)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH (0x8000358)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH (0x8000359)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH (0x800035a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH (0x800035b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH (0x800035c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH (0x800035d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH (0x800035e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH (0x800035f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH (0x8000361)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH (0x8000362)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH (0x8000363)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH (0x8000364)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH (0x8000365)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH (0x8000366)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH (0x8000367)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH (0x8000368)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH (0x8000369)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH (0x800036a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH (0x800036b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH (0x800036c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH (0x800036d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH (0x800036e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH (0x800036f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH (0x8000371)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH (0x8000372)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH (0x8000373)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH (0x8000374)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH (0x8000375)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH (0x8000376)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH (0x8000377)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH (0x8000378)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH (0x8000379)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH (0x800037a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH (0x800037b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH (0x800037c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH (0x800037d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH (0x800037e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH (0x800037f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH (0x8000381)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH (0x8000382)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH (0x8000383)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH (0x8000384)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH (0x8000385)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH (0x8000386)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH (0x8000387)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH (0x8000388)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH (0x8000389)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH (0x800038a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH (0x800038b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH (0x800038c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH (0x800038d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH (0x800038e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH (0x800038f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH (0x8000391)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH (0x8000392)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH (0x8000393)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH (0x8000394)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH (0x8000395)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH (0x8000396)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH (0x8000397)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH (0x8000398)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH (0x8000399)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH (0x800039a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH (0x800039b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH (0x800039c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH (0x800039d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH (0x800039e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH (0x800039f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH (0x80003a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH (0x80003a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH (0x80003a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH (0x80003a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH (0x80003a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH (0x80003a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH (0x80003a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH (0x80003a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH (0x80003a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH (0x80003aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH (0x80003ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH (0x80003ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH (0x80003ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH (0x80003ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH (0x80003af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH (0x80003b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH (0x80003b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH (0x80003b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH (0x80003b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH (0x80003b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH (0x80003b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH (0x80003b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH (0x80003b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH (0x80003b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH (0x80003ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH (0x80003bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH (0x80003bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH (0x80003bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH (0x80003be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH (0x80003bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH (0x80003c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH (0x80003c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH (0x80003c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH (0x80003c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH (0x80003c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH (0x80003c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH (0x80003c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH (0x80003c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH (0x80003c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH (0x80003ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH (0x80003cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH (0x80003cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH (0x80003cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH (0x80003ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH (0x80003cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH (0x80003d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH (0x80003d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH (0x80003d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH (0x80003d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH (0x80003d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH (0x80003d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH (0x80003d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH (0x80003d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH (0x80003d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH (0x80003da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH (0x80003db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH (0x80003dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH (0x80003dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH (0x80003de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH (0x80003df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH (0x80003e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH (0x80003e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH (0x80003e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH (0x80003e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH (0x80003e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH (0x80003e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH (0x80003e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH (0x80003e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH (0x80003e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH (0x80003ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH (0x80003eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH (0x80003ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH (0x80003ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH (0x80003ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH (0x80003ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH (0x80003f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH (0x80003f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH (0x80003f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH (0x80003f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH (0x80003f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH (0x80003f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH (0x80003f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH (0x80003f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH (0x80003f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH (0x80003fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH (0x80003fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH (0x80003fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH (0x80003fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH (0x80003fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH (0x80003ff)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_MAX) */
+#define OKL4_ASM_PAGE_CACHE_MAX (0x80003ff)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_INVALID) */
+#define OKL4_ASM_PAGE_CACHE_INVALID (0xffffffff)
+
+/**
+ * okl4_cpu_exec_mode
+ **/
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE (0)
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE (4)
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE (2)
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE (3)
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE (1)
+
+/**
+ * okl4_cpu_mode_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_MASK_EXEC_MODE_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_ASM_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_MASK_ENDIAN_CPU_MODE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_WIDTH_ENDIAN_CPU_MODE (1)
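+/*
+ * Illustrative sketch (not part of the generated interface): the CPU_MODE
+ * field macros above describe a 3-bit execution-mode field at bit 0 and a
+ * 1-bit endianness flag at bit 7.  A hypothetical C fragment composing a
+ * big-endian Thumb mode value could look like:
+ *
+ *     uint32_t cpu_mode = ((uint32_t)OKL4_THUMB_MODE
+ *                             << OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE)
+ *                       | (1u << OKL4_ASM_SHIFT_ENDIAN_CPU_MODE);
+ */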
+
+
+/**
+ * okl4_page_perms_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_NONE) */
+#define OKL4_ASM_PAGE_PERMS_NONE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_X) */
+#define OKL4_ASM_PAGE_PERMS_X (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_W) */
+#define OKL4_ASM_PAGE_PERMS_W (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_WX) */
+#define OKL4_ASM_PAGE_PERMS_WX (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_R) */
+#define OKL4_ASM_PAGE_PERMS_R (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RX) */
+#define OKL4_ASM_PAGE_PERMS_RX (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RW) */
+#define OKL4_ASM_PAGE_PERMS_RW (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RWX) */
+#define OKL4_ASM_PAGE_PERMS_RWX (0x7)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_MAX) */
+#define OKL4_ASM_PAGE_PERMS_MAX (0x7)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_INVALID) */
+#define OKL4_ASM_PAGE_PERMS_INVALID (0xffffffff)
+
+/**
+ * okl4_error_t
+ **/
+/**
+ KSP returned OK
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_OK) */
+#define OKL4_ASM_ERROR_KSP_OK (0x0)
+/**
+ The operation succeeded
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_OK) */
+#define OKL4_ASM_ERROR_OK (0x0)
+/**
+ The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STARTED) */
+#define OKL4_ASM_ERROR_ALREADY_STARTED (0x1)
+/**
+ The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STOPPED) */
+#define OKL4_ASM_ERROR_ALREADY_STOPPED (0x2)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ASM_ERROR_AXON_AREA_TOO_BIG (0x3)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE (0x4)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ASM_ERROR_AXON_INVALID_OFFSET (0x5)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED (0x6)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY (0x7)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED (0x8)
+/**
+ A blocking operation was cancelled because the operation was aborted.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_CANCELLED) */
+#define OKL4_ASM_ERROR_CANCELLED (0x9)
+/**
+ The operation failed due to an existing mapping. Mapping
+ operations must not overlap an existing mapping. Unmapping
+ must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_EXISTING_MAPPING) */
+#define OKL4_ASM_ERROR_EXISTING_MAPPING (0xa)
+/**
+ The operation requested with a segment failed due to
+ insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS (0xb)
+/**
+ The operation did not complete because it was interrupted by a
+ preemption. This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPTED) */
+#define OKL4_ASM_ERROR_INTERRUPTED (0xc)
+/**
+ Attempt to attach an interrupt to an IRQ number when the
+ interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED (0xd)
+/**
+ Attempt to use an IRQ number that is out of range, of
+ the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ (0xe)
+/**
+ Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED (0xf)
+/**
+ An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ASM_ERROR_INVALID_ARGUMENT (0x10)
+/**
+ The operation failed because one of the arguments does not refer to a
+ valid object.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ASM_ERROR_INVALID_DESIGNATOR (0x11)
+/**
+ The operation failed because the power_state
+ argument is invalid.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ASM_ERROR_INVALID_POWER_STATE (0x12)
+/**
+ The operation failed because the given segment index does
+ not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX (0x13)
+/**
+ A user provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MEMORY_FAULT) */
+#define OKL4_ASM_ERROR_MEMORY_FAULT (0x14)
+/**
+ The operation failed because there is no mapping at the
+ specified location.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MISSING_MAPPING) */
+#define OKL4_ASM_ERROR_MISSING_MAPPING (0x15)
+/**
+ The delete operation failed because the KMMU context is not
+ empty.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT (0x16)
+/**
+ The lookup operation failed because the given virtual address
+ of the given KMMU context is not mapped at the given physical
+ segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ASM_ERROR_NOT_IN_SEGMENT (0x17)
+/**
+ The operation failed because the caller is not on the last
+ online CPU.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_LAST_CPU) */
+#define OKL4_ASM_ERROR_NOT_LAST_CPU (0x18)
+/**
+ Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NO_RESOURCES) */
+#define OKL4_ASM_ERROR_NO_RESOURCES (0x19)
+/**
+ Operation failed because pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ASM_ERROR_PIPE_BAD_STATE (0x1a)
+/**
+ Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_EMPTY) */
+#define OKL4_ASM_ERROR_PIPE_EMPTY (0x1b)
+/**
+ Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_FULL) */
+#define OKL4_ASM_ERROR_PIPE_FULL (0x1c)
+/**
+ Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_NOT_READY) */
+#define OKL4_ASM_ERROR_PIPE_NOT_READY (0x1d)
+/**
+ Message was truncated because the receive buffer size is too small.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW (0x1e)
+/**
+ The operation failed because at least one VCPU has a monitored
+ power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ASM_ERROR_POWER_VCPU_RESUMED (0x1f)
+/**
+ The operation requires a segment to be unused, or not attached
+ to an MMU context.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_SEGMENT_USED) */
+#define OKL4_ASM_ERROR_SEGMENT_USED (0x20)
+/*lint -esym(621, OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED (0x21)
+/**
+ The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_ACTIVE) */
+#define OKL4_ASM_ERROR_TIMER_ACTIVE (0x22)
+/**
+ The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_CANCELLED) */
+#define OKL4_ASM_ERROR_TIMER_CANCELLED (0x23)
+/**
+ Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TRY_AGAIN) */
+#define OKL4_ASM_ERROR_TRY_AGAIN (0x24)
+/**
+ The non-blocking operation failed because it would
+ block on a resource.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_WOULD_BLOCK) */
+#define OKL4_ASM_ERROR_WOULD_BLOCK (0x25)
+/**
+ Insufficient resources
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ASM_ERROR_ALLOC_EXHAUSTED (0x26)
+/**
+ KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_0) */
+#define OKL4_ASM_ERROR_KSP_ERROR_0 (0x10000010)
+/**
+ KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_1) */
+#define OKL4_ASM_ERROR_KSP_ERROR_1 (0x10000011)
+/**
+ KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_2) */
+#define OKL4_ASM_ERROR_KSP_ERROR_2 (0x10000012)
+/**
+ KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_3) */
+#define OKL4_ASM_ERROR_KSP_ERROR_3 (0x10000013)
+/**
+ KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_4) */
+#define OKL4_ASM_ERROR_KSP_ERROR_4 (0x10000014)
+/**
+ KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_5) */
+#define OKL4_ASM_ERROR_KSP_ERROR_5 (0x10000015)
+/**
+ KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_6) */
+#define OKL4_ASM_ERROR_KSP_ERROR_6 (0x10000016)
+/**
+ KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_7) */
+#define OKL4_ASM_ERROR_KSP_ERROR_7 (0x10000017)
+/**
+ Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ASM_ERROR_KSP_INVALID_ARG (0x80000001)
+/**
+ KSP does not implement the requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED (0x80000002)
+/**
+ User did not supply rights for the requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS (0x80000003)
+/**
+ Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED (0x80000004)
+/**
+ Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_NOT_IMPLEMENTED (0xffffffff)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MAX) */
+#define OKL4_ASM_ERROR_MAX (0xffffffff)
+
+/**
+ * okl4_gicd_icfgr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_MASK_EDGE_GICD_ICFGR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/**
+ * okl4_sgi_target_t
+ **/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_LISTED) */
+#define OKL4_ASM_SGI_TARGET_LISTED (0x0)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_ASM_SGI_TARGET_ALL_OTHERS (0x1)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_SELF) */
+#define OKL4_ASM_SGI_TARGET_SELF (0x2)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_MAX) */
+#define OKL4_ASM_SGI_TARGET_MAX (0x2)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_INVALID) */
+#define OKL4_ASM_SGI_TARGET_INVALID (0xffffffff)
+
+/**
+ * okl4_gicd_sgir_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_MASK_SGIINTID_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_ASM_MASK_NSATT_GICD_SGIR) */
+#define OKL4_ASM_MASK_NSATT_GICD_SGIR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
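+/*
+ * Illustrative sketch (not part of the generated interface): building a
+ * GICD_SGIR register value from the field macros above, targeting a listed
+ * set of CPUs.  The SGI number (5) and CPU target mask (0x3) used here are
+ * hypothetical.
+ *
+ *     uint32_t sgir = (5u << OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR)
+ *                   | (0x3u << OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR)
+ *                   | ((uint32_t)OKL4_ASM_SGI_TARGET_LISTED
+ *                           << OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR);
+ */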
+
+
+/**
+ * okl4_link_role_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SYMMETRIC) */
+#define OKL4_ASM_LINK_ROLE_SYMMETRIC (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SERVER) */
+#define OKL4_ASM_LINK_ROLE_SERVER (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_CLIENT) */
+#define OKL4_ASM_LINK_ROLE_CLIENT (0x2)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_MAX) */
+#define OKL4_ASM_LINK_ROLE_MAX (0x2)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_INVALID) */
+#define OKL4_ASM_LINK_ROLE_INVALID (0xffffffff)
+
+/**
+ * okl4_link_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES (0x2)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_MAX (0x2)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+/**
+ * okl4_mmu_lookup_index_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
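+/*
+ * Illustrative sketch (not part of the generated interface): unpacking an
+ * okl4_mmu_lookup_index_t word, which carries an error code in bits 0-15
+ * and a segment index in bits 16-31.  The variable names are hypothetical.
+ *
+ *     uint32_t err = (lookup & OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX)
+ *                         >> OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX;
+ *     uint32_t idx = (lookup & OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX)
+ *                         >> OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX;
+ */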
+
+
+/**
+ * okl4_mmu_lookup_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE (1023)
+/*lint -esym(621, OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE (18014398509481983 << 10)
+/*lint -esym(621, OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/**
+ * okl4_nanoseconds_t
+ **/
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS (36028797018963968)
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS (1000000)
+
+/**
+ * _okl4_page_attribute_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE (7)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE (268435455 << 4)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/**
+ * okl4_pipe_control_t
+ **/
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED (4)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET (0)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED (3)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY (2)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY (1)
+
+/*lint -esym(621, OKL4_ASM_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_OPERATION_PIPE_CONTROL (7 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL (3)
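+/*
+ * Illustrative sketch (not part of the generated interface): a pipe-control
+ * word packs a do_op flag in bit 0 and a 3-bit operation code in bits 1-3.
+ * A hypothetical fragment requesting OKL4_PIPE_CONTROL_OP_SET_TX_READY:
+ *
+ *     uint8_t control = (1u << OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL)
+ *             | ((uint8_t)OKL4_PIPE_CONTROL_OP_SET_TX_READY
+ *                     << OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL);
+ */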
+
+
+/**
+ * okl4_pipe_state_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_RESET_PIPE_STATE) */
+#define OKL4_ASM_MASK_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_HALTED_PIPE_STATE) */
+#define OKL4_ASM_MASK_HALTED_PIPE_STATE (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_READY_PIPE_STATE (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_READY_PIPE_STATE (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_WAITING_PIPE_STATE) */
+#define OKL4_ASM_MASK_WAITING_PIPE_STATE (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE (1)
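+/*
+ * Illustrative sketch (not part of the generated interface): each pipe-state
+ * field above is a single bit, so a state word can be tested directly with
+ * the mask macros.  The variable name is hypothetical.
+ *
+ *     if ((state & OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE) != 0) {
+ *         ... a received message is waiting to be read ...
+ *     }
+ */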
+
+
+/**
+ * okl4_power_state_t
+ **/
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE (0)
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE (256)
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF (1)
+
+/**
+ * okl4_register_set_t
+ **/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_CPU_REGS) */
+#define OKL4_ASM_REGISTER_SET_CPU_REGS (0x0)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_REGS (0x1)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS (0x2)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP64_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP64_REGS (0x3)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP128_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP128_REGS (0x4)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_MAX) */
+#define OKL4_ASM_REGISTER_SET_MAX (0x4)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_INVALID) */
+#define OKL4_ASM_REGISTER_SET_INVALID (0xffffffff)
+
+/**
+ * okl4_register_and_set_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_SET_REGISTER_AND_SET (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/**
+ * okl4_scheduler_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/**
+ * okl4_sdk_version_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION (63)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_MASK_RELEASE_SDK_VERSION (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_MASK_MINOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MINOR_SDK_VERSION (63 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAJOR_SDK_VERSION (15 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
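+/*
+ * Illustrative sketch (not part of the generated interface): decoding the
+ * major and minor fields of an okl4_sdk_version_t word with the macros
+ * above.  The variable names are hypothetical.
+ *
+ *     uint32_t major = (version & OKL4_ASM_MASK_MAJOR_SDK_VERSION)
+ *                           >> OKL4_ASM_SHIFT_MAJOR_SDK_VERSION;
+ *     uint32_t minor = (version & OKL4_ASM_MASK_MINOR_SDK_VERSION)
+ *                           >> OKL4_ASM_SHIFT_MINOR_SDK_VERSION;
+ */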
+
+
+/**
+ * okl4_timer_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_UNITS_TIMER_FLAGS (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ALIGN_TIMER_FLAGS (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_RELOAD_TIMER_FLAGS (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS (1)
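+/*
+ * Illustrative sketch (not part of the generated interface): the timer-flag
+ * fields above are single bits, so the mask macros can be OR-ed together to
+ * build a flags word, e.g. for an active periodic timer:
+ *
+ *     uint32_t flags = OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS
+ *                    | OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS;
+ */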
+
+
+/**
+ * okl4_tracepoint_class_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_PRIMARY (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SECONDARY (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_TERTIARY (0x4)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_MAX) */
+#define OKL4_ASM_TRACEPOINT_CLASS_MAX (0x4)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_ASM_TRACEPOINT_CLASS_INVALID (0xffffffff)
+
+/**
+ * _okl4_tracepoint_desc_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_ID_TRACEPOINT_DESC (255)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_USER_TRACEPOINT_DESC (1 << 8)
+/*lint -esym(621, _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC (1 << 9)
+/*lint -esym(621, _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC (63 << 10)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC (63 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC (63 << 22)
+/*lint -esym(621, _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK__R1_TRACEPOINT_DESC (15 << 28)
+/*lint -esym(621, _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/**
+ * _okl4_tracepoint_masks_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS (65535)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS (65535 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/**
+ * okl4_tracepoint_evt_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV (0x4)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED (0x5)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA (0x6)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE (0x7)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT (0x8)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA (0x9)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE (0xa)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT (0xb)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND (0xc)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK (0xd)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE (0xe)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED (0xf)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH (0x10)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE (0x11)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI (0x12)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING (0x13)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD (0x14)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS (0x15)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK (0x16)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE (0x17)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT (0x18)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG (0x19)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL (0x1a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY (0x1b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK (0x1c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS (0x1d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK (0x1e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT (0x1f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME (0x20)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL (0x21)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT (0x22)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT (0x23)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE (0x24)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN (0x25)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE (0x26)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN (0x27)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE (0x28)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN (0x29)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE (0x2a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN (0x2b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS (0x2c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS (0x2d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS (0x2e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS (0x2f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL (0x30)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL (0x31)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV (0x32)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND (0x33)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE (0x34)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER (0x35)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS (0x36)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 (0x37)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER (0x38)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS (0x39)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 (0x3a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED (0x3b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED (0x3c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE (0x3d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE (0x3e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA (0x3f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE (0x40)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE (0x41)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA (0x42)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND (0x43)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL (0x44)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION (0x45)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME (0x46)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY (0x47)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START (0x48)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC (0x49)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET (0x4a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START (0x4b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP (0x4c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE (0x4d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV (0x4e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE (0x4f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE (0x50)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY (0x51)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE (0x52)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_MAX) */
+#define OKL4_ASM_TRACEPOINT_EVT_MAX (0x52)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_INVALID) */
+#define OKL4_ASM_TRACEPOINT_EVT_INVALID (0xffffffff)
+
+/**
+ * okl4_tracepoint_level_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_DEBUG (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INFO (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_WARN (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL (0x3)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_MAX (0x3)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INVALID (0xffffffff)
+
+/**
+ * okl4_tracepoint_subsystem_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE (0x2)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX (0x2)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID (0xffffffff)
+
+/**
+ * okl4_vfp_ops_t
+ **/
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_MAX) */
+#define OKL4_ASM_VFP_OPS_MAX (0x0)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_INVALID) */
+#define OKL4_ASM_VFP_OPS_INVALID (0xffffffff)
+
+/**
+ * okl4_vservices_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON (0x0)
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER (0x1)
+/**
+ Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX (0x1)
+/**
+ Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+
+#endif /* !ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_TYPES_H__ */
+/** @} */
+/** @} */
diff --git a/include/microvisor/microvisor.h b/include/microvisor/microvisor.h
new file mode 100644
index 000000000000..3bb8d64b7dc8
--- /dev/null
+++ b/include/microvisor/microvisor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MICROVISOR_H_
+#define _MICROVISOR_H_
+
+/**
+ * @defgroup lib_microvisor The Microvisor Library
+ *
+ * @{
+ *
+ * The Microvisor Library is the primary low-level API between the OKL4
+ * Microvisor and a Cell application or guest-OS. It also provides certain
+ * common data types such as structure definitions used in these interactions.
+ *
+ */
+
+/**
+ * Temporarily define _Bool to allow C++ compilation of
+ * OKL code that makes use of it.
+ */
+#if defined(__cplusplus) && !defined(_Bool)
+#define _OKL4_CPP_BOOL
+#define _Bool bool
+#endif
+
+#define OKL4_INLINE static inline
+
+#if defined(_lint) || defined(_splint)
+#define OKL4_FORCE_INLINE static
+#else
+#define OKL4_FORCE_INLINE static inline __attribute__((always_inline))
+#endif
+
+#include <microvisor/kernel/types.h>
+#include <microvisor/kernel/microvisor.h>
+#include <microvisor/kernel/syscalls.h>
+#include <microvisor/kernel/offsets.h>
+
+/** @} */
+
+/**
+ * Remove temporary definition of _Bool if it was defined
+ */
+#if defined(_OKL4_CPP_BOOL)
+#undef _Bool
+#undef _OKL4_CPP_BOOL
+#endif
+
+#endif /* _MICROVISOR_H_ */
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index d9a526daeb35..75f017ce0dff 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,6 +41,7 @@ enum vmid {
VMID_CP_CAMERA_PREVIEW = 0x1D,
VMID_CP_SPSS_SP_SHARED = 0x22,
VMID_CP_SPSS_HLOS_SHARED = 0x24,
+ VMID_CP_CDSP = 0x2A,
VMID_LAST,
VMID_INVAL = -1
};
diff --git a/include/trace/events/hyp_core_ctl.h b/include/trace/events/hyp_core_ctl.h
new file mode 100644
index 000000000000..3a159e8f36a5
--- /dev/null
+++ b/include/trace/events/hyp_core_ctl.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyp_core_ctl
+
+#if !defined(_TRACE_HYP_CORE_CTL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HYP_CORE_CTL_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(hyp_core_ctl_enable,
+
+ TP_PROTO(bool enable),
+
+ TP_ARGS(enable),
+
+ TP_STRUCT__entry(
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->enable = enable;
+ ),
+
+ TP_printk("enable=%d", __entry->enable)
+);
+
+TRACE_EVENT(hyp_core_ctl_status,
+
+ TP_PROTO(struct hyp_core_ctl_data *hcd, const char *event),
+
+ TP_ARGS(hcd, event),
+
+ TP_STRUCT__entry(
+ __string(event, event)
+ __array(char, reserve, 32)
+ __array(char, reserved, 32)
+ __array(char, our_isolated, 32)
+ __array(char, online, 32)
+ __array(char, isolated, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(event, event);
+ scnprintf(__entry->reserve, sizeof(__entry->reserve), "%*pbl",
+ cpumask_pr_args(&hcd->reserve_cpus));
+	scnprintf(__entry->reserved, sizeof(__entry->reserved), "%*pbl",
+		  cpumask_pr_args(&hcd->final_reserved_cpus));
+	scnprintf(__entry->our_isolated, sizeof(__entry->our_isolated),
+		  "%*pbl", cpumask_pr_args(&hcd->our_isolated_cpus));
+	scnprintf(__entry->online, sizeof(__entry->online), "%*pbl",
+		  cpumask_pr_args(cpu_online_mask));
+	scnprintf(__entry->isolated, sizeof(__entry->isolated), "%*pbl",
+		  cpumask_pr_args(cpu_isolated_mask));
+ ),
+
+ TP_printk("event=%s reserve=%s reserved=%s our_isolated=%s online=%s isolated=%s",
+ __get_str(event), __entry->reserve, __entry->reserved,
+ __entry->our_isolated, __entry->online, __entry->isolated)
+);
+
+#endif /* _TRACE_HYP_CORE_CTL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 63f2bafe104f..7447d329bbba 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -749,9 +749,14 @@ DECLARE_EVENT_CLASS(sched_task_util,
__entry->target_cpu = target_cpu;
__entry->ediff = ediff;
__entry->need_idle = need_idle;
+#ifdef CONFIG_WALT
__entry->latency = p->ravg.mark_start ?
ktime_get_ns() -
p->ravg.mark_start : 0;
+#else
+ __entry->latency = 0;
+#endif
+
),
TP_printk("comm=%s pid=%d task_cpu=%d task_util=%lu nominated_cpu=%d target_cpu=%d energy_diff=%d need_idle=%d latency=%llu",
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2ccd9ccbf9ef..7bd8783a590f 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -31,7 +31,7 @@
#define WB_WORK_REASON \
EM( WB_REASON_BACKGROUND, "background") \
- EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_VMSCAN, "vmscan") \
EM( WB_REASON_SYNC, "sync") \
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ea727f2f8666..717ecaf116a0 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -508,3 +508,8 @@ header-y += msm_dsps.h
header-y += msm-core-interface.h
header-y += msm_rotator.h
header-y += nfc/
+
+ifneq ($(VSERVICES_SUPPORT), "")
+include include/linux/Kbuild.vservices
+endif
+header-y += okl4-link-shbuf.h
diff --git a/include/uapi/linux/okl4-link-shbuf.h b/include/uapi/linux/okl4-link-shbuf.h
new file mode 100644
index 000000000000..69561bc3ec10
--- /dev/null
+++ b/include/uapi/linux/okl4-link-shbuf.h
@@ -0,0 +1,40 @@
+/*
+ * User-visible interface to the driver for inter-cell links using the
+ * shared-buffer transport.
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_OKL4_LINK_SHBUF_H
+#define _LINUX_OKL4_LINK_SHBUF_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Ioctl that indicates a request to raise the outgoing vIRQ. This value is
+ * chosen to avoid conflict with the numbers documented in Linux 4.1's
+ * ioctl-numbers.txt. The argument is a payload to transmit to the receiver.
+ * Note that consecutive transmissions without an interleaved clear of the
+ * interrupt result in the payloads being ORed together.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_TX _IOW(0x8d, 1, __u64)
+
+/*
+ * Ioctl that indicates a request to clear any pending incoming vIRQ. The value
+ * returned through the argument to the ioctl is the payload, which is also
+ * cleared.
+ *
+ * The caller cannot distinguish between the cases of no pending interrupt and
+ * a pending interrupt with payload 0. It is expected that the caller is
+ * communicating with a cooperative sender and has polled their file descriptor
+ * to determine there is a pending interrupt before using this ioctl.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_CLR _IOR(0x8d, 2, __u64)
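+
+/*
+ * A minimal usage sketch, assuming a userspace client has opened the
+ * link's character device (the device node path below is hypothetical,
+ * as is the payload value). The sketch passes a pointer to a __u64 for
+ * both ioctls, following the usual _IOW/_IOR convention:
+ *
+ *	__u64 payload = 0x1;
+ *	int fd = open("/dev/okl4-link-shbuf-example", O_RDWR);
+ *
+ *	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload);
+ *	// ... poll(fd) until an incoming vIRQ is pending ...
+ *	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload);
+ */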
+
+#endif /* _LINUX_OKL4_LINK_SHBUF_H */
diff --git a/include/vservices/Kbuild b/include/vservices/Kbuild
new file mode 100644
index 000000000000..8b955fc84ef1
--- /dev/null
+++ b/include/vservices/Kbuild
@@ -0,0 +1,2 @@
+header-y += protocol/
+header-y += ioctl.h
diff --git a/include/vservices/buffer.h b/include/vservices/buffer.h
new file mode 100644
index 000000000000..910aa07769f2
--- /dev/null
+++ b/include/vservices/buffer.h
@@ -0,0 +1,239 @@
+/*
+ * include/vservices/buffer.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines simple wrapper types for strings and variable-size buffers
+ * that are stored inside Virtual Services message buffers.
+ */
+
+#ifndef _VSERVICES_BUFFER_H_
+#define _VSERVICES_BUFFER_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_string - Virtual Services fixed sized string type
+ * @ptr: String pointer
+ * @max_size: Maximum length of the string in bytes
+ *
+ * A handle to a possibly NUL-terminated string stored in a message buffer. If
+ * the size of the string is equal to max_size, the string is not NUL-terminated.
+ * If the protocol does not specify an encoding, the encoding is assumed to be
+ * UTF-8. Wide character encodings are not supported by this type; use struct
+ * vs_pbuf for wide character strings.
+ */
+struct vs_string {
+ char *ptr;
+ size_t max_size;
+};
+
+/**
+ * vs_string_copyout - Copy a Virtual Services string to a C string buffer.
+ * @dest: C string to copy to
+ * @src: Virtual Services string to copy from
+ * @max_size: Size of the destination buffer, including the NUL terminator.
+ *
+ * The behaviour is similar to strlcpy(): that is, the copied string
+ * is guaranteed not to exceed the specified size (including the NUL
+ * terminator byte), and is guaranteed to be NUL-terminated as long as
+ * the size is nonzero (unlike strncpy()).
+ *
+ * The return value is the size of the input string (even if the output was
+ * truncated); this is to make truncation easy to detect.
+ */
+static inline size_t
+vs_string_copyout(char *dest, const struct vs_string *src, size_t max_size)
+{
+ size_t src_len = strnlen(src->ptr, src->max_size);
+
+ if (max_size) {
+ size_t dest_len = min(src_len, max_size - 1);
+
+ memcpy(dest, src->ptr, dest_len);
+ dest[dest_len] = '\0';
+ }
+ return src_len;
+}
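+
+/*
+ * A minimal sketch of truncation detection, assuming "name_field" is a
+ * struct vs_string held by the caller (it is not defined by this header).
+ * As with strlcpy(), the copy was truncated if the returned source length
+ * is not smaller than the destination size:
+ *
+ *	char name[16];
+ *
+ *	if (vs_string_copyout(name, &name_field, sizeof(name)) >= sizeof(name))
+ *		pr_warn("string truncated\n");
+ */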
+
+/**
+ * vs_string_copyin_len - Copy a C string, up to a given length, into a Virtual
+ * Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ * @max_size: Maximum number of bytes to copy
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin_len(struct vs_string *dest, const char *src, size_t max_size)
+{
+ strncpy(dest->ptr, src, min(max_size, dest->max_size));
+
+ return strnlen(dest->ptr, dest->max_size);
+}
+
+/**
+ * vs_string_copyin - Copy a C string into a Virtual Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin(struct vs_string *dest, const char *src)
+{
+ return vs_string_copyin_len(dest, src, dest->max_size);
+}
+
+/**
+ * vs_string_length - Return the size of the string stored in a Virtual Services
+ * string.
+ * @str: Virtual Service string to get the length of
+ */
+static inline size_t
+vs_string_length(struct vs_string *str)
+{
+ return strnlen(str->ptr, str->max_size);
+}
+
+/**
+ * vs_string_dup - Allocate a C string buffer and copy a Virtual Services string
+ * into it.
+ * @str: Virtual Services string to duplicate
+ * @gfp: GFP flags for the allocation
+ */
+static inline char *
+vs_string_dup(struct vs_string *str, gfp_t gfp)
+{
+ size_t len;
+ char *ret;
+
+ len = strnlen(str->ptr, str->max_size) + 1;
+ ret = kmalloc(len, gfp);
+ if (ret)
+ vs_string_copyout(ret, str, len);
+ return ret;
+}
+
+/**
+ * vs_string_max_size - Return the maximum size of a Virtual Services string,
+ *                      not including the NUL terminator if the length of the
+ *                      string is equal to max_size.
+ *
+ * @str: Virtual Services string to return the maximum size of.
+ *
+ * @return The maximum size of the string.
+ */
+static inline size_t
+vs_string_max_size(struct vs_string *str)
+{
+ return str->max_size;
+}
+
+/**
+ * struct vs_pbuf - Handle to a variable-size buffered payload.
+ * @data: Data buffer
+ * @size: Current size of the buffer
+ * @max_size: Maximum size of the buffer
+ *
+ * This is similar to struct vs_string, except that it has an explicitly
+ * stored size rather than being NUL-terminated. The functions that
+ * return ssize_t all return the new size of the modified buffer, and
+ * will return a negative size if the buffer overflows.
+ */
+struct vs_pbuf {
+ void *data;
+ size_t size, max_size;
+};
+
+/**
+ * vs_pbuf_size - Get the size of a pbuf
+ * @pbuf: pbuf to get the size of
+ */
+static inline size_t vs_pbuf_size(const struct vs_pbuf *pbuf)
+{
+ return pbuf->size;
+}
+
+/**
+ * vs_pbuf_data - Get the data pointer for a pbuf
+ * @pbuf: pbuf to get the data pointer for
+ */
+static inline const void *vs_pbuf_data(const struct vs_pbuf *pbuf)
+{
+ return pbuf->data;
+}
+
+/**
+ * vs_pbuf_resize - Resize a pbuf
+ * @pbuf: pbuf to resize
+ * @size: New size
+ */
+static inline ssize_t vs_pbuf_resize(struct vs_pbuf *pbuf, size_t size)
+{
+ if (size > pbuf->max_size)
+ return -EOVERFLOW;
+
+ pbuf->size = size;
+ return size;
+}
+
+/**
+ * vs_pbuf_copyin - Copy data into a pbuf
+ * @pbuf: pbuf to copy data into
+ * @offset: Offset to copy data to
+ * @data: Pointer to data to copy into the pbuf
+ * @nbytes: Number of bytes to copy into the pbuf
+ */
+static inline ssize_t vs_pbuf_copyin(struct vs_pbuf *pbuf, off_t offset,
+ const void *data, size_t nbytes)
+{
+ if (offset + nbytes > pbuf->size)
+ return -EOVERFLOW;
+
+ memcpy(pbuf->data + offset, data, nbytes);
+
+ return nbytes;
+}
+
+/**
+ * vs_pbuf_append - Append data to a pbuf
+ * @pbuf: pbuf to append to
+ * @data: Pointer to data to append to the pbuf
+ * @nbytes: Number of bytes to append
+ */
+static inline ssize_t vs_pbuf_append(struct vs_pbuf *pbuf,
+ const void *data, size_t nbytes)
+{
+ if (pbuf->size + nbytes > pbuf->max_size)
+ return -EOVERFLOW;
+
+ memcpy(pbuf->data + pbuf->size, data, nbytes);
+ pbuf->size += nbytes;
+
+ return pbuf->size;
+}
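+
+/*
+ * A minimal error-handling sketch, assuming "pbuf" is an initialised
+ * struct vs_pbuf and "data"/"len" are caller-supplied. The ssize_t
+ * helpers above return the new buffer size on success and a negative
+ * value (-EOVERFLOW) when the data does not fit:
+ *
+ *	ssize_t ret = vs_pbuf_append(&pbuf, data, len);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */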
+
+/**
+ * vs_pbuf_dup_string - Duplicate the contents of a pbuf as a C string. The
+ * string is allocated and must be freed using kfree.
+ * @pbuf: pbuf to convert
+ * @gfp_flags: GFP flags for the string allocation
+ */
+static inline char *vs_pbuf_dup_string(struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+ return kstrndup(pbuf->data, pbuf->size, gfp_flags);
+}
+
+#endif /* _VSERVICES_BUFFER_H_ */
diff --git a/include/vservices/ioctl.h b/include/vservices/ioctl.h
new file mode 100644
index 000000000000..d96fcabb9829
--- /dev/null
+++ b/include/vservices/ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * vservices/ioctl.h - Interface to service character devices
+ *
+ * Copyright (c) 2016, Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+#define __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* ioctls that work on any opened service device */
+#define IOCTL_VS_RESET_SERVICE _IO('4', 0)
+#define IOCTL_VS_GET_NAME _IOR('4', 1, char[16])
+#define IOCTL_VS_GET_PROTOCOL _IOR('4', 2, char[32])
+
+/*
+ * Claim a device for user I/O (if no kernel driver is attached). The claim
+ * persists until the char device is closed.
+ */
+struct vs_ioctl_bind {
+ __u32 send_quota;
+ __u32 recv_quota;
+ __u32 send_notify_bits;
+ __u32 recv_notify_bits;
+ size_t msg_size;
+};
+#define IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_ioctl_bind)
+#define IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_ioctl_bind)
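+
+/*
+ * A minimal usage sketch, assuming "fd" is an open service character
+ * device with no kernel driver attached. Binding as a client fills in
+ * the quota and message-size fields declared above:
+ *
+ *	struct vs_ioctl_bind bind;
+ *
+ *	if (ioctl(fd, IOCTL_VS_BIND_CLIENT, &bind) == 0)
+ *		printf("msg_size=%zu send_quota=%u\n",
+ *		       bind.msg_size, bind.send_quota);
+ */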
+
+/* send and receive messages and notifications */
+#define IOCTL_VS_NOTIFY _IOW('4', 5, __u32)
+struct vs_ioctl_iovec {
+ union {
+ __u32 iovcnt; /* input */
+ __u32 notify_bits; /* output (recv only) */
+ };
+ struct iovec *iov;
+};
+#define IOCTL_VS_SEND _IOW('4', 6, struct vs_ioctl_iovec)
+#define IOCTL_VS_RECV _IOWR('4', 7, struct vs_ioctl_iovec)
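+
+/*
+ * A minimal send sketch, assuming "fd" has already been bound with
+ * IOCTL_VS_BIND_CLIENT and "buf"/"len" describe a message no larger
+ * than the reported msg_size:
+ *
+ *	struct iovec iov = { .iov_base = buf, .iov_len = len };
+ *	struct vs_ioctl_iovec vio = { .iovcnt = 1, .iov = &iov };
+ *
+ *	ioctl(fd, IOCTL_VS_SEND, &vio);
+ */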
+
+#endif /* __LINUX_PUBLIC_VSERVICES_IOCTL_H__ */
diff --git a/include/vservices/protocol/Kbuild b/include/vservices/protocol/Kbuild
new file mode 100644
index 000000000000..374d9b69a5df
--- /dev/null
+++ b/include/vservices/protocol/Kbuild
@@ -0,0 +1,12 @@
+#
+# Find all of the protocol directory names, and get the basename followed
+# by a trailing slash.
+#
+protocols=$(shell find include/vservices/protocol/ -mindepth 1 -type d -exec basename {} \;)
+protocol_dirs=$(foreach p, $(protocols), $(p)/)
+
+#
+# Export the headers for all protocols. The kbuild file in each protocol
+# directory specifies exactly which headers to export.
+#
+header-y += $(protocol_dirs)
diff --git a/include/vservices/protocol/block/Kbuild b/include/vservices/protocol/block/Kbuild
new file mode 100644
index 000000000000..ec3cbe813b00
--- /dev/null
+++ b/include/vservices/protocol/block/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/block/client.h b/include/vservices/protocol/block/client.h
new file mode 100644
index 000000000000..4cd2847a74d5
--- /dev/null
+++ b/include/vservices/protocol/block/client.h
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_BLOCK__)
+#define __VSERVICES_CLIENT_BLOCK__
+
+struct vs_service_device;
+struct vs_client_block_state;
+
+struct vs_client_block {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+ /** session setup **/
+ struct vs_client_block_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_client_block_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+ void (*opened) (struct vs_client_block_state * _state);
+
+ void (*reopened) (struct vs_client_block_state * _state);
+
+ void (*closed) (struct vs_client_block_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_client_block_state * _state);
+
+ struct {
+ int (*ack_read) (struct vs_client_block_state * _state,
+ void *_opaque, struct vs_pbuf data,
+ struct vs_mbuf * _mbuf);
+ int (*nack_read) (struct vs_client_block_state * _state,
+ void *_opaque,
+ vservice_block_block_io_error_t err);
+
+ int (*ack_write) (struct vs_client_block_state * _state,
+ void *_opaque);
+ int (*nack_write) (struct vs_client_block_state * _state,
+ void *_opaque,
+ vservice_block_block_io_error_t err);
+
+ } io;
+};
+
+struct vs_client_block_state {
+ vservice_block_state_t state;
+ bool readonly;
+ uint32_t sector_size;
+ uint32_t segment_size;
+ uint64_t device_sectors;
+ bool flushable;
+ bool committable;
+ struct {
+ uint32_t sector_size;
+ uint32_t segment_size;
+ } io;
+ struct vs_service_device *service;
+ bool released;
+};
+
+extern int vs_client_block_reopen(struct vs_client_block_state *_state);
+
+extern int vs_client_block_close(struct vs_client_block_state *_state);
+
+ /** interface block_io **/
+/* command parallel read */
+extern int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_free_ack_read(struct vs_client_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_read(struct vs_client_block_state *_state,
+ void *_opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay,
+ bool flush, gfp_t flags);
+
+ /* command parallel write */
+extern struct vs_mbuf *vs_client_block_io_alloc_req_write(struct
+ vs_client_block_state
+ *_state,
+ struct vs_pbuf *data,
+ gfp_t flags);
+extern int vs_client_block_io_free_req_write(struct vs_client_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_write(struct vs_client_block_state *_state,
+ void *_opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay,
+ bool flush, bool commit,
+ struct vs_pbuf data,
+ struct vs_mbuf *_mbuf);
+
+/* Status APIs for async parallel commands */
+static inline bool vs_client_block_io_req_read_can_send(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_full(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_read_is_pending(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_can_send(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_full(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_is_pending(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_client_register(struct vs_client_block *client,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_block_client_register(struct vs_client_block *client,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_block_client_register(client, name, this_module);
+}
+
+extern int vservice_block_client_unregister(struct vs_client_block *client);
+
+#endif /* ! __VSERVICES_CLIENT_BLOCK__ */
diff --git a/include/vservices/protocol/block/common.h b/include/vservices/protocol/block/common.h
new file mode 100644
index 000000000000..2779b18783d0
--- /dev/null
+++ b/include/vservices/protocol/block/common.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_BLOCK_PROTOCOL_H__)
+#define __VSERVICES_BLOCK_PROTOCOL_H__
+
+#define VSERVICE_BLOCK_PROTOCOL_NAME "com.ok-labs.block"
+typedef enum {
+ VSERVICE_BLOCK_BASE_REQ_OPEN,
+ VSERVICE_BLOCK_BASE_ACK_OPEN,
+ VSERVICE_BLOCK_BASE_NACK_OPEN,
+ VSERVICE_BLOCK_BASE_REQ_CLOSE,
+ VSERVICE_BLOCK_BASE_ACK_CLOSE,
+ VSERVICE_BLOCK_BASE_NACK_CLOSE,
+ VSERVICE_BLOCK_BASE_REQ_REOPEN,
+ VSERVICE_BLOCK_BASE_ACK_REOPEN,
+ VSERVICE_BLOCK_BASE_NACK_REOPEN,
+ VSERVICE_BLOCK_BASE_MSG_RESET,
+ VSERVICE_BLOCK_IO_REQ_READ,
+ VSERVICE_BLOCK_IO_ACK_READ,
+ VSERVICE_BLOCK_IO_NACK_READ,
+ VSERVICE_BLOCK_IO_REQ_WRITE,
+ VSERVICE_BLOCK_IO_ACK_WRITE,
+ VSERVICE_BLOCK_IO_NACK_WRITE,
+} vservice_block_message_id_t;
+typedef enum {
+ VSERVICE_BLOCK_NBIT_IN__COUNT
+} vservice_block_nbit_in_t;
+
+typedef enum {
+ VSERVICE_BLOCK_NBIT_OUT__COUNT
+} vservice_block_nbit_out_t;
+
+/* Notification mask macros */
+#endif /* ! __VSERVICES_BLOCK_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/block/server.h b/include/vservices/protocol/block/server.h
new file mode 100644
index 000000000000..65b0bfda162f
--- /dev/null
+++ b/include/vservices/protocol/block/server.h
@@ -0,0 +1,177 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_BLOCK)
+#define VSERVICES_SERVER_BLOCK
+
+struct vs_service_device;
+struct vs_server_block_state;
+
+struct vs_server_block {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+
+ /*
+ * These are the driver's recommended message quotas. They are used
+ * by the core service to select message quotas for services with no
+ * explicitly configured quotas.
+ */
+ u32 in_quota_best;
+ u32 out_quota_best;
+ /** session setup **/
+ struct vs_server_block_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_server_block_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+ vs_server_response_type_t(*open) (struct vs_server_block_state *
+ _state);
+
+ vs_server_response_type_t(*reopen) (struct vs_server_block_state *
+ _state);
+
+ vs_server_response_type_t(*close) (struct vs_server_block_state *
+ _state);
+
+ void (*closed) (struct vs_server_block_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_server_block_state * _state);
+
+ struct {
+ int (*req_read) (struct vs_server_block_state * _state,
+ uint32_t _opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay, bool flush);
+
+ int (*req_write) (struct vs_server_block_state * _state,
+ uint32_t _opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay, bool flush,
+ bool commit, struct vs_pbuf data,
+ struct vs_mbuf * _mbuf);
+
+ } io;
+};
+
+struct vs_server_block_state {
+ vservice_block_state_t state;
+ bool readonly;
+ uint32_t sector_size;
+ uint32_t segment_size;
+ uint64_t device_sectors;
+ bool flushable;
+ bool committable;
+ struct {
+ uint32_t sector_size;
+ uint32_t segment_size;
+ } io;
+ struct vs_service_device *service;
+ bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_block_open_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_block_close_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp);
+
+ /** interface block_io **/
+/* command parallel read */
+extern struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct
+ vs_server_block_state
+ *_state,
+ struct vs_pbuf *data,
+ gfp_t flags);
+extern int vs_server_block_io_free_ack_read(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_read(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ struct vs_pbuf data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_nack_read(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ vservice_block_block_io_error_t
+ err, gfp_t flags);
+ /* command parallel write */
+extern int vs_server_block_io_getbufs_req_write(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_free_req_write(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_write(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ gfp_t flags);
+extern int vs_server_block_io_send_nack_write(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ vservice_block_block_io_error_t
+ err, gfp_t flags);
+
+static inline bool vs_server_block_io_send_ack_read_is_pending(struct
+ vs_server_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_server_block_io_send_ack_write_is_pending(struct
+ vs_server_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_server_register(struct vs_server_block *server,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_block_server_register(struct vs_server_block *server,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_block_server_register(server, name, this_module);
+}
+
+extern int vservice_block_server_unregister(struct vs_server_block *server);
+#endif /* ! VSERVICES_SERVER_BLOCK */
diff --git a/include/vservices/protocol/block/types.h b/include/vservices/protocol/block/types.h
new file mode 100644
index 000000000000..52845a3564ef
--- /dev/null
+++ b/include/vservices/protocol/block/types.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_BLOCK_TYPES_H)
+#define VSERVICES_BLOCK_TYPES_H
+
+#define VSERVICE_BLOCK_IO_READ_MAX_PENDING 1024
+#define VSERVICE_BLOCK_IO_WRITE_MAX_PENDING 1024
+
+typedef enum vservice_block_block_io_error {
+ VSERVICE_BLOCK_INVALID_INDEX,
+ VSERVICE_BLOCK_MEDIA_FAILURE,
+ VSERVICE_BLOCK_MEDIA_TIMEOUT,
+ VSERVICE_BLOCK_UNSUPPORTED_COMMAND,
+ VSERVICE_BLOCK_SERVICE_RESET
+} vservice_block_block_io_error_t;
+
+typedef enum {
+/* state closed */
+ VSERVICE_BASE_STATE_CLOSED = 0,
+ VSERVICE_BASE_STATE_CLOSED__OPEN,
+ VSERVICE_BASE_STATE_CLOSED__CLOSE,
+ VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+ VSERVICE_BASE_STATE_RUNNING,
+ VSERVICE_BASE_STATE_RUNNING__OPEN,
+ VSERVICE_BASE_STATE_RUNNING__CLOSE,
+ VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+ VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+ vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+ state)
+{
+ static const char *names[] =
+ { "closed", "closed__open", "closed__close", "closed__reopen",
+ "running", "running__open", "running__close", "running__reopen"
+ };
+ if (!VSERVICE_BASE_STATE_VALID(state)) {
+ return "INVALID";
+ }
+ return names[state.statenum];
+}
+
+typedef struct {
+ DECLARE_BITMAP(read_bitmask, VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+ void *read_tags[VSERVICE_BLOCK_IO_READ_MAX_PENDING];
+ DECLARE_BITMAP(write_bitmask, VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+ void *write_tags[VSERVICE_BLOCK_IO_WRITE_MAX_PENDING];
+} vservice_block_io_state_t;
+
+#define VSERVICE_BLOCK_IO_RESET_STATE (vservice_block_io_state_t) { \
+.read_bitmask = {0}, \
+.read_tags = {NULL}, \
+.write_bitmask = {0}, \
+.write_tags = {NULL}}
+
+#define VSERVICE_BLOCK_IO_STATE_VALID(state) true
+
+typedef struct {
+
+ vservice_base_state_t base;
+
+ vservice_block_io_state_t io;
+} vservice_block_state_t;
+
+#define VSERVICE_BLOCK_RESET_STATE (vservice_block_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.io = VSERVICE_BLOCK_IO_RESET_STATE }
+
+#define VSERVICE_BLOCK_IS_STATE_RESET(state) \
+ ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif /* ! VSERVICES_BLOCK_TYPES_H */
diff --git a/include/vservices/protocol/core.h b/include/vservices/protocol/core.h
new file mode 100644
index 000000000000..3a86af5a5ec8
--- /dev/null
+++ b/include/vservices/protocol/core.h
@@ -0,0 +1,145 @@
+/*
+ * include/vservices/protocol/core.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * These are the common generated definitions for the core protocol drivers;
+ * specifically the message IDs and the protocol state representation.
+ *
+ * This is currently hand-generated, but will eventually be autogenerated
+ * from the protocol specifications in core.vs. Please keep it consistent
+ * with that file.
+ */
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__PROTOCOL_NAME 32
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__SERVICE_NAME 16
+
+/*
+ * Identifiers for in-band messages.
+ *
+ * This definition applies in both directions, because there is no practical
+ * limit on message IDs (services are unlikely to define 2^16 distinct message
+ * names).
+ */
+typedef enum {
+ /** simple_protocol core **/
+ /* message out startup */
+ VSERVICE_CORE_MSG_STARTUP,
+
+ /* message out shutdown */
+ VSERVICE_CORE_MSG_SHUTDOWN,
+
+ /* command in sync connect */
+ VSERVICE_CORE_REQ_CONNECT,
+ VSERVICE_CORE_ACK_CONNECT,
+ VSERVICE_CORE_NACK_CONNECT,
+
+ /* command in sync disconnect */
+ VSERVICE_CORE_REQ_DISCONNECT,
+ VSERVICE_CORE_ACK_DISCONNECT,
+ VSERVICE_CORE_NACK_DISCONNECT,
+
+ /* command in service_count */
+ VSERVICE_CORE_REQ_SERVICE_COUNT,
+ VSERVICE_CORE_ACK_SERVICE_COUNT,
+ VSERVICE_CORE_NACK_SERVICE_COUNT,
+
+ /* command in queued service_info */
+ VSERVICE_CORE_REQ_SERVICE_INFO,
+ VSERVICE_CORE_ACK_SERVICE_INFO,
+ VSERVICE_CORE_NACK_SERVICE_INFO,
+
+ /* message inout service_reset */
+ VSERVICE_CORE_MSG_SERVICE_RESET,
+
+ /* message inout service_ready */
+ VSERVICE_CORE_MSG_SERVICE_READY,
+
+ /* message out notification bits */
+ VSERVICE_CORE_MSG_NOTIFICATION_BITS_INFO,
+
+} vservice_core_message_id_t;
+
+/*
+ * Notification bits are defined separately for each direction because there
+ * is relatively limited space to allocate them from (specifically, the bits in
+ * a machine word). It is unlikely but possible for a protocol to reach this
+ * limit.
+ */
+
+/* Bits in the in (client -> server) notification bitmask. */
+typedef enum {
+ /** simple_protocol core **/
+ /* No in notifications */
+
+ VSERVICE_CORE_NBIT_IN__COUNT = 0,
+} vservice_core_nbit_in_t;
+
+/* Masks for the in notification bits */
+/* No in notifications */
+
+/* Bits in the out (server -> client) notification bitmask. */
+typedef enum {
+ /** simple_protocol core **/
+ /* notification out reenumerate */
+ VSERVICE_CORE_NBIT_OUT_REENUMERATE = 0,
+
+ VSERVICE_CORE_NBIT_OUT__COUNT,
+} vservice_core_nbit_out_t;
+
+/* Masks for the out notification bits */
+#define VSERVICE_CORE_NMASK_OUT_REENUMERATE \
+ (1 << VSERVICE_CORE_NBIT_OUT_REENUMERATE)
+
+/* Valid states of the interface's generated state machine. */
+typedef enum {
+ /* state offline */
+ VSERVICE_CORE_STATE_OFFLINE = 0,
+
+ /* state disconnected */
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+
+ /* state connected */
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+ /* reset offline */
+ VSERVICE_CORE_STATE__RESET = VSERVICE_CORE_STATE_OFFLINE,
+} vservice_core_statenum_t;
+
+typedef struct {
+ vservice_core_statenum_t statenum;
+ bool pending_service_count;
+ unsigned pending_service_info;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+ .statenum = VSERVICE_CORE_STATE__RESET, \
+ .pending_service_count = false, \
+ .pending_service_info = 0 }
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) ( \
+ ((state).statenum == VSERVICE_CORE_STATE_OFFLINE))
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ( \
+ ((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+ ((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT))
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) ( \
+ ((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+ ((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) \
+ VSERVICE_CORE_STATE_IS_OFFLINE(state) ? ( \
+ ((state).pending_service_count == false) && \
+ ((state).pending_service_info == 0)) : \
+ VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? ( \
+ ((state).pending_service_count == false) && \
+ ((state).pending_service_info == 0)) : \
+ VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+ false)
diff --git a/include/vservices/protocol/core/Kbuild b/include/vservices/protocol/core/Kbuild
new file mode 100644
index 000000000000..ec3cbe813b00
--- /dev/null
+++ b/include/vservices/protocol/core/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/core/client.h b/include/vservices/protocol/core/client.h
new file mode 100644
index 000000000000..3d529990ad19
--- /dev/null
+++ b/include/vservices/protocol/core/client.h
@@ -0,0 +1,155 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_CORE__)
+#define __VSERVICES_CLIENT_CORE__
+
+struct vs_service_device;
+struct vs_client_core_state;
+
+struct vs_client_core {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+ /** session setup **/
+ struct vs_client_core_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_client_core_state * _state);
+
+ struct vs_service_driver *driver;
+
+ /** Core service base interface **/
+ void (*start) (struct vs_client_core_state * _state);
+ void (*reset) (struct vs_client_core_state * _state);
+ /** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_client_core_state * _state);
+
+ struct {
+ int (*state_change) (struct vs_client_core_state * _state,
+ vservice_core_statenum_t old,
+ vservice_core_statenum_t new);
+
+ int (*ack_connect) (struct vs_client_core_state * _state);
+ int (*nack_connect) (struct vs_client_core_state * _state);
+
+ int (*ack_disconnect) (struct vs_client_core_state * _state);
+ int (*nack_disconnect) (struct vs_client_core_state * _state);
+
+ int (*msg_startup) (struct vs_client_core_state * _state,
+ uint32_t core_in_quota,
+ uint32_t core_out_quota);
+
+ int (*msg_shutdown) (struct vs_client_core_state * _state);
+
+ int (*msg_service_created) (struct vs_client_core_state *
+ _state, uint32_t service_id,
+ struct vs_string service_name,
+ struct vs_string protocol_name,
+ struct vs_mbuf * _mbuf);
+
+ int (*msg_service_removed) (struct vs_client_core_state *
+ _state, uint32_t service_id);
+
+ int (*msg_server_ready) (struct vs_client_core_state * _state,
+ uint32_t service_id, uint32_t in_quota,
+ uint32_t out_quota,
+ uint32_t in_bit_offset,
+ uint32_t in_num_bits,
+ uint32_t out_bit_offset,
+ uint32_t out_num_bits);
+
+ int (*msg_service_reset) (struct vs_client_core_state * _state,
+ uint32_t service_id);
+
+ } core;
+};
+
+struct vs_client_core_state {
+ vservice_core_protocol_state_t state;
+ struct vs_service_device *service;
+ bool released;
+};
+
+extern int vs_client_core_reopen(struct vs_client_core_state *_state);
+
+extern int vs_client_core_close(struct vs_client_core_state *_state);
+
+ /** interface core **/
+/* command sync connect */
+extern int vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+ gfp_t flags);
+
+ /* command sync disconnect */
+extern int vs_client_core_core_req_disconnect(struct vs_client_core_state
+ *_state, gfp_t flags);
+
+ /* message startup */
+/* message shutdown */
+/* message service_created */
+extern int vs_client_core_core_getbufs_service_created(struct
+ vs_client_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_core_core_free_service_created(struct vs_client_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ struct vs_mbuf *_mbuf);
+ /* message service_removed */
+/* message server_ready */
+/* message service_reset */
+extern int vs_client_core_core_send_service_reset(struct vs_client_core_state
+ *_state, uint32_t service_id,
+ gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_client_register(struct vs_client_core *client,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_core_client_register(struct vs_client_core *client,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_core_client_register(client, name, this_module);
+}
+
+extern int vservice_core_client_unregister(struct vs_client_core *client);
+
+#endif /* ! __VSERVICES_CLIENT_CORE__ */
diff --git a/include/vservices/protocol/core/common.h b/include/vservices/protocol/core/common.h
new file mode 100644
index 000000000000..b496416119c6
--- /dev/null
+++ b/include/vservices/protocol/core/common.h
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CORE_PROTOCOL_H__)
+#define __VSERVICES_CORE_PROTOCOL_H__
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+typedef enum {
+ VSERVICE_CORE_CORE_REQ_CONNECT,
+ VSERVICE_CORE_CORE_ACK_CONNECT,
+ VSERVICE_CORE_CORE_NACK_CONNECT,
+ VSERVICE_CORE_CORE_REQ_DISCONNECT,
+ VSERVICE_CORE_CORE_ACK_DISCONNECT,
+ VSERVICE_CORE_CORE_NACK_DISCONNECT,
+ VSERVICE_CORE_CORE_MSG_STARTUP,
+ VSERVICE_CORE_CORE_MSG_SHUTDOWN,
+ VSERVICE_CORE_CORE_MSG_SERVICE_CREATED,
+ VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED,
+ VSERVICE_CORE_CORE_MSG_SERVER_READY,
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET,
+} vservice_core_message_id_t;
+typedef enum {
+ VSERVICE_CORE_NBIT_IN__COUNT
+} vservice_core_nbit_in_t;
+
+typedef enum {
+ VSERVICE_CORE_NBIT_OUT__COUNT
+} vservice_core_nbit_out_t;
+
+/* Notification mask macros */
+#endif /* ! __VSERVICES_CORE_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/core/server.h b/include/vservices/protocol/core/server.h
new file mode 100644
index 000000000000..959b8c3293bd
--- /dev/null
+++ b/include/vservices/protocol/core/server.h
@@ -0,0 +1,171 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_CORE)
+#define VSERVICES_SERVER_CORE
+
+struct vs_service_device;
+struct vs_server_core_state;
+
+struct vs_server_core {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+
+ /*
+ * These are the driver's recommended message quotas. They are used
+ * by the core service to select message quotas for services with no
+ * explicitly configured quotas.
+ */
+ u32 in_quota_best;
+ u32 out_quota_best;
+ /** session setup **/
+ struct vs_server_core_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_server_core_state * _state);
+
+ struct vs_service_driver *driver;
+
+ /** Core service base interface **/
+ void (*start) (struct vs_server_core_state * _state);
+ void (*reset) (struct vs_server_core_state * _state);
+ /** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_server_core_state * _state);
+
+ struct {
+ int (*state_change) (struct vs_server_core_state * _state,
+ vservice_core_statenum_t old,
+ vservice_core_statenum_t new);
+
+ int (*req_connect) (struct vs_server_core_state * _state);
+
+ int (*req_disconnect) (struct vs_server_core_state * _state);
+
+ int (*msg_service_reset) (struct vs_server_core_state * _state,
+ uint32_t service_id);
+
+ } core;
+};
+
+struct vs_server_core_state {
+ vservice_core_protocol_state_t state;
+ struct vs_service_device *service;
+ bool released;
+};
+
+/** Complete calls for server core functions **/
+
+ /** interface core **/
+/* command sync connect */
+extern int vs_server_core_core_send_ack_connect(struct vs_server_core_state
+ *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_connect(struct vs_server_core_state
+ *_state, gfp_t flags);
+ /* command sync disconnect */
+extern int vs_server_core_core_send_ack_disconnect(struct vs_server_core_state
+ *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_disconnect(struct vs_server_core_state
+ *_state, gfp_t flags);
+ /* message startup */
+extern int vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+ uint32_t core_in_quota,
+ uint32_t core_out_quota,
+ gfp_t flags);
+
+ /* message shutdown */
+extern int vs_server_core_core_send_shutdown(struct vs_server_core_state
+ *_state, gfp_t flags);
+
+ /* message service_created */
+extern struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+ vs_server_core_state
+ *_state,
+ struct
+ vs_string
+ *service_name,
+ struct
+ vs_string
+ *protocol_name,
+ gfp_t flags);
+extern int vs_server_core_core_free_service_created(struct vs_server_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_core_core_send_service_created(struct vs_server_core_state
+ *_state,
+ uint32_t service_id,
+ struct vs_string
+ service_name,
+ struct vs_string
+ protocol_name,
+ struct vs_mbuf *_mbuf);
+
+ /* message service_removed */
+extern int vs_server_core_core_send_service_removed(struct vs_server_core_state
+ *_state,
+ uint32_t service_id,
+ gfp_t flags);
+
+ /* message server_ready */
+extern int vs_server_core_core_send_server_ready(struct vs_server_core_state
+ *_state, uint32_t service_id,
+ uint32_t in_quota,
+ uint32_t out_quota,
+ uint32_t in_bit_offset,
+ uint32_t in_num_bits,
+ uint32_t out_bit_offset,
+ uint32_t out_num_bits,
+ gfp_t flags);
+
+ /* message service_reset */
+extern int vs_server_core_core_send_service_reset(struct vs_server_core_state
+ *_state, uint32_t service_id,
+ gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_server_register(struct vs_server_core *server,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_core_server_register(struct vs_server_core *server,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_core_server_register(server, name, this_module);
+}
+
+extern int vservice_core_server_unregister(struct vs_server_core *server);
+#endif /* ! VSERVICES_SERVER_CORE */
diff --git a/include/vservices/protocol/core/types.h b/include/vservices/protocol/core/types.h
new file mode 100644
index 000000000000..2d6928dc0e06
--- /dev/null
+++ b/include/vservices/protocol/core/types.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_CORE_TYPES_H)
+#define VSERVICES_CORE_TYPES_H
+
+#define VSERVICE_CORE_SERVICE_NAME_SIZE (uint32_t)16
+
+#define VSERVICE_CORE_PROTOCOL_NAME_SIZE (uint32_t)32
+
+typedef enum {
+/* state offline */
+ VSERVICE_CORE_STATE_OFFLINE = 0,
+ VSERVICE_CORE_STATE_OFFLINE__CONNECT,
+ VSERVICE_CORE_STATE_OFFLINE__DISCONNECT,
+
+/* state disconnected */
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT,
+
+/* state connected */
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+ VSERVICE_CORE__RESET = VSERVICE_CORE_STATE_OFFLINE
+} vservice_core_statenum_t;
+
+typedef struct {
+ vservice_core_statenum_t statenum;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+.statenum = VSERVICE_CORE__RESET}
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) (\
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) ( \
+VSERVICE_CORE_STATE_IS_OFFLINE(state) ? true : \
+VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? true : \
+VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+false)
+
+static inline const char *vservice_core_get_state_string(vservice_core_state_t
+ state)
+{
+ static const char *names[] =
+ { "offline", "offline__connect", "offline__disconnect",
+ "disconnected", "disconnected__connect",
+ "disconnected__disconnect",
+ "connected", "connected__connect", "connected__disconnect"
+ };
+ if (!VSERVICE_CORE_STATE_VALID(state)) {
+ return "INVALID";
+ }
+ return names[state.statenum];
+}
+
+typedef struct {
+
+ vservice_core_state_t core;
+} vservice_core_protocol_state_t;
+
+#define VSERVICE_CORE_PROTOCOL_RESET_STATE (vservice_core_protocol_state_t) {\
+.core = VSERVICE_CORE_RESET_STATE }
+#endif /* ! VSERVICES_CORE_TYPES_H */
diff --git a/include/vservices/protocol/serial/Kbuild b/include/vservices/protocol/serial/Kbuild
new file mode 100644
index 000000000000..ec3cbe813b00
--- /dev/null
+++ b/include/vservices/protocol/serial/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/serial/client.h b/include/vservices/protocol/serial/client.h
new file mode 100644
index 000000000000..78efed2ef234
--- /dev/null
+++ b/include/vservices/protocol/serial/client.h
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_SERIAL__)
+#define __VSERVICES_CLIENT_SERIAL__
+
+struct vs_service_device;
+struct vs_client_serial_state;
+
+struct vs_client_serial {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+ /** session setup **/
+ struct vs_client_serial_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_client_serial_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+ void (*opened) (struct vs_client_serial_state * _state);
+
+ void (*reopened) (struct vs_client_serial_state * _state);
+
+ void (*closed) (struct vs_client_serial_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_client_serial_state * _state);
+
+ struct {
+ int (*msg_msg) (struct vs_client_serial_state * _state,
+ struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+ } serial;
+};
+
+struct vs_client_serial_state {
+ vservice_serial_protocol_state_t state;
+ uint32_t packet_size;
+ struct {
+ uint32_t packet_size;
+ } serial;
+ struct vs_service_device *service;
+ bool released;
+};
+
+extern int vs_client_serial_reopen(struct vs_client_serial_state *_state);
+
+extern int vs_client_serial_close(struct vs_client_serial_state *_state);
+
+ /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct
+ vs_client_serial_state
+ *_state,
+ struct vs_pbuf *b,
+ gfp_t flags);
+extern int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_free_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_send_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf b,
+ struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_client_register(struct vs_client_serial *client,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_serial_client_register(struct vs_client_serial
+ *client, const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_serial_client_register(client, name, this_module);
+}
+
+extern int vservice_serial_client_unregister(struct vs_client_serial *client);
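+/*
+ * Illustrative usage sketch (all handler names are hypothetical, not part of
+ * this interface): a serial client driver fills in struct vs_client_serial
+ * and registers it. The framework then calls alloc/release for session
+ * setup, opened/closed as the service comes and goes, and serial.msg_msg for
+ * each incoming message.
+ *
+ *	static struct vs_client_serial example_client = {
+ *		.rx_atomic	= false,
+ *		.alloc		= example_alloc,
+ *		.release	= example_release,
+ *		.opened		= example_opened,
+ *		.closed		= example_closed,
+ *		.serial		= {
+ *			.msg_msg	= example_msg,
+ *		},
+ *	};
+ *
+ *	err = vservice_serial_client_register(&example_client, "example");
+ */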
+
+#endif /* ! __VSERVICES_CLIENT_SERIAL__ */
diff --git a/include/vservices/protocol/serial/common.h b/include/vservices/protocol/serial/common.h
new file mode 100644
index 000000000000..a5306453afc0
--- /dev/null
+++ b/include/vservices/protocol/serial/common.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_SERIAL_PROTOCOL_H__)
+#define __VSERVICES_SERIAL_PROTOCOL_H__
+
+#define VSERVICE_SERIAL_PROTOCOL_NAME "com.ok-labs.serial"
+typedef enum {
+ VSERVICE_SERIAL_BASE_REQ_OPEN,
+ VSERVICE_SERIAL_BASE_ACK_OPEN,
+ VSERVICE_SERIAL_BASE_NACK_OPEN,
+ VSERVICE_SERIAL_BASE_REQ_CLOSE,
+ VSERVICE_SERIAL_BASE_ACK_CLOSE,
+ VSERVICE_SERIAL_BASE_NACK_CLOSE,
+ VSERVICE_SERIAL_BASE_REQ_REOPEN,
+ VSERVICE_SERIAL_BASE_ACK_REOPEN,
+ VSERVICE_SERIAL_BASE_NACK_REOPEN,
+ VSERVICE_SERIAL_BASE_MSG_RESET,
+ VSERVICE_SERIAL_SERIAL_MSG_MSG,
+} vservice_serial_message_id_t;
+typedef enum {
+ VSERVICE_SERIAL_NBIT_IN__COUNT
+} vservice_serial_nbit_in_t;
+
+typedef enum {
+ VSERVICE_SERIAL_NBIT_OUT__COUNT
+} vservice_serial_nbit_out_t;
+
+/* Notification mask macros */
+#endif /* ! __VSERVICES_SERIAL_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/serial/server.h b/include/vservices/protocol/serial/server.h
new file mode 100644
index 000000000000..001fed5949af
--- /dev/null
+++ b/include/vservices/protocol/serial/server.h
@@ -0,0 +1,134 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_SERIAL)
+#define VSERVICES_SERVER_SERIAL
+
+struct vs_service_device;
+struct vs_server_serial_state;
+
+struct vs_server_serial {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+
+ /*
+ * These are the driver's recommended message quotas. They are used
+ * by the core service to select message quotas for services with no
+ * explicitly configured quotas.
+ */
+ u32 in_quota_best;
+ u32 out_quota_best;
+ /** session setup **/
+ struct vs_server_serial_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_server_serial_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+ vs_server_response_type_t(*open) (struct vs_server_serial_state *
+ _state);
+
+ vs_server_response_type_t(*reopen) (struct vs_server_serial_state *
+ _state);
+
+ vs_server_response_type_t(*close) (struct vs_server_serial_state *
+ _state);
+
+ void (*closed) (struct vs_server_serial_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_server_serial_state * _state);
+
+ struct {
+ int (*msg_msg) (struct vs_server_serial_state * _state,
+ struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+ } serial;
+};
+
+struct vs_server_serial_state {
+ vservice_serial_protocol_state_t state;
+ uint32_t packet_size;
+ struct {
+ uint32_t packet_size;
+ } serial;
+ struct vs_service_device *service;
+ bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_serial_close_complete(struct vs_server_serial_state
+ *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_serial_reopen_complete(struct vs_server_serial_state
+ *_state,
+ vs_server_response_type_t resp);
+
+ /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct
+ vs_server_serial_state
+ *_state,
+ struct vs_pbuf *b,
+ gfp_t flags);
+extern int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_free_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_send_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf b,
+ struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_server_register(struct vs_server_serial *server,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_serial_server_register(struct vs_server_serial
+ *server, const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_serial_server_register(server, name, this_module);
+}
+
+extern int vservice_serial_server_unregister(struct vs_server_serial *server);
+#endif /* ! VSERVICES_SERVER_SERIAL */
diff --git a/include/vservices/protocol/serial/types.h b/include/vservices/protocol/serial/types.h
new file mode 100644
index 000000000000..46edf95770fa
--- /dev/null
+++ b/include/vservices/protocol/serial/types.h
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERIAL_TYPES_H)
+#define VSERVICES_SERIAL_TYPES_H
+
+typedef enum {
+/* state closed */
+ VSERVICE_BASE_STATE_CLOSED = 0,
+ VSERVICE_BASE_STATE_CLOSED__OPEN,
+ VSERVICE_BASE_STATE_CLOSED__CLOSE,
+ VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+ VSERVICE_BASE_STATE_RUNNING,
+ VSERVICE_BASE_STATE_RUNNING__OPEN,
+ VSERVICE_BASE_STATE_RUNNING__CLOSE,
+ VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+ VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+ vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+ state)
+{
+ static const char *names[] =
+ { "closed", "closed__open", "closed__close", "closed__reopen",
+ "running", "running__open", "running__close", "running__reopen"
+ };
+ if (!VSERVICE_BASE_STATE_VALID(state)) {
+ return "INVALID";
+ }
+ return names[state.statenum];
+}
+
+typedef struct {
+} vservice_serial_state_t;
+
+#define VSERVICE_SERIAL_RESET_STATE (vservice_serial_state_t) { \
+}
+
+#define VSERVICE_SERIAL_STATE_VALID(state) true
+
+typedef struct {
+
+ vservice_base_state_t base;
+
+ vservice_serial_state_t serial;
+} vservice_serial_protocol_state_t;
+
+#define VSERVICE_SERIAL_PROTOCOL_RESET_STATE (vservice_serial_protocol_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.serial = VSERVICE_SERIAL_RESET_STATE }
+
+#define VSERVICE_SERIAL_IS_STATE_RESET(state) \
+ ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif /* ! VSERVICES_SERIAL_TYPES_H */
diff --git a/include/vservices/service.h b/include/vservices/service.h
new file mode 100644
index 000000000000..af232b63947a
--- /dev/null
+++ b/include/vservices/service.h
@@ -0,0 +1,674 @@
+/*
+ * include/vservices/service.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the driver and device types for vServices client and
+ * server drivers. These are generally defined by generated protocol-layer
+ * code. However, they can also be defined directly by applications that
+ * don't require protocol generation.
+ */
+
+#ifndef _VSERVICE_SERVICE_H_
+#define _VSERVICE_SERVICE_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)
+#include <asm/atomic.h>
+#else
+#include <linux/atomic.h>
+#endif
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/types.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_service_driver - Virtual service driver structure
+ * @protocol: Protocol name for this driver
+ * @is_server: True if this is a server driver, false if it is a client driver
+ * @rx_atomic: If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true
+ * the message handlers are run from tasklet context and may not
+ * sleep. For this purpose, tx_ready is considered a receive
+ * message handler.
+ * @tx_atomic: If this is set to true along with rx_atomic, the driver is
+ * allowed to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or from
+ * task context after calling vs_service_state_lock.
+ * @probe: Probe function for this service
+ * @remove: Remove function for this service
+ * --- Callbacks ---
+ * @receive: Message handler function for this service
+ * @notify: Incoming notification handler function for this service
+ * @start: Callback which is run when this service is started
+ * @reset: Callback which is run when this service is reset
+ * @tx_ready: Callback which is run when the service has dropped below its
+ * send quota
+ * --- Resource requirements (valid for server only) ---
+ * @in_quota_min: minimum number of input messages for protocol functionality
+ * @in_quota_best: suggested number of input messages
+ * @out_quota_min: minimum number of output messages for protocol functionality
+ * @out_quota_best: suggested number of output messages
+ * @in_notify_count: number of input notification bits used
+ * @out_notify_count: number of output notification bits used
+ * --- Internal ---
+ * @driver: Linux device model driver structure
+ *
+ * The callback functions for a virtual service driver are all called from
+ * the virtual service device's work queue.
+ */
+struct vs_service_driver {
+ const char *protocol;
+ bool is_server;
+ bool rx_atomic, tx_atomic;
+
+ int (*probe)(struct vs_service_device *service);
+ int (*remove)(struct vs_service_device *service);
+
+ int (*receive)(struct vs_service_device *service,
+ struct vs_mbuf *mbuf);
+ void (*notify)(struct vs_service_device *service, u32 flags);
+
+ void (*start)(struct vs_service_device *service);
+ void (*reset)(struct vs_service_device *service);
+
+ int (*tx_ready)(struct vs_service_device *service);
+
+ unsigned in_quota_min;
+ unsigned in_quota_best;
+ unsigned out_quota_min;
+ unsigned out_quota_best;
+ unsigned in_notify_count;
+ unsigned out_notify_count;
+
+ struct device_driver driver;
+};
+
+#define to_vs_service_driver(d) \
+ container_of(d, struct vs_service_driver, driver)
+
+/* The vServices server/client bus types */
+extern struct bus_type vs_client_bus_type;
+extern struct bus_type vs_server_bus_type;
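+/*
+ * Illustrative sketch only (handler names are hypothetical): service drivers
+ * are normally emitted by the protocol generator, but a hand-written client
+ * driver would look roughly like this, matched to services by protocol name
+ * and attached to the client bus declared above. The .driver wiring shown
+ * here is an assumption about how registration is done.
+ *
+ *	static struct vs_service_driver example_driver = {
+ *		.protocol	= "com.ok-labs.serial",
+ *		.is_server	= false,
+ *		.rx_atomic	= false,
+ *		.probe		= example_probe,
+ *		.remove		= example_remove,
+ *		.receive	= example_receive,
+ *		.notify		= example_notify,
+ *		.start		= example_start,
+ *		.reset		= example_reset,
+ *		.tx_ready	= example_tx_ready,
+ *		.driver		= {
+ *			.name	= "example-serial-client",
+ *			.owner	= THIS_MODULE,
+ *			.bus	= &vs_client_bus_type,
+ *		},
+ *	};
+ */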
+
+/**
+ * struct vs_service_stats - Virtual service statistics
+ * @over_quota_time: Internal counter for tracking over quota time.
+ * @sent_mbufs: Total number of message buffers sent.
+ * @sent_bytes: Total bytes sent.
+ * @send_failures: Total number of send failures.
+ * @recv_mbufs: Total number of message buffers received.
+ * @recv_bytes: Total number of bytes received.
+ * @recv_failures: Total number of receive failures.
+ * @nr_over_quota: Number of times an mbuf allocation has failed because the
+ * service is over quota.
+ * @nr_tx_ready: Number of times the service has run its tx_ready handler
+ * @over_quota_time_total: The total amount of time in milliseconds that the
+ * service has spent over quota. Measured as the time
+ * between exceeding quota in mbuf allocation and
+ * running the tx_ready handler.
+ * @over_quota_time_avg: The average amount of time in milliseconds that the
+ * service is spending in the over quota state.
+ */
+struct vs_service_stats {
+ unsigned long over_quota_time;
+
+ atomic_t sent_mbufs;
+ atomic_t sent_bytes;
+ atomic_t send_failures;
+ atomic_t recv_mbufs;
+ atomic_t recv_bytes;
+ atomic_t recv_failures;
+ atomic_t nr_over_quota;
+ atomic_t nr_tx_ready;
+ atomic_t over_quota_time_total;
+ atomic_t over_quota_time_avg;
+};
+
+/**
+ * struct vs_service_device - Virtual service device
+ * @id: Unique ID (to the session) for this service
+ * @name: Service name
+ * @sysfs_name: The sysfs name for the service
+ * @protocol: Service protocol name
+ * @is_server: True if this device is server, false if it is a client
+ * @owner: service responsible for managing this service. This must be
+ * on the same session, and is NULL iff this is the core service.
+ * It must not be a service whose driver has tx_atomic set.
+ * @lock_subclass: the number of generations of owners between this service
+ * and the core service; 0 for the core service, 1 for anything directly
+ * created by it, and so on. This is only used for verifying lock
+ * ordering (when lockdep is enabled), hence the name.
+ * @ready_lock: mutex protecting readiness, disable_count and driver_probed.
+ * This depends on the state_mutex of the service's owner, if any. Acquire
+ * it using mutex_lock_nested(ready_lock, lock_subclass).
+ * @readiness: Service's readiness state, owned by session layer.
+ * @disable_count: Number of times the service has been disabled without
+ * a matching enable.
+ * @driver_probed: True if a driver has been probed (and not removed)
+ * @work_queue: Work queue for this service's task-context work.
+ * @rx_tasklet: Tasklet for handling incoming messages. This is only used
+ * if the service driver has rx_atomic set to true. Otherwise
+ * incoming messages are handled on the workqueue by rx_work.
+ * @rx_work: Work structure for handling incoming messages. This is only
+ * used if the service driver has rx_atomic set to false.
+ * @rx_lock: Spinlock which protects access to rx_queue and tx_ready
+ * @rx_queue: Queue of incoming messages
+ * @tx_ready: Flag indicating that a tx_ready event is pending
+ * @tx_batching: Flag indicating that outgoing messages are being batched
+ * @state_spinlock: spinlock used to protect the service state if the
+ * service driver has tx_atomic (and rx_atomic) set to true. This
+ * depends on the service's ready_lock. Acquire it only by
+ * calling vs_service_state_lock_bh().
+ * @state_mutex: mutex used to protect the service state if the service
+ * driver has tx_atomic set to false. This depends on the service's
+ * ready_lock, and if rx_atomic is true, the rx_tasklet must be
+ * disabled while it is held. Acquire it only by calling
+ * vs_service_state_lock().
+ * @state_spinlock_used: Flag to check if the state spinlock has been acquired.
+ * @state_mutex_used: Flag to check if the state mutex has been acquired.
+ * @reset_work: Work to reset the service after a driver fails
+ * @pending_reset: Set if reset_work has been queued and not completed.
+ * @ready_work: Work to make service ready after a throttling delay
+ * @cooloff_work: Work for cooling off reset throttling after the reset
+ * throttling limit was hit
+ * @cleanup_work: Work for cleaning up and freeing the service structure
+ * @last_reset: Time in jiffies at which this service last reset
+ * @last_reset_request: Time in jiffies the last reset request for this
+ * service occurred at
+ * @last_ready: Time in jiffies at which this service last became ready
+ * @reset_delay: Time in jiffies that the next throttled reset will be
+ * delayed for. A value of zero means that reset throttling is not in
+ * effect.
+ * @is_over_quota: Internal flag for whether the service is over quota. This
+ * flag is only used for stats accounting.
+ * @quota_wq: waitqueue that is woken whenever the available send quota
+ * increases.
+ * @notify_send_bits: The number of bits allocated for outgoing notifications.
+ * @notify_send_offset: The first bit allocated for outgoing notifications.
+ * @notify_recv_bits: The number of bits allocated for incoming notifications.
+ * @notify_recv_offset: The first bit allocated for incoming notifications.
+ * @send_quota: The maximum number of outgoing messages.
+ * @recv_quota: The maximum number of incoming messages.
+ * @in_quota_set: For servers, the number of client->server messages
+ * requested during system configuration (sysfs or environment).
+ * @out_quota_set: For servers, the number of server->client messages
+ * requested during system configuration (sysfs or environment).
+ * @dev: Linux device model device structure
+ * @stats: Service statistics
+ */
+struct vs_service_device {
+ vs_service_id_t id;
+ char *name;
+ char *sysfs_name;
+ char *protocol;
+ bool is_server;
+
+ struct vs_service_device *owner;
+ unsigned lock_subclass;
+
+ struct mutex ready_lock;
+ unsigned readiness;
+ int disable_count;
+ bool driver_probed;
+
+ struct workqueue_struct *work_queue;
+
+ struct tasklet_struct rx_tasklet;
+ struct work_struct rx_work;
+
+ spinlock_t rx_lock;
+ struct list_head rx_queue;
+ bool tx_ready, tx_batching;
+
+ spinlock_t state_spinlock;
+ struct mutex state_mutex;
+
+ struct work_struct reset_work;
+ bool pending_reset;
+ struct delayed_work ready_work;
+ struct delayed_work cooloff_work;
+ struct work_struct cleanup_work;
+
+ unsigned long last_reset;
+ unsigned long last_reset_request;
+ unsigned long last_ready;
+ unsigned long reset_delay;
+
+ atomic_t is_over_quota;
+ wait_queue_head_t quota_wq;
+
+ unsigned notify_send_bits;
+ unsigned notify_send_offset;
+ unsigned notify_recv_bits;
+ unsigned notify_recv_offset;
+ unsigned send_quota;
+ unsigned recv_quota;
+
+ unsigned in_quota_set;
+ unsigned out_quota_set;
+
+ void *transport_priv;
+
+ struct device dev;
+ struct vs_service_stats stats;
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ bool state_spinlock_used;
+ bool state_mutex_used;
+#endif
+};
+
+#define to_vs_service_device(d) container_of(d, struct vs_service_device, dev)
+
+/**
+ * vs_service_get_session - Return the session for a service
+ * @service: Service to get the session for
+ */
+static inline struct vs_session_device *
+vs_service_get_session(struct vs_service_device *service)
+{
+ return to_vs_session_device(service->dev.parent);
+}
+
+/**
+ * vs_service_send - Send a message from a service
+ * @service: Service to send the message from
+ * @mbuf: Message buffer to send
+ */
+static inline int
+vs_service_send(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ const unsigned long flags =
+ service->tx_batching ? VS_TRANSPORT_SEND_FLAGS_MORE : 0;
+ size_t msg_size = vt->mbuf_size(mbuf);
+ int err;
+
+ err = vt->send(session->transport, service, mbuf, flags);
+ if (!err) {
+ atomic_inc(&service->stats.sent_mbufs);
+ atomic_add(msg_size, &service->stats.sent_bytes);
+ } else {
+ atomic_inc(&service->stats.send_failures);
+ }
+
+ return err;
+}
+
+/**
+ * vs_service_alloc_mbuf - Allocate a message buffer for a service
+ * @service: Service to allocate the buffer for
+ * @size: Size of the data buffer to allocate
+ * @flags: Flags to pass to the buffer allocation
+ */
+static inline struct vs_mbuf *
+vs_service_alloc_mbuf(struct vs_service_device *service, size_t size,
+ gfp_t flags)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_mbuf *mbuf;
+
+ mbuf = session->transport->vt->alloc_mbuf(session->transport,
+ service, size, flags);
+ if (IS_ERR(mbuf) && PTR_ERR(mbuf) == -ENOBUFS) {
+ /* Over quota accounting */
+ if (atomic_cmpxchg(&service->is_over_quota, 0, 1) == 0) {
+ service->stats.over_quota_time = jiffies;
+ atomic_inc(&service->stats.nr_over_quota);
+ }
+ }
+
+ /*
+ * The transport drivers should return either a valid message buffer
+ * pointer or an ERR_PTR value. Warn here if a transport driver is
+ * returning NULL on message buffer allocation failure.
+ */
+ if (WARN_ON_ONCE(!mbuf))
+ return ERR_PTR(-ENOMEM);
+
+ return mbuf;
+}
+
+/**
+ * vs_service_free_mbuf - Deallocate a message buffer for a service
+ * @service: Service the message buffer was allocated for
+ * @mbuf: Message buffer to deallocate
+ */
+static inline void
+vs_service_free_mbuf(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ session->transport->vt->free_mbuf(session->transport, service, mbuf);
+}
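+/*
+ * Illustrative sketch: a raw send path usually pairs the three helpers above.
+ * The direct copy into mbuf->data is an assumption about the payload layout,
+ * and the error path assumes the caller still owns the buffer when the send
+ * fails; generated protocol code normally handles this framing for you.
+ *
+ *	struct vs_mbuf *mbuf;
+ *	int err;
+ *
+ *	mbuf = vs_service_alloc_mbuf(service, len, GFP_KERNEL);
+ *	if (IS_ERR(mbuf))
+ *		return PTR_ERR(mbuf);
+ *	memcpy(mbuf->data, buf, len);
+ *	err = vs_service_send(service, mbuf);
+ *	if (err < 0)
+ *		vs_service_free_mbuf(service, mbuf);
+ *	return err;
+ */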
+
+/**
+ * vs_service_notify - Send a notification from a service
+ * @service: Service to send the notification from
+ * @flags: Notification bits to send
+ */
+static inline int
+vs_service_notify(struct vs_service_device *service, u32 flags)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ return session->transport->vt->notify(session->transport,
+ service, flags);
+}
+
+/**
+ * vs_service_has_atomic_rx - Return whether or not a service's receive
+ * message handler runs in atomic context. This function should only be
+ * called for services which are bound to a driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_rx(struct vs_service_device *service)
+{
+ if (WARN_ON(!service->dev.driver))
+ return false;
+
+ return to_vs_service_driver(service->dev.driver)->rx_atomic;
+}
+
+/**
+ * vs_service_max_mbuf_size - Return the maximum allocation size of a message
+ * buffer.
+ * @service: The service to check
+ */
+static inline size_t
+vs_service_max_mbuf_size(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ return session->transport->vt->max_mbuf_size(session->transport);
+}
+
+/**
+ * vs_service_send_mbufs_available - Return the number of mbufs which can be
+ * allocated for sending before going over quota.
+ * @service: The service to check
+ */
+static inline ssize_t
+vs_service_send_mbufs_available(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ return session->transport->vt->service_send_avail(session->transport,
+ service);
+}
+
+/**
+ * vs_service_has_atomic_tx - Return whether or not a service is allowed to
+ * transmit from atomic context (other than its receive message handler).
+ * This function should only be called for services which are bound to a
+ * driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_tx(struct vs_service_device *service)
+{
+ if (WARN_ON(!service->dev.driver))
+ return false;
+
+ return to_vs_service_driver(service->dev.driver)->tx_atomic;
+}
+
+/**
+ * vs_service_state_lock - Acquire a lock allowing service state operations
+ * from external task contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This must be used to protect any service state accesses that occur in task
+ * contexts outside of a callback from the vservices protocol layer. It must
+ * not be called from a protocol layer callback, nor from atomic context.
+ *
+ * If this service's state is also accessed from softirq contexts other than
+ * vservices protocol layer callbacks, use vs_service_state_lock_bh instead,
+ * and set the driver's tx_atomic flag.
+ *
+ * If this is called from outside the service's workqueue, the calling driver
+ * must provide its own guarantee that it has not been detached from the
+ * service. If that is not possible, use vs_state_lock_safe().
+ */
+static inline void
+vs_service_state_lock(struct vs_service_device *service)
+__acquires(service)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ WARN_ON_ONCE(vs_service_has_atomic_tx(service));
+#endif
+
+ mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ if (WARN_ON_ONCE(service->state_spinlock_used))
+ dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+ service->state_mutex_used = true;
+#endif
+
+ if (vs_service_has_atomic_rx(service))
+ tasklet_disable(&service->rx_tasklet);
+
+ __acquire(service);
+}
+
+/**
+ * vs_service_state_unlock - Release the lock acquired by vs_service_state_lock.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock(struct vs_service_device *service)
+__releases(service)
+{
+ __release(service);
+
+ mutex_unlock(&service->state_mutex);
+
+ if (vs_service_has_atomic_rx(service)) {
+ tasklet_enable(&service->rx_tasklet);
+
+ /* Kick the tasklet if there is RX work to do */
+ if (!list_empty(&service->rx_queue))
+ tasklet_schedule(&service->rx_tasklet);
+ }
+}
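+/*
+ * Minimal usage sketch (illustrative): protecting service state accessed from
+ * an external task context, such as a character device write handler. The
+ * work done under the lock is a placeholder.
+ *
+ *	vs_service_state_lock(service);
+ *	... inspect or update protocol state, send messages ...
+ *	vs_service_state_unlock(service);
+ */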
+
+/**
+ * vs_service_state_lock_bh - Acquire a lock allowing service state operations
+ * from external task or softirq contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This is an alternative to vs_service_state_lock for drivers that receive
+ * messages in atomic context (i.e. have their rx_atomic flag set), *and* must
+ * transmit messages from softirq contexts other than their own message
+ * receive and tx_ready callbacks. Such drivers must set their tx_atomic
+ * flag, so generated protocol drivers perform correct locking.
+ *
+ * This should replace all calls to vs_service_state_lock for services that
+ * need it. Do not use both locking functions in one service driver.
+ *
+ * The calling driver must provide its own guarantee that it has not been
+ * detached from the service. If that is not possible, use
+ * vs_state_lock_safe_bh().
+ */
+static inline void
+vs_service_state_lock_bh(struct vs_service_device *service)
+__acquires(service)
+__acquires(&service->state_spinlock)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ WARN_ON_ONCE(!vs_service_has_atomic_rx(service));
+ WARN_ON_ONCE(!vs_service_has_atomic_tx(service));
+#endif
+
+#ifdef CONFIG_SMP
+ /* Not necessary on UP because it's implied by spin_lock_bh(). */
+ tasklet_disable(&service->rx_tasklet);
+#endif
+
+ spin_lock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ if (WARN_ON_ONCE(service->state_mutex_used))
+ dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+ service->state_spinlock_used = true;
+#endif
+
+ __acquire(service);
+}
+
+/**
+ * vs_service_state_unlock_bh - Release the lock acquired by
+ * vs_service_state_lock_bh.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock_bh(struct vs_service_device *service)
+__releases(service)
+__releases(&service->state_spinlock)
+{
+ __release(service);
+
+ spin_unlock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_SMP
+ tasklet_enable(&service->rx_tasklet);
+#endif
+}
+
+/* Convenience macros for locking a state structure rather than a service. */
+#define vs_state_lock(state) vs_service_state_lock((state)->service)
+#define vs_state_unlock(state) vs_service_state_unlock((state)->service)
+#define vs_state_lock_bh(state) vs_service_state_lock_bh((state)->service)
+#define vs_state_unlock_bh(state) vs_service_state_unlock_bh((state)->service)
+
+/**
+ * vs_state_lock_safe[_bh] - Acquire a lock for a state structure's service,
+ * when the service may have been detached from the state.
+ *
+ * This is useful for blocking operations that can't easily be terminated
+ * before returning from the service reset handler, such as file I/O. To use
+ * this, the state structure should be reference-counted rather than freed in
+ * the release callback, and the driver should retain its own reference to the
+ * service until the state structure is freed.
+ *
+ * This macro acquires the lock and returns true if the state has not been
+ * detached from the service. Otherwise, it returns false.
+ *
+ * Note that the _bh variant cannot be used from atomic context, because it
+ * acquires a mutex.
+ */
+#define __vs_state_lock_safe(_state, _lock, _unlock) ({ \
+ bool __ok = true; \
+ typeof(_state) __state = (_state); \
+ struct vs_service_device *__service = __state->service; \
+ mutex_lock_nested(&__service->ready_lock, \
+ __service->lock_subclass); \
+ __ok = !ACCESS_ONCE(__state->released); \
+ if (__ok) { \
+ _lock(__state); \
+ __ok = !ACCESS_ONCE(__state->released); \
+ if (!__ok) \
+ _unlock(__state); \
+ } \
+ mutex_unlock(&__service->ready_lock); \
+ __ok; \
+})
+#define vs_state_lock_safe(_state) \
+ __vs_state_lock_safe((_state), vs_state_lock, vs_state_unlock)
+#define vs_state_lock_safe_bh(_state) \
+ __vs_state_lock_safe((_state), vs_state_lock_bh, vs_state_unlock_bh)
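+/*
+ * Illustrative sketch: a blocking path (e.g. file I/O) that may race with a
+ * service reset can guard its state access like this; the error code used on
+ * failure is up to the caller.
+ *
+ *	if (!vs_state_lock_safe(state))
+ *		return -ENODEV;
+ *	... use the state, possibly send messages ...
+ *	vs_state_unlock(state);
+ */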
+
+/**
+ * vs_get_service - Get a reference to a service.
+ * @service: Service to get a reference to.
+ */
+static inline struct vs_service_device *
+vs_get_service(struct vs_service_device *service)
+{
+ if (service)
+ get_device(&service->dev);
+ return service;
+}
+
+/**
+ * vs_put_service - Put a reference to a service.
+ * @service: The service to put the reference to.
+ */
+static inline void
+vs_put_service(struct vs_service_device *service)
+{
+ put_device(&service->dev);
+}
+
+extern int vs_service_reset(struct vs_service_device *service,
+ struct vs_service_device *caller);
+extern void vs_service_reset_nosync(struct vs_service_device *service);
+
+/**
+ * vs_service_send_batch_start - Start a batch of outgoing messages
+ * @service: The service that is starting a batch
+ * @flush: Finish any previously started batch (if false, then duplicate
+ * calls to this function have no effect)
+ */
+static inline void
+vs_service_send_batch_start(struct vs_service_device *service, bool flush)
+{
+ if (flush && service->tx_batching) {
+ struct vs_session_device *session =
+ vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ if (vt->flush)
+ vt->flush(session->transport, service);
+ } else {
+ service->tx_batching = true;
+ }
+}
+
+/**
+ * vs_service_send_batch_end - End a batch of outgoing messages
+ * @service: The service that is ending a batch
+ * @flush: Start sending the batch immediately (if false, the batch will
+ * be flushed when the next message is sent)
+ */
+static inline void
+vs_service_send_batch_end(struct vs_service_device *service, bool flush)
+{
+ service->tx_batching = false;
+ if (flush) {
+ struct vs_session_device *session =
+ vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ if (vt->flush)
+ vt->flush(session->transport, service);
+ }
+}
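+/*
+ * Illustrative sketch: batching several outgoing messages so the transport
+ * may defer the actual transfer until the batch is ended and flushed.
+ *
+ *	vs_service_send_batch_start(service, false);
+ *	... queue several messages with vs_service_send() ...
+ *	vs_service_send_batch_end(service, true);
+ */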
+
+
+#endif /* _VSERVICE_SERVICE_H_ */
diff --git a/include/vservices/session.h b/include/vservices/session.h
new file mode 100644
index 000000000000..b9dc775dc9f4
--- /dev/null
+++ b/include/vservices/session.h
@@ -0,0 +1,161 @@
+/*
+ * include/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the device type for a vServices session attached to a
+ * transport. This should only be used by transport drivers, the vServices
+ * session code, and the inline transport-access functions defined in
+ * vservices/service.h.
+ *
+ * Drivers for these devices are defined internally by the vServices
+ * framework. Other drivers should not attach to these devices.
+ */
+
+#ifndef _VSERVICES_SESSION_H_
+#define _VSERVICES_SESSION_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#include <vservices/types.h>
+
+struct vs_service_device;
+struct vs_mbuf;
+
+struct notifier_block;
+
+/**
+ * enum vs_notify_event_t - vService notifier events
+ *
+ * @VS_SESSION_NOTIFY_ADD: vService session added. Argument is a pointer to
+ * the vs_session_device. This notification is sent after the session has been
+ * added.
+ *
+ * @VS_SESSION_NOTIFY_REMOVE: vService session about to be removed. Argument is
+ * a pointer to the vs_session_device. This notification is sent before the
+ * session is removed.
+ */
+enum vs_notify_event_t {
+ VS_SESSION_NOTIFY_ADD,
+ VS_SESSION_NOTIFY_REMOVE,
+};
+
+/**
+ * struct vs_session_device - Session device
+ * @name: The unique human-readable name of this session.
+ * @is_server: True if this session is a server, false if client
+ * @transport: The transport device for this session
+ * @session_num: Unique ID for this session. Used for sysfs
+ * @session_lock: Mutex which protects any change to service presence or
+ * readiness
+ * @core_service: The core service, if one has ever been registered. Once set,
+ *	this must remain valid and unchanged until the session driver is
+ *	removed. Writes are protected by the service_idr_lock.
+ * @service_idr: IDR of the services currently allocated on this session.
+ *	Protected by service_idr_lock.
+ * @service_idr_lock: Mutex protecting the service IDR and the core_service
+ *	pointer
+ * @activation_work: work structure for handling session activation & reset
+ * @activation_state: true if transport is currently active
+ * @fatal_error_work: work structure for handling fatal session failures
+ * @debug_mask: Debug level mask
+ * @list: Entry in the global session list
+ * @sysfs_entry: Kobject pointer pointing to session device in sysfs under
+ * sys/vservices
+ * @dev: Device structure for the Linux device model
+ */
+struct vs_session_device {
+ char *name;
+ bool is_server;
+ struct vs_transport *transport;
+ int session_num;
+
+ struct mutex session_lock;
+
+ /*
+ * The service_idr maintains the list of currently allocated services
+ * on a session, and allows for recycling of service ids. The lock also
+ * protects core_service.
+ */
+ struct idr service_idr;
+ struct mutex service_idr_lock;
+ struct vs_service_device *core_service;
+
+ struct work_struct activation_work;
+ atomic_t activation_state;
+
+ struct work_struct fatal_error_work;
+
+ unsigned long debug_mask;
+
+ struct list_head list;
+ struct kobject *sysfs_entry;
+
+ struct device dev;
+};
+
+#define to_vs_session_device(d) \
+ container_of(d, struct vs_session_device, dev)
+
+extern struct vs_session_device *
+vs_session_register(struct vs_transport *transport, struct device *parent,
+ bool server, const char *transport_name);
+extern void vs_session_start(struct vs_session_device *session);
+extern void vs_session_unregister(struct vs_session_device *session);
+
+extern int vs_session_handle_message(struct vs_session_device *session,
+ struct vs_mbuf *mbuf, vs_service_id_t service_id);
+
+extern void vs_session_quota_available(struct vs_session_device *session,
+ vs_service_id_t service_id, unsigned count,
+ bool send_tx_ready);
+
+extern void vs_session_handle_notify(struct vs_session_device *session,
+ unsigned long flags, vs_service_id_t service_id);
+
+extern void vs_session_handle_reset(struct vs_session_device *session);
+extern void vs_session_handle_activate(struct vs_session_device *session);
+
+extern struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+ struct vs_service_device *parent, const char *name,
+ const char *protocol, const void *plat_data);
+extern int vs_server_destroy_service(struct vs_service_device *service,
+ struct vs_service_device *parent);
+
+extern void vs_session_register_notify(struct notifier_block *nb);
+extern void vs_session_unregister_notify(struct notifier_block *nb);
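+/*
+ * Illustrative sketch (names hypothetical): watching sessions come and go via
+ * the notifier chain registered with the two calls above.
+ *
+ *	static int example_session_event(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		struct vs_session_device *session = data;
+ *
+ *		if (event == VS_SESSION_NOTIFY_ADD)
+ *			dev_info(&session->dev, "session added\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block example_nb = {
+ *		.notifier_call	= example_session_event,
+ *	};
+ *
+ *	vs_session_register_notify(&example_nb);
+ */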
+
+extern int vs_session_unbind_driver(struct vs_service_device *service);
+
+extern void vs_session_for_each_service(struct vs_session_device *session,
+ void (*func)(struct vs_service_device *, void *), void *data);
+
+extern struct mutex vs_session_lock;
+extern int vs_session_for_each_locked(
+ int (*fn)(struct vs_session_device *session, void *data),
+ void *data);
+
+static inline int vs_session_for_each(
+ int (*fn)(struct vs_session_device *session, void *data),
+ void *data)
+{
+ int r;
+ mutex_lock(&vs_session_lock);
+ r = vs_session_for_each_locked(fn, data);
+ mutex_unlock(&vs_session_lock);
+ return r;
+}
+
+#endif /* _VSERVICES_SESSION_H_ */
diff --git a/include/vservices/transport.h b/include/vservices/transport.h
new file mode 100644
index 000000000000..6251ce15684a
--- /dev/null
+++ b/include/vservices/transport.h
@@ -0,0 +1,150 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the transport vtable structure. This is made public so
+ * that the application drivers can call the vtable functions directly (via
+ * the inlined wrappers in service.h) rather than indirectly via a function
+ * call.
+ *
+ */
+
+#ifndef _VSERVICES_TRANSPORT_H_
+#define _VSERVICES_TRANSPORT_H_
+
+#include <linux/types.h>
+
+#include <vservices/types.h>
+
+struct vs_transport;
+struct vs_mbuf;
+struct vs_service_device;
+
+/**
+ * struct vs_transport_vtable - Transport driver operations. Transport drivers
+ * must provide implementations for all operations in this table.
+ * --- Message buffer allocation ---
+ * @alloc_mbuf: Allocate an mbuf of the given size for the given service
+ * @free_mbuf: Deallocate an mbuf
+ * @mbuf_size: Return the size in bytes of a message buffer. The size returned
+ * should be the total number of bytes including any headers.
+ * @max_mbuf_size: Return the maximum allowable message buffer allocation size.
+ * --- Message sending ---
+ * @send: Queue an mbuf for sending
+ * @flush: Start the transfer for the current message batch, if any
+ * @notify: Send a notification
+ * --- Transport-level reset handling ---
+ * @reset: Reset the transport layer
+ * @ready: Ready the transport layer
+ * --- Service management ---
+ * @service_add: A new service has been added to this transport's session
+ * @service_remove: A service has been removed from this transport's session
+ * @service_start: A service on this transport's session has had its resource
+ * allocations set and is about to start. This is always interleaved with
+ * service_reset, with one specific exception: the core service client,
+ * which has its quotas initially hard-coded to 0 send / 1 recv and
+ * adjusted when the initial startup message arrives.
+ * @service_reset: A service on this transport's session has just been reset,
+ * and any resources allocated to it should be cleaned up to prepare
+ * for later reallocation.
+ * @service_send_avail: The number of message buffers that this service is
+ * able to send before going over quota.
+ * --- Query transport capabilities ---
+ * @get_notify_bits: Fetch the number of sent and received notification bits
+ * supported by this transport. Note that this can be any positive value
+ * up to UINT_MAX.
+ * @get_quota_limits: Fetch the total send and receive message buffer quotas
+ * supported by this transport. Note that this can be any positive value
+ * up to UINT_MAX.
+ */
+struct vs_transport_vtable {
+ /* Message buffer allocation */
+ struct vs_mbuf *(*alloc_mbuf)(struct vs_transport *transport,
+ struct vs_service_device *service, size_t size,
+ gfp_t gfp_flags);
+ void (*free_mbuf)(struct vs_transport *transport,
+ struct vs_service_device *service,
+ struct vs_mbuf *mbuf);
+ size_t (*mbuf_size)(struct vs_mbuf *mbuf);
+ size_t (*max_mbuf_size)(struct vs_transport *transport);
+
+ /* Sending messages */
+ int (*send)(struct vs_transport *transport,
+ struct vs_service_device *service,
+ struct vs_mbuf *mbuf, unsigned long flags);
+ int (*flush)(struct vs_transport *transport,
+ struct vs_service_device *service);
+ int (*notify)(struct vs_transport *transport,
+ struct vs_service_device *service,
+ unsigned long bits);
+
+ /* Raising and clearing transport-level reset */
+ void (*reset)(struct vs_transport *transport);
+ void (*ready)(struct vs_transport *transport);
+
+ /* Service management */
+ int (*service_add)(struct vs_transport *transport,
+ struct vs_service_device *service);
+ void (*service_remove)(struct vs_transport *transport,
+ struct vs_service_device *service);
+
+ int (*service_start)(struct vs_transport *transport,
+ struct vs_service_device *service);
+ int (*service_reset)(struct vs_transport *transport,
+ struct vs_service_device *service);
+
+ ssize_t (*service_send_avail)(struct vs_transport *transport,
+ struct vs_service_device *service);
+
+ /* Query transport capabilities */
+ void (*get_notify_bits)(struct vs_transport *transport,
+ unsigned *send_notify_bits, unsigned *recv_notify_bits);
+ void (*get_quota_limits)(struct vs_transport *transport,
+ unsigned *send_quota, unsigned *recv_quota);
+};
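+/*
+ * Illustrative sketch (all implementation names hypothetical): a transport
+ * driver provides every operation in the vtable and points its
+ * struct vs_transport at it.
+ *
+ *	static const struct vs_transport_vtable example_vt = {
+ *		.alloc_mbuf		= example_alloc_mbuf,
+ *		.free_mbuf		= example_free_mbuf,
+ *		.mbuf_size		= example_mbuf_size,
+ *		.max_mbuf_size		= example_max_mbuf_size,
+ *		.send			= example_send,
+ *		.flush			= example_flush,
+ *		.notify			= example_notify,
+ *		.reset			= example_reset,
+ *		.ready			= example_ready,
+ *		.service_add		= example_service_add,
+ *		.service_remove		= example_service_remove,
+ *		.service_start		= example_service_start,
+ *		.service_reset		= example_service_reset,
+ *		.service_send_avail	= example_service_send_avail,
+ *		.get_notify_bits	= example_get_notify_bits,
+ *		.get_quota_limits	= example_get_quota_limits,
+ *	};
+ */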
+
+/* Flags for .send */
+#define VS_TRANSPORT_SEND_FLAGS_MORE 0x1
+
+/**
+ * struct vs_transport - A structure representing a transport
+ * @type: type of transport, e.g. microvisor or loopback
+ * @vt: Transport operations table
+ * @notify_info: Array of incoming notification settings
+ * @notify_info_size: Size of the incoming notification array
+ */
+struct vs_transport {
+ const char *type;
+ const struct vs_transport_vtable *vt;
+ struct vs_notify_info *notify_info;
+ int notify_info_size;
+};
+
+/**
+ * struct vs_mbuf - Message buffer. This is always allocated and released by the
+ * transport callbacks defined above, so it may be embedded in a
+ * transport-specific structure containing additional state.
+ * @data: Message data buffer
+ * @size: Size of the data buffer in bytes
+ * @is_recv: True if this mbuf was received from the other end of the
+ * transport. False if it was allocated by this end for sending.
+ * @priv: Private value that will not be touched by the framework
+ * @queue: list_head for entry in lists. The session layer uses this queue
+ * for receiving messages. The transport driver may use this queue for its
+ * own purposes when sending messages.
+ */
+struct vs_mbuf {
+ void *data;
+ size_t size;
+ bool is_recv;
+ void *priv;
+ struct list_head queue;
+};
+
+#endif /* _VSERVICES_TRANSPORT_H_ */
diff --git a/include/vservices/types.h b/include/vservices/types.h
new file mode 100644
index 000000000000..306156eab1ba
--- /dev/null
+++ b/include/vservices/types.h
@@ -0,0 +1,41 @@
+/*
+ * include/vservices/types.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _VSERVICE_TYPES_H
+#define _VSERVICE_TYPES_H
+
+#include <linux/types.h>
+
+typedef u16 vs_service_id_t;
+typedef u16 vs_message_id_t;
+
+/*
+ * An opaque handle to a queued asynchronous command. This is used internally
+ * by the generated interface code, to identify which of the pending commands
+ * is being replied to. It is provided as a parameter to non-blocking handler
+ * callbacks for queued asynchronous requests, and must be stored by the server
+ * and passed to the corresponding reply call.
+ */
+typedef struct vservice_queued_request vservice_queued_request_t;
+
+/*
+ * The following enum is used by a server to report the outcome of an open
+ * callback, returning VS_SERVER_RESP_SUCCESS or VS_SERVER_RESP_FAILURE
+ * respectively. Alternatively, the server may choose to complete the request
+ * explicitly later, in which case it should return
+ * VS_SERVER_RESP_EXPLICIT_COMPLETE.
+ */
+typedef enum vs_server_response_type {
+ VS_SERVER_RESP_SUCCESS,
+ VS_SERVER_RESP_FAILURE,
+ VS_SERVER_RESP_EXPLICIT_COMPLETE
+} vs_server_response_type_t;
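+/*
+ * Illustrative sketch (handler and helper names hypothetical): a serial
+ * server's open callback can answer immediately, or defer and later finish
+ * the request with the corresponding vs_server_serial_open_complete() call.
+ *
+ *	static vs_server_response_type_t example_open(
+ *			struct vs_server_serial_state *state)
+ *	{
+ *		if (example_ready_now(state))
+ *			return VS_SERVER_RESP_SUCCESS;
+ *
+ *		example_defer_open(state);
+ *		return VS_SERVER_RESP_EXPLICIT_COMPLETE;
+ *	}
+ */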
+
+#endif /*_VSERVICE_TYPES_H */
diff --git a/include/vservices/wait.h b/include/vservices/wait.h
new file mode 100644
index 000000000000..544937de2058
--- /dev/null
+++ b/include/vservices/wait.h
@@ -0,0 +1,455 @@
+/*
+ * include/vservices/wait.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic wait event helpers for Virtual Service drivers.
+ */
+
+#ifndef _VSERVICE_SERVICE_WAIT_H
+#define _VSERVICE_SERVICE_WAIT_H
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <vservices/service.h>
+
+/* Older kernels don't have lockdep_assert_held_once(). */
+#ifndef lockdep_assert_held_once
+#ifdef CONFIG_LOCKDEP
+#define lockdep_assert_held_once(l) do { \
+ WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
+ } while (0)
+#else
+#define lockdep_assert_held_once(l) do { } while (0)
+#endif
+#endif
+
+/* Legacy wait macro; needs rewriting to use vs_state_lock_safe(). */
+/* FIXME: Redmine ticket #229 - philip. */
+/**
+ * __vs_service_wait_event - Wait for a condition to become true for a
+ * Virtual Service.
+ *
+ * @_service: The service to wait for the condition to be true for.
+ * @_wq: Waitqueue to wait on.
+ * @_condition: Condition to wait for.
+ *
+ * Returns: 0 once the condition is true, or -ERESTARTSYS if the wait was
+ * interrupted by a signal. If _state is TASK_UNINTERRUPTIBLE then this
+ * function always returns 0.
+ *
+ * This function must be called with the service's state lock held. The wait
+ * is performed without the state lock held, but the condition is re-checked
+ * after reacquiring the state lock. This property allows this function to
+ * check the state of the service's protocol in a thread safe manner.
+ *
+ * The caller is responsible for ensuring that it has not been detached from
+ * the given service.
+ *
+ * It is nearly always wrong to call this on the service workqueue, since
+ * the workqueue is single-threaded and the state can only change when a
+ * handler function is called on it.
+ */
+#define __vs_service_wait_event(_service, _wq, _cond, _state) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ int __ret = 0; \
+ \
+ lockdep_assert_held_once(&(_service)->state_mutex); \
+ do { \
+ prepare_to_wait(&(_wq), &__wait, (_state)); \
+ \
+ if (_cond) \
+ break; \
+ \
+ if ((_state) == TASK_INTERRUPTIBLE && \
+ signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ \
+ vs_service_state_unlock(_service); \
+ schedule(); \
+ vs_service_state_lock(_service); \
+ } while (!(_cond)); \
+ \
+ finish_wait(&(_wq), &__wait); \
+ __ret; \
+ })
+
+/* Legacy wait macros; need rewriting to use __vs_wait_state(). */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_wait_event(_service, _wq, _cond) \
+ __vs_service_wait_event(_service, _wq, _cond, TASK_INTERRUPTIBLE)
+#define vs_service_wait_event_nointr(_service, _wq, _cond) \
+ __vs_service_wait_event(_service, _wq, _cond, TASK_UNINTERRUPTIBLE)
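A minimal usage sketch for the helper above, assuming a hypothetical driver state with an ack_wq waitqueue and an acked flag (neither is part of this header):

/* Sketch: block until the peer has acknowledged, state lock held across it. */
static int my_wait_for_ack(struct vs_service_device *service,
		struct my_drv_state *drv)
{
	int err;

	vs_service_state_lock(service);
	err = vs_service_wait_event(service, drv->ack_wq, drv->acked);
	vs_service_state_unlock(service);

	return err;	/* 0 on success, -ERESTARTSYS if interrupted */
}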
+
+/**
+ * __vs_wait_state - block until a condition becomes true on a service state.
+ *
+ * @_state: The protocol state to wait on.
+ * @_cond: Condition to wait for.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ * with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ * timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ * non-framework tasklet); otherwise nothing.
+ *
+ * Return: 0 once the condition becomes true, or a negative error code as
+ * described above (-ECANCELED, -ERESTARTSYS, -ETIMEDOUT or -ENOLINK).
+ *
+ * This macro blocks waiting until a particular condition becomes true on a
+ * service state. The service must be running; if not, or if it ceases to be
+ * running during the wait, -ECANCELED will be returned.
+ *
+ * This is not an exclusive wait. If an exclusive wait is desired it is
+ * usually better to use the waiting alloc or send functions.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The state lock will be dropped by waiting
+ * but reacquired before returning, unless -ENOLINK is returned, in which case
+ * the service driver has been unbound and the lock cannot be reacquired.
+ */
+#define __vs_wait_state(_state, _cond, _intr, _timeout, _bh) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ int __ret; \
+ int __jiffies __maybe_unused = (_timeout); \
+ struct vs_service_device *__service = (_state)->service;\
+ \
+ while (1) { \
+ prepare_to_wait(&__service->quota_wq, &__wait, \
+ _intr ? TASK_INTERRUPTIBLE : \
+ TASK_UNINTERRUPTIBLE); \
+ \
+ if (!VSERVICE_BASE_STATE_IS_RUNNING( \
+ (_state)->state.base)) { \
+ __ret = -ECANCELED; \
+ break; \
+ } \
+ \
+ if (_cond) { \
+ __ret = 0; \
+ break; \
+ } \
+ \
+ if (_intr && signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ \
+ vs_state_unlock##_bh(_state); \
+ \
+ if (_timeout >= 0) { \
+ __jiffies = schedule_timeout(__jiffies);\
+ if (!__jiffies) { \
+ __ret = -ETIMEDOUT; \
+ break; \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ \
+ if (!vs_state_lock_safe##_bh(_state)) { \
+ __ret = -ENOLINK; \
+ break; \
+ } \
+ } \
+ \
+ finish_wait(&__service->quota_wq, &__wait); \
+ __ret; \
+ })
+
+/* Specialisations of __vs_wait_state for common uses. */
+#define vs_wait_state(_state, _cond) \
+ __vs_wait_state(_state, _cond, true, -1,)
+#define vs_wait_state_timeout(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, true, _timeout,)
+#define vs_wait_state_nointr(_state, _cond) \
+ __vs_wait_state(_state, _cond, false, -1,)
+#define vs_wait_state_nointr_timeout(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, false, _timeout,)
+#define vs_wait_state_bh(_state, _cond) \
+ __vs_wait_state(_state, _cond, true, -1, _bh)
+#define vs_wait_state_timeout_bh(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, true, _timeout, _bh)
+#define vs_wait_state_nointr_bh(_state, _cond) \
+ __vs_wait_state(_state, _cond, false, -1, _bh)
+#define vs_wait_state_nointr_timeout_bh(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, false, _timeout, _bh)
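For example, a client driver could wait for a protocol-level flag with a bounded timeout roughly as follows; my_client_state and its ready field are placeholders, and the vs_state_lock()/vs_state_unlock() helpers are assumed to come from service.h:

/* Sketch: wait up to one second for the peer to signal readiness. */
static int my_wait_ready(struct my_client_state *state)
{
	int err;

	vs_state_lock(state);
	err = vs_wait_state_timeout(state, state->ready, HZ);
	if (err != -ENOLINK)	/* on -ENOLINK the lock is already gone */
		vs_state_unlock(state);

	return err;	/* 0, -ECANCELED, -ERESTARTSYS or -ETIMEDOUT */
}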
+
+/**
+ * __vs_wait_alloc - block until quota is available, then allocate a buffer.
+ *
+ * @_state: The protocol state to allocate a message for.
+ * @_alloc_func: The message buffer allocation function to run. This is the
+ * full function invocation, not a pointer to the function.
+ * @_cond: Additional condition which must remain true, or else the wait
+ * will fail with -ECANCELED. This is typically used to check the
+ * service's protocol state. Note that this condition will only
+ * be checked after sleeping; it is assumed to be true when the
+ * macro is first called.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ * may then fail with -ENOLINK if the driver is detached from the
+ * service, in which case the lock is not reacquired.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ * with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ * timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ * non-framework tasklet); otherwise nothing.
+ *
+ * Return: Return a pointer to a message buffer on successful allocation,
+ * or an error code in ERR_PTR form.
+ *
+ * This macro calls a specified message allocation function, and blocks
+ * if it returns -ENOBUFS, waiting until quota is available on the service
+ * before retrying. It aborts the wait if the service resets, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the allocator function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when using this macro on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_alloc(_state, _alloc_func, _cond, _unlock, _intr, \
+ _timeout, _bh) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ struct vs_mbuf *__mbuf = NULL; \
+ int __jiffies __maybe_unused = (_timeout); \
+ struct vs_service_device *__service = (_state)->service;\
+ \
+ while (!vs_service_send_mbufs_available(__service)) { \
+ if (_intr && signal_pending(current)) { \
+ __mbuf = ERR_PTR(-ERESTARTSYS); \
+ break; \
+ } \
+ \
+ prepare_to_wait_exclusive( \
+ &__service->quota_wq, &__wait, \
+ _intr ? TASK_INTERRUPTIBLE : \
+ TASK_UNINTERRUPTIBLE); \
+ \
+ if (_unlock) \
+ vs_state_unlock##_bh(_state); \
+ \
+ if (_timeout >= 0) { \
+ __jiffies = schedule_timeout(__jiffies);\
+ if (!__jiffies) { \
+ __mbuf = ERR_PTR(-ETIMEDOUT); \
+ break; \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ \
+ if (_unlock && !vs_state_lock_safe##_bh( \
+ _state)) { \
+ __mbuf = ERR_PTR(-ENOLINK); \
+ break; \
+ } \
+ \
+ if (!VSERVICE_BASE_STATE_IS_RUNNING( \
+ (_state)->state.base) || \
+ !(_cond)) { \
+ __mbuf = ERR_PTR(-ECANCELED); \
+ break; \
+ } \
+ } \
+ finish_wait(&__service->quota_wq, &__wait); \
+ \
+ if (__mbuf == NULL) \
+ __mbuf = (_alloc_func); \
+ if (IS_ERR(__mbuf) && (PTR_ERR(__mbuf) != -ENOBUFS)) \
+ wake_up(&__service->quota_wq); \
+ __mbuf; \
+ })
+
+/* Specialisations of __vs_wait_alloc for common uses. */
+#define vs_wait_alloc(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_wait_alloc_timeout(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout,)
+#define vs_wait_alloc_nointr(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+#define vs_wait_alloc_nointr_timeout(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout,)
+#define vs_wait_alloc_bh(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1, _bh)
+#define vs_wait_alloc_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout, _bh)
+#define vs_wait_alloc_nointr_bh(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1, _bh)
+#define vs_wait_alloc_nointr_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout, _bh)
+#define vs_wait_alloc_locked(_state, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_alloc(_state, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+#define vs_service_waiting_alloc_cond_locked(_state, _alloc_func, _cond) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_service_waiting_alloc_cond_locked_nointr(_state, _alloc_func, _cond) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
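As a usage sketch, a send path might combine the wait with a generated allocation call; the protocol-specific names (my_state, VSERVICE_MY_STATE_IS_OPEN, vs_my_proto_alloc_data) are placeholders rather than real generated symbols:

/* Sketch: allocate a data mbuf, sleeping until send quota is available.
 * Called with the state lock and a service reference held. */
static struct vs_mbuf *my_alloc_data_buf(struct my_state *state, size_t size)
{
	/* Returns the mbuf, or an ERR_PTR: -ECANCELED, -ERESTARTSYS, -ENOLINK. */
	return vs_wait_alloc(state, VSERVICE_MY_STATE_IS_OPEN(state->state),
			vs_my_proto_alloc_data(state, size, GFP_KERNEL));
}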
+
+/**
+ * __vs_wait_send - block until quota is available, then send a message.
+ *
+ * @_state: The protocol state to send a message for.
+ * @_cond: Additional condition which must remain true, or else the wait
+ * will fail with -ECANCELED. This is typically used to check the
+ * service's protocol state. Note that this condition will only
+ * be checked after sleeping; it is assumed to be true when the
+ * macro is first called.
+ * @_send_func: The message send function to run. This is the full function
+ * invocation, not a pointer to the function.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ * may then fail with -ENOLINK if the driver is detached from the
+ * service, in which case the lock is not reacquired.
+ * @_check_running: If true, the wait will return -ECANCELED if the service's
+ * base state is not active, or ceases to be active.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ * with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ * timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ * non-framework tasklet); otherwise nothing.
+ *
+ * Return: If the send succeeds, then 0 is returned; otherwise an error
+ * code may be returned as described above.
+ *
+ * This macro calls a specified message send function, and blocks if it
+ * returns -ENOBUFS, waiting until quota is available on the service before
+ * retrying. It aborts the wait if it finds the service in reset, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the send function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when calling this function on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_send(_state, _cond, _send_func, _unlock, \
+ _check_running, _intr, _timeout, _bh) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ int __ret = 0; \
+ int __jiffies __maybe_unused = (_timeout); \
+ struct vs_service_device *__service = (_state)->service;\
+ \
+ while (!vs_service_send_mbufs_available(__service)) { \
+ if (_intr && signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ \
+ prepare_to_wait_exclusive( \
+ &__service->quota_wq, &__wait, \
+ _intr ? TASK_INTERRUPTIBLE : \
+ TASK_UNINTERRUPTIBLE); \
+ \
+ if (_unlock) \
+ vs_state_unlock##_bh(_state); \
+ \
+ if (_timeout >= 0) { \
+ __jiffies = schedule_timeout(__jiffies);\
+ if (!__jiffies) { \
+ __ret = -ETIMEDOUT; \
+ break; \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ \
+ if (_unlock && !vs_state_lock_safe##_bh( \
+ _state)) { \
+ __ret = -ENOLINK; \
+ break; \
+ } \
+ \
+ if ((_check_running && \
+ !VSERVICE_BASE_STATE_IS_RUNNING(\
+ (_state)->state.base)) || \
+ !(_cond)) { \
+ __ret = -ECANCELED; \
+ break; \
+ } \
+ } \
+ finish_wait(&__service->quota_wq, &__wait); \
+ \
+ if (!__ret) \
+ __ret = (_send_func); \
+ if ((__ret < 0) && (__ret != -ENOBUFS)) \
+ wake_up(&__service->quota_wq); \
+ __ret; \
+ })
+
+/* Specialisations of __vs_wait_send for common uses. */
+#define vs_wait_send(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_wait_send_timeout(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, _timeout,)
+#define vs_wait_send_nointr(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_wait_send_nointr_timeout(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, _timeout,)
+#define vs_wait_send_bh(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, -1, _bh)
+#define vs_wait_send_timeout_bh(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, \
+ _timeout, _bh)
+#define vs_wait_send_nointr_bh(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, -1, _bh)
+#define vs_wait_send_nointr_timeout_bh(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, \
+ _timeout, _bh)
+#define vs_wait_send_locked(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, false, true, true, -1,)
+#define vs_wait_send_locked_nocheck(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, false, false, true, -1,)
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_send(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_nointr(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_cond(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_cond_nointr(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_nocheck(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, true, false, true, -1,)
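A corresponding send-side sketch, again using placeholder protocol names:

/* Sketch: send an acknowledgement, sleeping while send quota is exhausted.
 * Called with the state lock and a service reference held. Returns 0, or
 * -ECANCELED/-ERESTARTSYS/-ENOLINK; on -ENOLINK the state lock has been
 * dropped and is not reacquired. */
static int my_send_ack(struct my_state *state)
{
	return vs_wait_send(state, VSERVICE_MY_STATE_IS_OPEN(state->state),
			vs_my_proto_send_ack(state, GFP_KERNEL));
}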
+
+#endif /* _VSERVICE_SERVICE_WAIT_H */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e144ded910a7..cd941f854142 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4286,7 +4286,7 @@ int perf_event_release_kernel(struct perf_event *event)
* back online.
*/
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
- if (!cpu_online(event->cpu)) {
+ if (event->cpu != -1 && !cpu_online(event->cpu)) {
if (event->state == PERF_EVENT_STATE_ZOMBIE)
return 0;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 32b67ebd27e1..53974cc8d02a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -19,10 +19,6 @@
#include "sched.h"
#include "tune.h"
-#ifdef CONFIG_SCHED_WALT
-unsigned long boosted_cpu_util(int cpu);
-#endif
-
#define SUGOV_KTHREAD_PRIORITY 50
struct sugov_tunables {
@@ -182,8 +178,7 @@ static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
*util = min(rq->cfs.avg.util_avg, cfs_max);
*max = cfs_max;
- *util = cpu_util_freq(cpu, &loadcpu->walt_load);
- *util = boosted_cpu_util(cpu);
+ *util = boosted_cpu_util(cpu, &loadcpu->walt_load);
}
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2e976d73d79f..76c491e8c9ba 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,9 +37,12 @@
#include "walt.h"
#include <trace/events/sched.h>
+#ifdef CONFIG_SMP
+static inline bool task_fits_max(struct task_struct *p, int cpu);
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_SCHED_WALT
-static inline bool task_fits_max(struct task_struct *p, int cpu);
static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand);
static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
@@ -4851,9 +4854,6 @@ static inline void hrtick_update(struct rq *rq)
#ifdef CONFIG_SMP
static unsigned long capacity_orig_of(int cpu);
static unsigned long cpu_util(int cpu);
-unsigned long boosted_cpu_util(int cpu);
-#else
-#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#ifdef CONFIG_SMP
@@ -4865,7 +4865,7 @@ static void update_capacity_of(int cpu)
return;
/* Convert scale-invariant capacity to cpu. */
- req_cap = boosted_cpu_util(cpu);
+ req_cap = boosted_cpu_util(cpu, NULL);
req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, req_cap);
}
@@ -6324,9 +6324,9 @@ schedtune_task_margin(struct task_struct *task)
#endif /* CONFIG_SCHED_TUNE */
unsigned long
-boosted_cpu_util(int cpu)
+boosted_cpu_util(int cpu, struct sched_walt_cpu_load *walt_load)
{
- unsigned long util = cpu_util_freq(cpu, NULL);
+ unsigned long util = cpu_util_freq(cpu, walt_load);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
@@ -6885,6 +6885,25 @@ bias_to_prev_cpu(struct task_struct *p, struct cpumask *rtg_target)
return true;
}
+#ifdef CONFIG_SCHED_WALT
+static inline struct cpumask *find_rtg_target(struct task_struct *p)
+{
+ struct related_thread_group *grp;
+ struct cpumask *rtg_target = NULL;
+
+ grp = task_related_thread_group(p);
+ if (grp && grp->preferred_cluster)
+ rtg_target = &grp->preferred_cluster->cpus;
+
+ return rtg_target;
+}
+#else
+static inline struct cpumask *find_rtg_target(struct task_struct *p)
+{
+ return NULL;
+}
+#endif
+
unsigned int sched_smp_overlap_capacity = SCHED_CAPACITY_SCALE;
static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
@@ -6913,7 +6932,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
bool need_idle;
enum sched_boost_policy placement_boost = task_sched_boost(p) ?
sched_boost_policy() : SCHED_BOOST_NONE;
- struct related_thread_group *grp;
cpumask_t search_cpus;
int prev_cpu = task_cpu(p);
int start_cpu = walt_start_cpu(prev_cpu);
@@ -6935,9 +6953,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
need_idle = wake_to_idle(p) || schedtune_prefer_idle(p);
if (need_idle)
sync = 0;
- grp = task_related_thread_group(p);
- if (grp && grp->preferred_cluster)
- rtg_target = &grp->preferred_cluster->cpus;
+
+ rtg_target = find_rtg_target(p);
if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
trace_sched_task_util_bias_to_waker(p, prev_cpu,
@@ -10759,6 +10776,24 @@ static void rq_offline_fair(struct rq *rq)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_WALT
+static inline void
+walt_update_misfit_task(struct rq *rq, struct task_struct *curr)
+{
+ bool misfit = rq->misfit_task;
+
+ if (curr->misfit != misfit) {
+ walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
+ curr->misfit = misfit;
+ }
+}
+#else
+static inline void
+walt_update_misfit_task(struct rq *rq, struct task_struct *curr)
+{
+}
+#endif
+
/*
* scheduler tick hitting a task of our scheduling class:
*/
@@ -10766,10 +10801,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
-#ifdef CONFIG_SMP
- bool old_misfit = curr->misfit;
- bool misfit;
-#endif
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -10785,15 +10816,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
trace_sched_overutilized(true);
}
- misfit = !task_fits_max(curr, rq->cpu);
- rq->misfit_task = misfit;
-
- if (old_misfit != misfit) {
- walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
- curr->misfit = misfit;
- }
+ rq->misfit_task = !task_fits_max(curr, rq->cpu);
#endif
-
+ walt_update_misfit_task(rq, curr);
}
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cbc54ebcc6d2..79f55a18ffa6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1107,6 +1107,12 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"
+#define NO_BOOST 0
+#define FULL_THROTTLE_BOOST 1
+#define CONSERVATIVE_BOOST 2
+#define RESTRAINED_BOOST 3
+
+
enum sched_boost_policy {
SCHED_BOOST_NONE,
SCHED_BOOST_ON_BIG,
@@ -1891,8 +1897,13 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
return cpu_util_freq_pelt(cpu);
}
+#define sched_ravg_window TICK_NSEC
+#define sysctl_sched_use_walt_cpu_util 0
+
#endif /* CONFIG_SCHED_WALT */
+extern unsigned long
+boosted_cpu_util(int cpu, struct sched_walt_cpu_load *walt_load);
#endif
extern unsigned int capacity_margin_freq;
@@ -2417,11 +2428,6 @@ extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-#define NO_BOOST 0
-#define FULL_THROTTLE_BOOST 1
-#define CONSERVATIVE_BOOST 2
-#define RESTRAINED_BOOST 3
-
static inline int cpu_capacity(int cpu)
{
return cpu_rq(cpu)->cluster->capacity;
@@ -2822,9 +2828,12 @@ static inline int alloc_related_thread_groups(void) { return 0; }
#define trace_sched_cpu_load_wakeup(...)
static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) { }
-
static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
-
+static inline bool
+task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+ return 0;
+}
#ifdef CONFIG_SMP
static inline unsigned long thermal_cap(int cpu)
{
@@ -2839,6 +2848,11 @@ static inline int cpu_max_power_cost(int cpu)
static inline void clear_walt_request(int cpu) { }
+static inline int is_reserved(int cpu)
+{
+ return 0;
+}
+
static inline int got_boost_kick(void)
{
return 0;
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 93643bac35af..898f3a0733e4 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -753,6 +753,10 @@ static void schedtune_attach(struct cgroup_taskset *tset)
sync_cgroup_colocation(task, colocate);
}
+#else
+static void schedtune_attach(struct cgroup_taskset *tset)
+{
+}
#endif
static int
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 7edae12f38f2..5caccea33004 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -377,6 +377,10 @@ static inline int walt_start_cpu(int prev_cpu)
return prev_cpu;
}
+static inline u64 sched_irqload(int cpu)
+{
+ return 0;
+}
#endif /* CONFIG_SCHED_WALT */
#endif
diff --git a/mm/swap.c b/mm/swap.c
index c0ce1188dc73..582722507484 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -208,9 +208,10 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
{
int *pgmoved = arg;
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- enum lru_list lru = page_lru_base_type(page);
- list_move_tail(&page->lru, &lruvec->lists[lru]);
+ if (PageLRU(page) && !PageUnevictable(page)) {
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+ ClearPageActive(page);
+ add_page_to_lru_list_tail(page, lruvec, page_lru(page));
(*pgmoved)++;
}
}
@@ -234,7 +235,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
*/
void rotate_reclaimable_page(struct page *page)
{
- if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+ if (!PageLocked(page) && !PageDirty(page) &&
!PageUnevictable(page) && PageLRU(page)) {
struct pagevec *pvec;
unsigned long flags;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 658d62c47072..ca06de25ee68 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,6 +87,7 @@ struct scan_control {
/* The highest zone to isolate pages for reclaim from */
enum zone_type reclaim_idx;
+ /* Writepage batching in laptop mode; RECLAIM_WRITE */
unsigned int may_writepage:1;
/* Can mapped pages be reclaimed? */
@@ -1057,6 +1058,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* throttling so we could easily OOM just because too many
* pages are in writeback and there is nothing else to
* reclaim. Wait for the writeback to complete.
+ *
+ * In cases 1) and 2) we activate the pages to get them out of
+ * the way while we continue scanning for clean pages on the
+ * inactive list and refilling from the active list. The
+ * observation here is that waiting for disk writes is more
+ * expensive than potentially causing reloads down the line.
+ * Since they're marked for immediate reclaim, they won't put
+ * memory pressure on the cache working set any longer than it
+ * takes to write them to disk.
*/
if (PageWriteback(page)) {
/* Case 1 above */
@@ -1064,7 +1074,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
PageReclaim(page) &&
(pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
nr_immediate++;
- goto keep_locked;
+ goto activate_locked;
/* Case 2 above */
} else if (sane_reclaim(sc) ||
@@ -1082,7 +1092,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
SetPageReclaim(page);
nr_writeback++;
- goto keep_locked;
+ goto activate_locked;
/* Case 3 above */
} else {
@@ -1153,14 +1163,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageDirty(page)) {
/*
- * Only kswapd can writeback filesystem pages to
- * avoid risk of stack overflow but only writeback
- * if many dirty pages have been encountered.
+ * Only kswapd can writeback filesystem pages
+ * to avoid risk of stack overflow. But avoid
+ * injecting inefficient single-page IO into
+ * flusher writeback as much as possible: only
+ * write pages when we've encountered many
+ * dirty pages, and when we've already scanned
+ * the rest of the LRU for clean pages and see
+ * the same dirty pages again (PageReclaim).
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() ||
- (pgdat &&
- !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
+ (!current_is_kswapd() || !PageReclaim(page) ||
+ !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -1170,7 +1184,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
SetPageReclaim(page);
- goto keep_locked;
+ goto activate_locked;
}
if (references == PAGEREF_RECLAIM_CLEAN)
@@ -1416,13 +1430,10 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
* wants to isolate pages it will be able to operate on without
* blocking - clean pages for the most part.
*
- * ISOLATE_CLEAN means that only clean pages should be isolated. This
- * is used by reclaim when it is cannot write to backing storage
- *
* ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
* that it is possible to migrate without blocking
*/
- if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+ if (mode & ISOLATE_ASYNC_MIGRATE) {
/* All the caller can do on PageWriteback is block */
if (PageWriteback(page))
return ret;
@@ -1430,10 +1441,6 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
if (PageDirty(page)) {
struct address_space *mapping;
- /* ISOLATE_CLEAN means only clean pages */
- if (mode & ISOLATE_CLEAN)
- return ret;
-
/*
* Only pages without mappings or that have a
* ->migratepage callback are possible to migrate
@@ -1831,8 +1838,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
spin_lock_irq(&pgdat->lru_lock);
@@ -1894,6 +1899,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
set_bit(PGDAT_WRITEBACK, &pgdat->flags);
/*
+ * If dirty pages are scanned that are not queued for IO, it
+ * implies that flushers are not doing their job. This can
+ * happen when memory pressure pushes dirty pages to the end of
+ * the LRU before the dirty limits are breached and the dirty
+ * data has expired. It can also happen when the proportion of
+ * dirty pages grows not through writes but through memory
+ * pressure reclaiming all the clean cache. And in some cases,
+ * the flushers simply cannot keep up with the allocation
+ * rate. Nudge the flusher threads in case they are asleep.
+ */
+ if (nr_unqueued_dirty == nr_taken)
+ wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+
+ /*
* Legacy memcg will stall in page writeback so avoid forcibly
* stalling here.
*/
@@ -1905,12 +1924,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (nr_dirty && nr_dirty == nr_congested)
set_bit(PGDAT_CONGESTED, &pgdat->flags);
- /*
- * If dirty pages are scanned that are not queued for IO, it
- * implies that flushers are not keeping up. In this case, flag
- * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
- * reclaim context.
- */
+ /* Allow kswapd to start writing pages during reclaim. */
if (nr_unqueued_dirty == nr_taken)
set_bit(PGDAT_DIRTY, &pgdat->flags);
@@ -2020,8 +2034,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
spin_lock_irq(&pgdat->lru_lock);
@@ -2823,8 +2835,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc)
{
int initial_priority = sc->priority;
- unsigned long total_scanned = 0;
- unsigned long writeback_threshold;
retry:
delayacct_freepages_start();
@@ -2837,7 +2847,6 @@ retry:
sc->nr_scanned = 0;
shrink_zones(zonelist, sc);
- total_scanned += sc->nr_scanned;
if (sc->nr_reclaimed >= sc->nr_to_reclaim)
break;
@@ -2850,20 +2859,6 @@ retry:
*/
if (sc->priority < DEF_PRIORITY - 2)
sc->may_writepage = 1;
-
- /*
- * Try to write back as many pages as we just scanned. This
- * tends to cause slow streaming writers to write data to the
- * disk smoothly, at the dirtying rate, which is nice. But
- * that's undesirable in laptop mode, where we *want* lumpy
- * writeout. So in laptop mode, write out the whole world.
- */
- writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
- if (total_scanned > writeback_threshold) {
- wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
- WB_REASON_TRY_TO_FREE_PAGES);
- sc->may_writepage = 1;
- }
} while (--sc->priority >= 0);
delayacct_freepages_end();
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
index 77b45bca3b87..f238edbfb823 100644
--- a/net/core/sockev_nlmcast.c
+++ b/net/core/sockev_nlmcast.c
@@ -69,14 +69,17 @@ static int sockev_client_cb(struct notifier_block *nb,
struct nlmsghdr *nlh;
struct sknlsockevmsg *smsg;
struct socket *sock;
+ struct sock *sk;
sock = (struct socket *)data;
- if (socknlmsgsk == 0)
+ if (!socknlmsgsk || !sock)
goto done;
- if ((!socknlmsgsk) || (!sock) || (!sock->sk))
+
+ sk = sock->sk;
+ if (!sk)
goto done;
- if (sock->sk->sk_family != AF_INET && sock->sk->sk_family != AF_INET6)
+ if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
goto done;
if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
@@ -97,12 +100,11 @@ static int sockev_client_cb(struct notifier_block *nb,
smsg = nlmsg_data(nlh);
smsg->pid = current->pid;
_sockev_event(event, smsg->event, sizeof(smsg->event));
- smsg->skfamily = sock->sk->sk_family;
- smsg->skstate = sock->sk->sk_state;
- smsg->skprotocol = sock->sk->sk_protocol;
- smsg->sktype = sock->sk->sk_type;
- smsg->skflags = sock->sk->sk_flags;
-
+ smsg->skfamily = sk->sk_family;
+ smsg->skstate = sk->sk_state;
+ smsg->skprotocol = sk->sk_protocol;
+ smsg->sktype = sk->sk_type;
+ smsg->skflags = sk->sk_flags;
nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
done:
return 0;
diff --git a/net/ipc_router/Kconfig b/net/ipc_router/Kconfig
index 30cd45a70208..20f94aa3a976 100644
--- a/net/ipc_router/Kconfig
+++ b/net/ipc_router/Kconfig
@@ -23,3 +23,22 @@ config IPC_ROUTER_SECURITY
once configured with the security rules will ensure that the
sender of the message to a service belongs to the relevant
Linux group as configured by the security script.
+
+config IPC_ROUTER_NODE_ID
+ depends on IPC_ROUTER
+ int "IPC router local NODE ID"
+ default 1
+ help
+ This option allows the IPC Router node ID to be configured at build
+ time. The value defined here is used as the local node ID by the IPC
+ Router core and is advertised to the other nodes present in the
+ network.
+
+config IPC_ROUTER_FIFO_XPRT
+ depends on IPC_ROUTER
+ bool "IPC Router FIFO Transport"
+ help
+ FIFO Transport Layer that enables IPC Router communication between
+ two virtual machines. When the Shared FIFO becomes available, this
+ layer registers the transport with IPC Router and enables message
+ exchange.
diff --git a/net/ipc_router/Makefile b/net/ipc_router/Makefile
index 501688e42e3d..63d33a522a75 100644
--- a/net/ipc_router/Makefile
+++ b/net/ipc_router/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_IPC_ROUTER) := ipc_router_core.o
obj-$(CONFIG_IPC_ROUTER) += ipc_router_socket.o
obj-$(CONFIG_IPC_ROUTER_SECURITY) += ipc_router_security.o
+obj-$(CONFIG_IPC_ROUTER_FIFO_XPRT) += ipc_router_fifo_xprt.o
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index 34031088cfd8..1a6b539f252e 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -137,6 +137,7 @@ struct msm_ipc_router_xprt_info {
struct msm_ipc_router_xprt *xprt;
u32 remote_node_id;
u32 initialized;
+ u32 hello_sent;
struct list_head pkt_list;
struct wakeup_source ws;
struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
@@ -2494,12 +2495,36 @@ static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
}
}
+static int send_hello_msg(struct msm_ipc_router_xprt_info *xprt_info)
+{
+ int rc = 0;
+ union rr_control_msg ctl;
+
+ if (!xprt_info->hello_sent) {
+ xprt_info->hello_sent = 1;
+ /* Send a HELLO message */
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
+ ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
+ ctl.hello.versions = (uint32_t)IPC_ROUTER_VER_BITMASK;
+ ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
+ rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
+ IPC_ROUTER_DUMMY_DEST_NODE);
+ if (rc < 0) {
+ xprt_info->hello_sent = 0;
+ IPC_RTR_ERR("%s: Error sending HELLO message\n",
+ __func__);
+ return rc;
+ }
+ }
+ return rc;
+}
+
static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
union rr_control_msg *msg,
struct rr_header_v1 *hdr)
{
int i, rc = 0;
- union rr_control_msg ctl;
struct msm_ipc_routing_table_entry *rt_entry;
if (!hdr)
@@ -2514,19 +2539,10 @@ static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
kref_put(&rt_entry->ref, ipc_router_release_rtentry);
do_version_negotiation(xprt_info, msg);
- /* Send a reply HELLO message */
- memset(&ctl, 0, sizeof(ctl));
- ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
- ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
- ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
- ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
- rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
- IPC_ROUTER_DUMMY_DEST_NODE);
- if (rc < 0) {
- IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
- __func__);
+ rc = send_hello_msg(xprt_info);
+ if (rc < 0)
return rc;
- }
+
xprt_info->initialized = 1;
/* Send list of servers from the local node and from nodes
@@ -4068,6 +4084,7 @@ static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
xprt_info->xprt = xprt;
xprt_info->initialized = 0;
+ xprt_info->hello_sent = 0;
xprt_info->remote_node_id = -1;
INIT_LIST_HEAD(&xprt_info->pkt_list);
mutex_init(&xprt_info->rx_lock_lhb2);
@@ -4109,6 +4126,7 @@ static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
up_write(&routing_table_lock_lha3);
xprt->priv = xprt_info;
+ send_hello_msg(xprt_info);
return 0;
}
diff --git a/net/ipc_router/ipc_router_fifo_xprt.c b/net/ipc_router/ipc_router_fifo_xprt.c
new file mode 100644
index 000000000000..c90534dac923
--- /dev/null
+++ b/net/ipc_router/ipc_router_fifo_xprt.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <microvisor/microvisor.h>
+
+#define MODULE_NAME "ipc_router_fifo_xprt"
+#define XPRT_NAME_LEN 32
+
+#define FIFO_MAGIC_KEY 0x24495043 /* "$IPC" */
+#define FIFO_SIZE 0x4000
+#define FIFO_0_START 0x1000
+#define FIFO_1_START (FIFO_0_START + FIFO_SIZE)
+#define FIFO_MAGIC_IDX 0x0
+#define TAIL_0_IDX 0x1
+#define HEAD_0_IDX 0x2
+#define TAIL_1_IDX 0x3
+#define HEAD_1_IDX 0x4
+
+struct msm_ipc_pipe {
+ __le32 *tail;
+ __le32 *head;
+
+ void *fifo;
+ size_t length;
+};
+
+/**
+ * struct ipcr_fifo_xprt - IPC Router's FIFO XPRT structure
+ * @xprt: IPC Router XPRT structure to contain XPRT specific info.
+ * @tx_pipe: TX FIFO specific info.
+ * @rx_pipe: RX FIFO specific info.
+ * @xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @in_pkt: Pointer to any partially read packet.
+ * @read_work: Work item that reads incoming data from the RX FIFO.
+ * @sft_close_complete: Completion to indicate that IPC Router has finished
+ * SSR handling for this XPRT.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @fifo_base: Base address of the shared FIFO region.
+ * @fifo_size: Size of the shared FIFO region.
+ * @tx_fifo_idx: Non-zero if this end transmits on FIFO 0 ("qcom,tx-is-first").
+ * @kcap: OKL4 capability used to raise the virtual IRQ to the remote end.
+ */
+struct ipcr_fifo_xprt {
+ struct msm_ipc_router_xprt xprt;
+ struct msm_ipc_pipe tx_pipe;
+ struct msm_ipc_pipe rx_pipe;
+ struct workqueue_struct *xprt_wq;
+ struct rr_packet *in_pkt;
+ struct delayed_work read_work;
+ struct completion sft_close_complete;
+ unsigned int xprt_version;
+ struct platform_driver driver;
+ char xprt_name[XPRT_NAME_LEN];
+ void *fifo_base;
+ size_t fifo_size;
+ int tx_fifo_idx;
+ okl4_kcap_t kcap;
+};
+
+static void xprt_read_data(struct work_struct *work);
+static void ipcr_fifo_raise_virq(struct ipcr_fifo_xprt *xprtp);
+
+static size_t fifo_rx_avail(struct msm_ipc_pipe *pipe)
+{
+ u32 head;
+ u32 tail;
+
+ head = le32_to_cpu(*pipe->head);
+ tail = le32_to_cpu(*pipe->tail);
+
+ if (head < tail)
+ return pipe->length - tail + head;
+
+ return head - tail;
+}
+
+static void fifo_rx_peak(struct msm_ipc_pipe *pipe,
+ void *data, unsigned int offset, size_t count)
+{
+ size_t len;
+ u32 tail;
+
+ tail = le32_to_cpu(*pipe->tail);
+ tail += offset;
+ if (tail >= pipe->length)
+ tail -= pipe->length;
+
+ len = min_t(size_t, count, pipe->length - tail);
+ if (len)
+ memcpy_fromio(data, pipe->fifo + tail, len);
+
+ if (len != count)
+ memcpy_fromio(data + len, pipe->fifo, (count - len));
+}
+
+static void fifo_rx_advance(struct msm_ipc_pipe *pipe, size_t count)
+{
+ u32 tail;
+
+ tail = le32_to_cpu(*pipe->tail);
+
+ tail += count;
+ if (tail > pipe->length)
+ tail -= pipe->length;
+
+ *pipe->tail = cpu_to_le32(tail);
+}
+
+static size_t fifo_tx_avail(struct msm_ipc_pipe *pipe)
+{
+ u32 head;
+ u32 tail;
+ u32 avail;
+
+ head = le32_to_cpu(*pipe->head);
+ tail = le32_to_cpu(*pipe->tail);
+
+ if (tail <= head)
+ avail = pipe->length - head + tail;
+ else
+ avail = tail - head;
+
+ return avail;
+}
+
+static void fifo_tx_write(struct msm_ipc_pipe *pipe,
+ const void *data, size_t count)
+{
+ size_t len;
+ u32 head;
+
+ head = le32_to_cpu(*pipe->head);
+
+ len = min_t(size_t, count, pipe->length - head);
+ if (len)
+ memcpy_toio(pipe->fifo + head, data, len);
+
+ if (len != count)
+ memcpy_toio(pipe->fifo, data + len, count - len);
+
+ head += count;
+ if (head >= pipe->length)
+ head -= pipe->length;
+
+ /* Ensure ordering of fifo and head update */
+ wmb();
+
+ *pipe->head = cpu_to_le32(head);
+}
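The helpers above implement a simple single-producer/single-consumer ring: each side advances only its own index (head for TX, tail for RX) and wraparound is handled by subtracting the FIFO length. The same index arithmetic is shown standalone below for clarity; this demo structure is illustrative only and not part of the driver:

/* Standalone illustration of the head/tail arithmetic used above. */
struct demo_ring { u32 head, tail, length; };

static u32 demo_rx_avail(const struct demo_ring *r)
{
	/* Bytes the remote producer has written that we have not consumed. */
	if (r->head < r->tail)
		return r->length - r->tail + r->head;
	return r->head - r->tail;
}

static u32 demo_tx_avail(const struct demo_ring *r)
{
	/* Space the local producer may fill before catching up with tail. */
	if (r->tail <= r->head)
		return r->length - r->head + r->tail;
	return r->tail - r->head;
}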
+
+/**
+ * set_xprt_version() - Set IPC Router header version in the transport
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void set_xprt_version(struct msm_ipc_router_xprt *xprt,
+ unsigned int version)
+{
+ struct ipcr_fifo_xprt *xprtp;
+
+ if (!xprt)
+ return;
+ xprtp = container_of(xprt, struct ipcr_fifo_xprt, xprt);
+ xprtp->xprt_version = version;
+}
+
+static int get_xprt_version(struct msm_ipc_router_xprt *xprt)
+{
+ struct ipcr_fifo_xprt *xprtp;
+
+ if (!xprt)
+ return -EINVAL;
+ xprtp = container_of(xprt, struct ipcr_fifo_xprt, xprt);
+ return (int)xprtp->xprt_version;
+}
+
+static int get_xprt_option(struct msm_ipc_router_xprt *xprt)
+{
+ /* fragmented data is NOT supported */
+ return 0;
+}
+
+static int xprt_close(struct msm_ipc_router_xprt *xprt)
+{
+ return 0;
+}
+
+static void xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+ struct ipcr_fifo_xprt *xprtp;
+
+ if (!xprt)
+ return;
+
+ xprtp = container_of(xprt, struct ipcr_fifo_xprt, xprt);
+ complete_all(&xprtp->sft_close_complete);
+}
+
+static int xprt_write(void *data, uint32_t len,
+ struct msm_ipc_router_xprt *xprt)
+{
+ struct rr_packet *pkt = (struct rr_packet *)data;
+ struct sk_buff *skb;
+ struct ipcr_fifo_xprt *xprtp;
+
+ xprtp = container_of(xprt, struct ipcr_fifo_xprt, xprt);
+
+ if (!pkt)
+ return -EINVAL;
+
+ if (!len || pkt->length != len)
+ return -EINVAL;
+
+ /* TODO: FIFO write : check if we can write full packet at one shot */
+ if (skb_queue_len(pkt->pkt_fragment_q) != 1) {
+ pr_err("IPC router core is given fragmented data\n");
+ return -EINVAL;
+ }
+ if (fifo_tx_avail(&xprtp->tx_pipe) < len) {
+ pr_err("No Space in FIFO\n");
+ return -EAGAIN;
+ }
+
+ skb_queue_walk(pkt->pkt_fragment_q, skb) {
+ fifo_tx_write(&xprtp->tx_pipe, skb->data, skb->len);
+ }
+
+ ipcr_fifo_raise_virq(xprtp);
+
+ return len;
+}
+
+static void xprt_read_data(struct work_struct *work)
+{
+ void *data;
+ size_t hdr_len;
+ size_t rx_avail;
+ ssize_t pkt_len;
+ struct rr_header_v1 hdr;
+ struct sk_buff *ipc_rtr_pkt;
+ struct ipcr_fifo_xprt *xprtp;
+ struct delayed_work *rwork = to_delayed_work(work);
+
+ xprtp = container_of(rwork, struct ipcr_fifo_xprt, read_work);
+
+ hdr_len = sizeof(struct rr_header_v1);
+ while (1) {
+ rx_avail = fifo_rx_avail(&xprtp->rx_pipe);
+ if (!rx_avail)
+ break;
+
+ fifo_rx_peak(&xprtp->rx_pipe, &hdr, 0, hdr_len);
+ pkt_len = ipc_router_peek_pkt_size((char *)&hdr);
+
+ if (pkt_len < 0) {
+ pr_err("%s invalid pkt_len %zu\n", __func__, pkt_len);
+ break;
+ }
+ if (!xprtp->in_pkt) {
+ xprtp->in_pkt = create_pkt(NULL);
+ if (!xprtp->in_pkt)
+ break;
+ }
+ ipc_rtr_pkt = alloc_skb(pkt_len, GFP_KERNEL);
+ if (!ipc_rtr_pkt) {
+ release_pkt(xprtp->in_pkt);
+ xprtp->in_pkt = NULL;
+ break;
+ }
+ data = skb_put(ipc_rtr_pkt, pkt_len);
+ do {
+ rx_avail = fifo_rx_avail(&xprtp->rx_pipe);
+ if (rx_avail >= pkt_len) {
+ fifo_rx_peak(&xprtp->rx_pipe, data, 0, pkt_len);
+ fifo_rx_advance(&xprtp->rx_pipe, pkt_len);
+ break;
+ }
+ pr_debug("%s wait for FULL PKT [avail: len][%zu:%zu]\n",
+ __func__, rx_avail, pkt_len);
+ /* wait for complete packet written into FIFO */
+ msleep(20);
+ } while (1);
+
+ skb_queue_tail(xprtp->in_pkt->pkt_fragment_q, ipc_rtr_pkt);
+ xprtp->in_pkt->length = pkt_len;
+ msm_ipc_router_xprt_notify(&xprtp->xprt,
+ IPC_ROUTER_XPRT_EVENT_DATA,
+ (void *)xprtp->in_pkt);
+ release_pkt(xprtp->in_pkt);
+ xprtp->in_pkt = NULL;
+ }
+}
+
+static void ipcr_fifo_raise_virq(struct ipcr_fifo_xprt *xprtp)
+{
+ okl4_error_t err;
+ unsigned long payload = 0xffff;
+
+ err = _okl4_sys_vinterrupt_raise(xprtp->kcap, payload);
+}
+
+static irqreturn_t ipcr_fifo_virq_handler(int irq, void *dev_id)
+{
+ struct ipcr_fifo_xprt *xprtp = dev_id;
+
+ queue_delayed_work(xprtp->xprt_wq, &xprtp->read_work, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ipcr_fifo_config_init() - init FIFO xprt configs
+ * @xprtp: FIFO XPRT structure to initialize.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function initializes the TX/RX pipe descriptors in the shared FIFO,
+ * fills in the XPRT operations and registers the XPRT with the IPC Router
+ * core.
+ */
+static int ipcr_fifo_config_init(struct ipcr_fifo_xprt *xprtp)
+{
+ __le32 *descs;
+
+ descs = xprtp->fifo_base;
+ descs[FIFO_MAGIC_IDX] = cpu_to_le32(FIFO_MAGIC_KEY);
+
+ if (xprtp->tx_fifo_idx) {
+ xprtp->tx_pipe.tail = &descs[TAIL_0_IDX];
+ xprtp->tx_pipe.head = &descs[HEAD_0_IDX];
+ xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+ xprtp->tx_pipe.length = FIFO_SIZE;
+
+ xprtp->rx_pipe.tail = &descs[TAIL_1_IDX];
+ xprtp->rx_pipe.head = &descs[HEAD_1_IDX];
+ xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+ xprtp->rx_pipe.length = FIFO_SIZE;
+ } else {
+ xprtp->tx_pipe.tail = &descs[TAIL_1_IDX];
+ xprtp->tx_pipe.head = &descs[HEAD_1_IDX];
+ xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+ xprtp->tx_pipe.length = FIFO_SIZE;
+
+ xprtp->rx_pipe.tail = &descs[TAIL_0_IDX];
+ xprtp->rx_pipe.head = &descs[HEAD_0_IDX];
+ xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+ xprtp->rx_pipe.length = FIFO_SIZE;
+ }
+
+ /* Reset respective index */
+ *xprtp->tx_pipe.head = 0;
+ *xprtp->rx_pipe.tail = 0;
+
+ xprtp->xprt.link_id = 1;
+ xprtp->xprt_version = 1;
+
+ strlcpy(xprtp->xprt_name, "IPCR_FIFO_XPRT", XPRT_NAME_LEN);
+ xprtp->xprt.name = xprtp->xprt_name;
+
+ xprtp->xprt.set_version = set_xprt_version;
+ xprtp->xprt.get_version = get_xprt_version;
+ xprtp->xprt.get_option = get_xprt_option;
+ xprtp->xprt.read_avail = NULL;
+ xprtp->xprt.read = NULL;
+ xprtp->xprt.write_avail = NULL;
+ xprtp->xprt.write = xprt_write;
+ xprtp->xprt.close = xprt_close;
+ xprtp->xprt.sft_close_done = xprt_sft_close_done;
+ xprtp->xprt.priv = NULL;
+
+ xprtp->in_pkt = NULL;
+ xprtp->xprt_wq = create_singlethread_workqueue(xprtp->xprt_name);
+ if (!xprtp->xprt_wq)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&xprtp->read_work, xprt_read_data);
+
+ msm_ipc_router_xprt_notify(&xprtp->xprt,
+ IPC_ROUTER_XPRT_EVENT_OPEN,
+ NULL);
+
+ if (fifo_rx_avail(&xprtp->rx_pipe))
+ queue_delayed_work(xprtp->xprt_wq, &xprtp->read_work, 0);
+
+ return 0;
+}
+
+/**
+ * ipcr_fifo_xprt_probe() - Probe a FIFO xprt
+ *
+ * @pdev: Platform device corresponding to FIFO xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an FIFO transport.
+ */
+static int ipcr_fifo_xprt_probe(struct platform_device *pdev)
+{
+ int irq;
+ int ret;
+ struct resource *r;
+ struct device *parent;
+ struct ipcr_fifo_xprt *xprtp;
+ struct device_node *ipc_irq_np;
+ struct device_node *ipc_shm_np;
+ struct platform_device *ipc_shm_dev;
+
+ xprtp = devm_kzalloc(&pdev->dev, sizeof(*xprtp), GFP_KERNEL);
+ if (!xprtp)
+ return -ENOMEM;
+
+ parent = &pdev->dev;
+ ipc_irq_np = parent->of_node;
+
+ irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0) {
+ ret = devm_request_irq(parent, irq, ipcr_fifo_virq_handler,
+ IRQF_TRIGGER_RISING, dev_name(parent),
+ xprtp);
+ if (ret < 0)
+ return -ENODEV;
+ }
+
+ /* this kcap is required to raise VIRQ */
+ ret = of_property_read_u32(ipc_irq_np, "reg", &xprtp->kcap);
+ if (ret < 0)
+ return -ENODEV;
+
+ ipc_shm_np = of_parse_phandle(ipc_irq_np, "qcom,ipc-shm", 0);
+ if (!ipc_shm_np)
+ return -ENODEV;
+
+ ipc_shm_dev = of_find_device_by_node(ipc_shm_np);
+ if (!ipc_shm_dev)
+ return -ENODEV;
+
+ r = platform_get_resource(ipc_shm_dev, IORESOURCE_MEM, 0);
+ if (!r) {
+ pr_err("%s failed to get shared FIFO\n", __func__);
+ return -ENODEV;
+ }
+
+ xprtp->tx_fifo_idx = of_property_read_bool(ipc_shm_np,
+ "qcom,tx-is-first");
+
+ xprtp->fifo_size = resource_size(r);
+ xprtp->fifo_base = devm_ioremap_nocache(&pdev->dev, r->start,
+ resource_size(r));
+ if (!xprtp->fifo_base) {
+ pr_err("%s ioreamp_nocache() failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret = ipcr_fifo_config_init(xprtp);
+ if (ret) {
+ IPC_RTR_ERR("%s init failed ret[%d]\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ipcr_fifo_xprt_match_table[] = {
+ { .compatible = "qcom,ipcr-fifo-xprt" },
+ {},
+};
+
+static struct platform_driver ipcr_fifo_xprt_driver = {
+ .probe = ipcr_fifo_xprt_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ipcr_fifo_xprt_match_table,
+ },
+};
+
+static int __init ipcr_fifo_xprt_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&ipcr_fifo_xprt_driver);
+ if (rc) {
+ IPC_RTR_ERR("%s: driver register failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+module_init(ipcr_fifo_xprt_init);
+MODULE_DESCRIPTION("IPC Router FIFO XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/net/ipc_router/ipc_router_private.h b/net/ipc_router/ipc_router_private.h
index 3ec981812982..6e0c4bebc05c 100644
--- a/net/ipc_router/ipc_router_private.h
+++ b/net/ipc_router/ipc_router_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,7 +37,7 @@
#define IPC_ROUTER_ADDRESS 0x0000FFFF
-#define IPC_ROUTER_NID_LOCAL 1
+#define IPC_ROUTER_NID_LOCAL CONFIG_IPC_ROUTER_NODE_ID
#define MAX_IPC_PKT_SIZE 66000
#define IPC_ROUTER_LOW_RX_QUOTA 5
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
index e1d01004d50e..a86042c98e1f 100644
--- a/security/pfe/pfk_ice.c
+++ b/security/pfe/pfk_ice.c
@@ -138,9 +138,10 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
if (ret1)
pr_err("%s: Invalidate Key Error: %d\n", __func__,
ret1);
- goto out;
}
- ret = qcom_ice_setup_ice_hw((const char *)s_type, false);
+ ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
+ if (ret1)
+ pr_err("%s: Error %d disabling clocks\n", __func__, ret1);
out:
return ret;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 051ee18964b3..31e12542e150 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1433,7 +1433,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
scontext_len, &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
- context.len = scontext_len;
+ context.len = strlen(str) + 1;
str = NULL;
} else if (rc)
goto out_unlock;